//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

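/// \returns true if \p MI can use the 4-byte-shorter 32-bit (e32) encoding.
/// The e32 forms carry no source or output modifiers and, aside from the
/// special cases handled below, only accept a VGPR for src1.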
static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {

  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instructions with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then doing the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
      default: return false;

      case AMDGPU::V_ADDC_U32_e64:
      case AMDGPU::V_SUBB_U32_e64:
      case AMDGPU::V_SUBBREV_U32_e64: {
        const MachineOperand *Src1
          = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        if (!Src1->isReg() || !TRI.isVGPR(MRI, Src1->getReg()))
          return false;
        // Additional verification is needed for sdst/src2.
        return true;
      }
      case AMDGPU::V_MAC_F32_e64:
      case AMDGPU::V_MAC_F16_e64:
      case AMDGPU::V_FMAC_F32_e64:
        if (!Src2->isReg() || !TRI.isVGPR(MRI, Src2->getReg()) ||
            TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
          return false;
        break;

      case AMDGPU::V_CNDMASK_B32_e64:
        break;
    }
  }

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1 && (!Src1->isReg() || !TRI.isVGPR(MRI, Src1->getReg()) ||
               TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
    return false;

  // We don't need to check src0, all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers.
  return !TII->hasModifiersSet(MI, AMDGPU::OpName::omod) &&
         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2 or VOPC instruction.
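/// Illustrative example: a single-use "%1 = V_MOV_B32_e32 0x12345678" feeding
/// "%2 = V_ADD_F32_e32 %1, %0" is erased and its literal folded in, giving
/// "%2 = V_ADD_F32_e32 0x12345678, %0".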
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0.
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    unsigned Reg = Src0.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                               isUInt<32>(MovSrc.getImm()))) {
          // It's possible to have only one component of a super-reg defined by
          // a single mov, so we need to clear any subregister flag.
          Src0.setSubReg(0);
          Src0.ChangeToImmediate(MovSrc.getImm());
          ConstantFolded = true;
        } else if (MovSrc.isFI()) {
          Src0.setSubReg(0);
          Src0.ChangeToFrameIndex(MovSrc.getIndex());
          ConstantFolded = true;
        }

        if (ConstantFolded) {
          assert(MRI.use_empty(Reg));
          Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, TII, MRI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

// Copy the undef and kill flags from \p Orig onto the implicit vcc use
// operand of \p MI.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {

  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

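/// \returns true if the 32-bit literal in \p Src fits a signed 16-bit field
/// and is not already free as an inline constant, i.e. switching to a
/// SOPK-style imm16 encoding actually saves the literal dword.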
static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
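/// e.g. the sign mask 0x80000000 needs a full 32-bit literal, but its
/// bit-reverse is 1, an inline immediate, so "v_mov_b32 v0, 0x80000000" can
/// be rewritten as "v_bfrev_b32 v0, 1", dropping the literal dword.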
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
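  // e.g. "s_cmp_lg_u32 s0, 0x1234" becomes "s_cmpk_lg_u32 s0, 0x1234",
  // dropping the 32-bit literal word from the encoding.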
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and is initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait.
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M)
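      //
      // e.g. "s_nop 1" (wait 2 cycles) followed by "s_nop 2" (wait 3 cycles)
      // becomes "s_nop 4" (wait 5 cycles).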
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free since
      // it won't know if the src == dest constraint will end up being
      // satisfied.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
            Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

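        // The SOPK form ties dst to src0, e.g. "s_add_i32 s0, s0, 0x1234"
        // can become "s_addk_i32 s0, 0x1234".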
        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
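      // e.g. "s_mov_b32 s0, 0x1234" becomes "s_movk_i32 s0, 0x1234".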
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
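          //
          // e.g. "v_cmp_eq_f32_e64 %5, %1, %2" is hinted toward vcc so that
          // the post-RA run can emit "v_cmp_eq_f32_e32 vcc, v1, v2".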
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        unsigned SReg = Src2->getReg();
        if (TargetRegisterInfo::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
          continue;
        }
        if (SReg != AMDGPU::VCC)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
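      // The e32 carry-out forms implicitly define vcc, e.g.
      // "v_add_i32_e64 v0, s[0:1], v1, v2" can only shrink to
      // "v_add_i32_e32 v0, vcc, v1, v2" when s[0:1] is in fact vcc.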
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

      if (SDst) {
        if (SDst->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, AMDGPU::VCC);
          continue;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, AMDGPU::VCC);

          continue;
        }
      }

      // We can shrink this instruction.
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
      // For VOPC instructions, this is replaced by an implicit def of vcc.
      int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
      if (Op32DstIdx != -1) {
        // dst
        Inst32.add(MI.getOperand(0));
      } else {
        assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
               "Unexpected case");
      }

      Inst32.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.add(*Src1);

      if (Src2) {
        int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
        if (Op32Src2Idx != -1) {
          Inst32.add(*Src2);
        } else {
          // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
          // replaced with an implicit read of vcc. This was already added
          // during the initial BuildMI, so find it to preserve the flags.
          copyFlagsToImplicitVCC(*Inst32, *Src2);
        }
      }

      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}