//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
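///
/// For example, a 64-bit VOP3 instruction such as 'v_add_f32_e64 v0, v1, v2'
/// that uses no source or output modifiers can be rewritten to the 32-bit
/// VOP2 form 'v_add_f32_e32 v0, v1, v2', halving its encoded size.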
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
30 "Number of 64-bit instruction reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}

static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {

  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instruction with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then doing the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
      default: return false;

      case AMDGPU::V_ADDC_U32_e64:
      case AMDGPU::V_SUBB_U32_e64:
      case AMDGPU::V_SUBBREV_U32_e64:
        if (!isVGPR(TII->getNamedOperand(MI, AMDGPU::OpName::src1), TRI, MRI))
          return false;
        // Additional verification is needed for sdst/src2.
        return true;

      case AMDGPU::V_MAC_F32_e64:
      case AMDGPU::V_MAC_F16_e64:
      case AMDGPU::V_FMAC_F32_e64:
        if (!isVGPR(Src2, TRI, MRI) ||
            TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
          return false;
        break;

      case AMDGPU::V_CNDMASK_B32_e64:
        break;
    }
  }

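  // In the 32-bit encodings src1 is restricted to a VGPR, and none of the
  // operand slots have room for abs/neg source modifiers.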
  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1 && (!isVGPR(Src1, TRI, MRI) ||
               TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
    return false;

  // We don't need to check src0; all input types are legal there, so just
  // make sure src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers.
  return !TII->hasModifiersSet(MI, AMDGPU::OpName::omod) &&
         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2 or VOPC instruction.
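///
/// For example (illustrative vreg numbers), given
///   %0 = V_MOV_B32_e32 0x1234
///   %1 = V_ADD_F32_e32 %0, %2
/// the literal is folded to give '%1 = V_ADD_F32_e32 0x1234, %2' and the mov
/// is deleted, provided %0 has no other uses.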
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0.
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    unsigned Reg = Src0.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                               isUInt<32>(MovSrc.getImm()))) {
          // It's possible to have only one component of a super-reg defined by
          // a single mov, so we need to clear any subregister flag.
          Src0.setSubReg(0);
          Src0.ChangeToImmediate(MovSrc.getImm());
          ConstantFolded = true;
        } else if (MovSrc.isFI()) {
          Src0.setSubReg(0);
          Src0.ChangeToFrameIndex(MovSrc.getIndex());
          ConstantFolded = true;
        }

        if (ConstantFolded) {
          assert(MRI.use_empty(Reg));
          Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, TII, MRI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

// Copy the kill/undef flags from \p Orig onto the matching implicit vcc use
// of \p MI.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {
  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

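// SOPK instructions carry their immediate inline in a 16-bit field, so
// shrinking to them only pays off when the constant fits in 16 bits but is
// not already a free inline constant.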
static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
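///
/// For example, the literal 0x80000000 (a lone sign bit) bit-reverses to 1,
/// an inline immediate, so 'v_mov_b32 v0, 0x80000000' can be rewritten as
/// 'v_bfrev_b32 v0, 1' and drop the extra 32-bit literal dword.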
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands of \p MI that are not part of the
/// instruction definition onto \p NewMI.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

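// Shrink an s_cmp_* with a 16-bit-encodable immediate to the s_cmpk_* form,
// which carries the immediate in the instruction word rather than as a
// trailing literal, e.g. 's_cmp_lg_u32 s0, 0x1234' -> 's_cmpk_lg_u32 s0, 0x1234'.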
static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and is initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of
        // materializing sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait.
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free since
      // it won't know if the src == dest constraint will end up being
      // satisfied.
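      //
      // When dst is tied to src0, an s_add_i32/s_mul_i32 with a 16-bit
      // immediate can use the shorter SOPK form, e.g.
      //   s_add_i32 s0, s0, 0x1234  ->  s_addk_i32 s0, 0x1234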
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
            Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        unsigned SReg = Src2->getReg();
        if (TargetRegisterInfo::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
          continue;
        }
        if (SReg != AMDGPU::VCC)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

      if (SDst) {
        if (SDst->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, AMDGPU::VCC);
          continue;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, AMDGPU::VCC);

          continue;
        }
      }

      // We can shrink this instruction.
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
      // For VOPC instructions, this is replaced by an implicit def of vcc.
      int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
      if (Op32DstIdx != -1) {
        // dst
        Inst32.add(MI.getOperand(0));
      } else {
        assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
               "Unexpected case");
      }

      Inst32.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.add(*Src1);

      if (Src2) {
        int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
        if (Op32Src2Idx != -1) {
          Inst32.add(*Src2);
        } else {
          // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
          // replaced with an implicit read of vcc. This was already added
          // during the initial BuildMI, so find it to preserve the flags.
          copyFlagsToImplicitVCC(*Inst32, *Src2);
        }
      }

      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}