//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
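/// This pass attempts to fold the source operand of each foldable copy
/// (an immediate, frame index, or copied register) directly into the
/// instructions that use it. It also performs a few related peepholes:
/// constant folding of simple 32-bit ALU operations, and folding of clamp
/// and omod modifiers into the instruction that defines their source.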
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned Opc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

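// Apply a fold candidate to its use instruction. Handles packed (v2f16/v2i16)
// inline immediates by adjusting op_sel modifiers, shrinks a commuted VOP3
// add/sub back to its 32-bit encoding when requested, and otherwise rewrites
// the operand to the folded immediate, frame index, or register. Returns
// false if the fold cannot be applied.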
static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }

    if (Fold.needsShrink()) {
      MachineBasicBlock *MBB = MI->getParent();
      auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
      if (Liveness != MachineBasicBlock::LQR_Dead)
        return false;

      MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
      int Op32 = Fold.getShrinkOpcode();
      MachineOperand &Dst0 = MI->getOperand(0);
      MachineOperand &Dst1 = MI->getOperand(1);
      assert(Dst0.isDef() && Dst1.isDef());

      bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

      const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
      unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
      const TargetRegisterClass *Dst1RC = MRI.getRegClass(Dst1.getReg());
      unsigned NewReg1 = MRI.createVirtualRegister(Dst1RC);

      MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

      if (HaveNonDbgCarryUse) {
        BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
          .addReg(AMDGPU::VCC, RegState::Kill);
      }

      // Keep the old instruction around to avoid breaking iterators, but
      // replace the outputs with dummy registers.
      Dst0.setReg(NewReg0);
      Dst1.setReg(NewReg1);

      if (Fold.isCommuted())
        TII.commuteInstruction(*Inst32, false);
      return true;
    }

    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

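// Try to add a fold of OpToFold into operand OpNo of MI to FoldList. If the
// operand is not legal as-is, this tries rewriting mac/fmac to mad/fma,
// s_setreg_b32 to its immediate form, or commuting the instruction to make
// the fold possible.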
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          OpToFold->isImm()) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        int Op32 = AMDGPU::getVOPe32(Opc);
        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

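// Fold OpToFold into the use at UseOpIdx of UseMI, recursing through
// REG_SEQUENCE uses and rewriting foldable copies to mov instructions. Any
// fold candidates discovered are appended to FoldList, and copies whose
// opcode was changed are recorded in CopiesToReplace so their implicit
// operands can be added later.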
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands, only if it is a full
    // copy since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned SrcReg = UseMI->getOperand(1).getReg();
    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto & F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(1).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      return;
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

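// Constant fold a 32-bit binary ALU operation with both operands known.
// Returns true and sets Result if Opcode is one of the handled and/or/xor or
// shift opcodes.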
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

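// Fold OpToFold, the source of the foldable copy MI, into all uses of MI's
// destination register. Immediates are folded into every use that accepts an
// inline constant but into at most one literal use; register operands are
// folded into all uses. Successful folds are applied via updateOperand.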
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector <MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restoring instruction's original operand order if fold has failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

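// If MI is a clamp-only v_max applied to the result of an instruction that
// can take a clamp modifier, fold the clamp into that defining instruction
// and erase MI.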
// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

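// Map a multiplication constant (0.5, 2.0 or 4.0) to the corresponding omod
// output modifier value for the given mul opcode.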
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

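// If MI is a multiply or add that matches an omod output modifier (x0.5, x2
// or x4), fold the modifier into the instruction defining its source and
// erase MI.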
// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

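// Walk each basic block and try to fold the source of each foldable copy
// into its uses, applying the clamp and omod peepholes along the way.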
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}