//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

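// A single pending fold: the use instruction and operand index to rewrite,
// together with the value that will replace it (register, immediate, frame
// index, or global address). Kind records which union member is active.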
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
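// For example, folding an immediate into src2 of a v_mac rewrites it as
// v_mad, so the immediate's legality must be checked against the v_mad
// operand description rather than the original v_mac one.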
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned Opc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add a heuristic for when the frame index might not fit in the
// addressing mode immediate offset, to avoid materializing it in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
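      // e.g. a 32-bit literal 0x00010000 only occupies the high half, so the
      // fold below clears op_sel_hi, sets op_sel, and uses just the high
      // 16 bits (0x0001) as the folded immediate.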
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
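    // Shrinking e.g. a V_ADD_I32_e64 to its VOP2 form makes the carry-out an
    // implicit def of VCC, so this path is only taken when VCC is dead here.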
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
    if (Liveness != MachineBasicBlock::LQR_Dead)
      return false;

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only fold a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
441 //
Francis Visoiu Mistrih93ef1452017-11-30 12:12:19 +0000442 // %1 = COPY %0:sub1
443 // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
Matt Arsenault3661e902016-08-15 16:18:36 +0000444 //
445 // into
Francis Visoiu Mistrih93ef1452017-11-30 12:12:19 +0000446 // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
Matt Arsenault3661e902016-08-15 16:18:36 +0000447 if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
448 return;
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000449 }
450
Tom Stellard9a197672015-09-09 15:43:26 +0000451 // Special case for REG_SEQUENCE: We can't fold literals into
452 // REG_SEQUENCE instructions, so we have to fold them into the
453 // uses of REG_SEQUENCE.
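  // e.g. for a literal feeding
  //   %2 = REG_SEQUENCE %0, sub0, %1, sub1
  // the fold is attempted directly on each user of %2 instead.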
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.
    MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
                           SOff->getReg() != MFI->getStackPtrOffsetReg()))
      return;

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    SOff->setReg(MFI->getStackPtrOffsetReg());
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned SrcReg = UseMI->getOperand(1).getReg();
    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(), &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto & F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

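// Constant fold the given 32-bit binary opcode over two immediates, writing
// the value to Result. Returns false for opcodes this pass does not evaluate.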
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
    Desc.getNumImplicitUses() +
    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

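// If Op is a virtual register whose def is a move-immediate, return that
// immediate operand; otherwise return Op itself.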
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
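// Currently this handles V_CNDMASK with identical, unmodified sources, which
// degenerates into a plain copy or move of that source.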
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

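// Map the multiplication constant (0.5, 2.0 or 4.0, given as an f32 or f16
// bit pattern) to the corresponding output-modifier encoding, or
// SIOutMods::NONE if the value is not one omod can express.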
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      // ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}