Tom Stellard6596ba72014-11-21 22:06:37 +00001//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
2//
Chandler Carruth2946cd72019-01-19 08:50:56 +00003// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Tom Stellard6596ba72014-11-21 22:06:37 +00006//
7/// \file
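/// This pass tries to fold source operands such as immediates, frame indexes,
/// and global addresses into the instructions that use them, rewriting
/// foldable copies into mov instructions where needed, and also folds clamp
/// and output-modifier (omod) patterns into the defining instruction.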
8//===----------------------------------------------------------------------===//
9//
10
11#include "AMDGPU.h"
12#include "AMDGPUSubtarget.h"
13#include "SIInstrInfo.h"
Matt Arsenault3cb39042017-02-27 19:35:42 +000014#include "SIMachineFunctionInfo.h"
Tom Stellard44b30b42018-05-22 02:03:23 +000015#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Matt Arsenaultff3f9122017-06-20 18:56:32 +000016#include "llvm/ADT/DepthFirstIterator.h"
Matthias Braunf8422972017-12-13 02:51:04 +000017#include "llvm/CodeGen/LiveIntervals.h"
Tom Stellard6596ba72014-11-21 22:06:37 +000018#include "llvm/CodeGen/MachineFunctionPass.h"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/MachineRegisterInfo.h"
Tom Stellard6596ba72014-11-21 22:06:37 +000021#include "llvm/Support/Debug.h"
Benjamin Kramer799003b2015-03-23 19:32:43 +000022#include "llvm/Support/raw_ostream.h"
Tom Stellard6596ba72014-11-21 22:06:37 +000023#include "llvm/Target/TargetMachine.h"
24
25#define DEBUG_TYPE "si-fold-operands"
26using namespace llvm;
27
28namespace {
29
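// A single pending fold: the use instruction and operand number to rewrite,
// the value to fold (immediate, frame index, global address, or register
// operand), and whether the use had to be commuted or may be shrunk to a
// 32-bit encoding for the fold to be legal.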
Tom Stellardbb763e62015-01-07 17:42:16 +000030struct FoldCandidate {
31 MachineInstr *UseMI;
Matt Arsenault2bc198a2016-09-14 15:51:33 +000032 union {
33 MachineOperand *OpToFold;
34 uint64_t ImmToFold;
35 int FrameIndexToFold;
36 };
Matt Arsenaultde6c4212018-08-28 18:34:24 +000037 int ShrinkOpcode;
Matt Arsenault2bc198a2016-09-14 15:51:33 +000038 unsigned char UseOpNo;
39 MachineOperand::MachineOperandType Kind;
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +000040 bool Commuted;
Tom Stellardbb763e62015-01-07 17:42:16 +000041
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +000042 FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
Matt Arsenaultde6c4212018-08-28 18:34:24 +000043 bool Commuted_ = false,
44 int ShrinkOp = -1) :
45 UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
46 Kind(FoldOp->getType()),
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +000047 Commuted(Commuted_) {
Tom Stellard05992972015-01-07 22:44:19 +000048 if (FoldOp->isImm()) {
Tom Stellard05992972015-01-07 22:44:19 +000049 ImmToFold = FoldOp->getImm();
Matt Arsenault2bc198a2016-09-14 15:51:33 +000050 } else if (FoldOp->isFI()) {
51 FrameIndexToFold = FoldOp->getIndex();
Tom Stellard05992972015-01-07 22:44:19 +000052 } else {
Nicolai Haehnle27101712019-06-25 11:52:30 +000053 assert(FoldOp->isReg() || FoldOp->isGlobal());
Tom Stellard05992972015-01-07 22:44:19 +000054 OpToFold = FoldOp;
55 }
56 }
Tom Stellardbb763e62015-01-07 17:42:16 +000057
Matt Arsenault2bc198a2016-09-14 15:51:33 +000058 bool isFI() const {
59 return Kind == MachineOperand::MO_FrameIndex;
60 }
61
Tom Stellardbb763e62015-01-07 17:42:16 +000062 bool isImm() const {
Matt Arsenault2bc198a2016-09-14 15:51:33 +000063 return Kind == MachineOperand::MO_Immediate;
64 }
65
66 bool isReg() const {
67 return Kind == MachineOperand::MO_Register;
Tom Stellardbb763e62015-01-07 17:42:16 +000068 }
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +000069
Nicolai Haehnle27101712019-06-25 11:52:30 +000070 bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }
71
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +000072 bool isCommuted() const {
73 return Commuted;
74 }
Matt Arsenaultde6c4212018-08-28 18:34:24 +000075
76 bool needsShrink() const {
77 return ShrinkOpcode != -1;
78 }
79
80 int getShrinkOpcode() const {
81 return ShrinkOpcode;
82 }
Tom Stellardbb763e62015-01-07 17:42:16 +000083};
84
Matt Arsenault51818c12017-01-10 23:32:04 +000085class SIFoldOperands : public MachineFunctionPass {
86public:
87 static char ID;
88 MachineRegisterInfo *MRI;
89 const SIInstrInfo *TII;
90 const SIRegisterInfo *TRI;
Tom Stellard5bfbae52018-07-11 20:59:01 +000091 const GCNSubtarget *ST;
Matt Arsenault60957cb2019-06-24 14:53:56 +000092 const SIMachineFunctionInfo *MFI;
Matt Arsenault51818c12017-01-10 23:32:04 +000093
94 void foldOperand(MachineOperand &OpToFold,
95 MachineInstr *UseMI,
Matt Arsenault60957cb2019-06-24 14:53:56 +000096 int UseOpIdx,
Matt Arsenault51818c12017-01-10 23:32:04 +000097 SmallVectorImpl<FoldCandidate> &FoldList,
98 SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
99
100 void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
101
Matt Arsenaultd5c65152017-02-22 23:27:53 +0000102 const MachineOperand *isClamp(const MachineInstr &MI) const;
103 bool tryFoldClamp(MachineInstr &MI);
104
Matt Arsenault3cb39042017-02-27 19:35:42 +0000105 std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
106 bool tryFoldOMod(MachineInstr &MI);
107
Matt Arsenault51818c12017-01-10 23:32:04 +0000108public:
109 SIFoldOperands() : MachineFunctionPass(ID) {
110 initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
111 }
112
113 bool runOnMachineFunction(MachineFunction &MF) override;
114
115 StringRef getPassName() const override { return "SI Fold Operands"; }
116
117 void getAnalysisUsage(AnalysisUsage &AU) const override {
118 AU.setPreservesCFG();
119 MachineFunctionPass::getAnalysisUsage(AU);
120 }
121};
122
Tom Stellard6596ba72014-11-21 22:06:37 +0000123} // End anonymous namespace.
124
Matt Arsenault427c5482016-02-11 06:15:34 +0000125INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
126 "SI Fold Operands", false, false)
Tom Stellard6596ba72014-11-21 22:06:37 +0000127
128char SIFoldOperands::ID = 0;
129
130char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
131
Matt Arsenault69e30012017-01-11 22:00:02 +0000132// Wrapper around isInlineConstant that understands special cases when
133// instruction types are replaced during operand folding.
134static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
135 const MachineInstr &UseMI,
136 unsigned OpNo,
137 const MachineOperand &OpToFold) {
138 if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
139 return true;
140
141 unsigned Opc = UseMI.getOpcode();
142 switch (Opc) {
143 case AMDGPU::V_MAC_F32_e64:
Matt Arsenault0084adc2018-04-30 19:08:16 +0000144 case AMDGPU::V_MAC_F16_e64:
145 case AMDGPU::V_FMAC_F32_e64: {
Matt Arsenault69e30012017-01-11 22:00:02 +0000146 // Special case for mac. Since this is replaced with mad when folded into
147 // src2, we need to check the legality for the final instruction.
148 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
149 if (static_cast<int>(OpNo) == Src2Idx) {
Matt Arsenault0084adc2018-04-30 19:08:16 +0000150 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
Matt Arsenault69e30012017-01-11 22:00:02 +0000151 bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
Matt Arsenault0084adc2018-04-30 19:08:16 +0000152
153 unsigned Opc = IsFMA ?
154 AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
155 const MCInstrDesc &MadDesc = TII->get(Opc);
Matt Arsenault69e30012017-01-11 22:00:02 +0000156 return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
157 }
Simon Pilgrim0f5b3502017-07-07 10:18:57 +0000158 return false;
Matt Arsenault69e30012017-01-11 22:00:02 +0000159 }
160 default:
161 return false;
162 }
163}
164
Matt Arsenault60957cb2019-06-24 14:53:56 +0000165// TODO: Add a heuristic for when the frame index may not fit in the addressing
 166// mode immediate offset, to avoid materializing it in loops.
167static bool frameIndexMayFold(const SIInstrInfo *TII,
168 const MachineInstr &UseMI,
169 int OpNo,
170 const MachineOperand &OpToFold) {
171 return OpToFold.isFI() &&
172 (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
173 OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
174}
175
Tom Stellard6596ba72014-11-21 22:06:37 +0000176FunctionPass *llvm::createSIFoldOperandsPass() {
177 return new SIFoldOperands();
178}
179
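// Apply one recorded fold to its use instruction. Packed (v2f16/v2i16)
// immediate operands may have op_sel/op_sel_hi adjusted, and a VOP3 add/sub
// may be shrunk to its 32-bit form when VCC is dead at that point. Returns
// false if the fold turns out not to be possible.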
Tom Stellardbb763e62015-01-07 17:42:16 +0000180static bool updateOperand(FoldCandidate &Fold,
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000181 const SIInstrInfo &TII,
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +0000182 const TargetRegisterInfo &TRI,
183 const GCNSubtarget &ST) {
Tom Stellardbb763e62015-01-07 17:42:16 +0000184 MachineInstr *MI = Fold.UseMI;
185 MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
Tom Stellard6596ba72014-11-21 22:06:37 +0000186 assert(Old.isReg());
187
Tom Stellardbb763e62015-01-07 17:42:16 +0000188 if (Fold.isImm()) {
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +0000189 if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000190 !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +0000191 AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
192 ST.hasInv2PiInlineImm())) {
Stanislav Mekhanoshin160f8572018-04-19 21:16:50 +0000193 // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
194 // already set.
Stanislav Mekhanoshin8b20b7d2018-04-17 23:09:05 +0000195 unsigned Opcode = MI->getOpcode();
196 int OpNo = MI->getOperandNo(&Old);
197 int ModIdx = -1;
198 if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
199 ModIdx = AMDGPU::OpName::src0_modifiers;
200 else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
201 ModIdx = AMDGPU::OpName::src1_modifiers;
202 else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
203 ModIdx = AMDGPU::OpName::src2_modifiers;
204 assert(ModIdx != -1);
205 ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
206 MachineOperand &Mod = MI->getOperand(ModIdx);
207 unsigned Val = Mod.getImm();
208 if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
209 return false;
Michael Liao389d5a32019-04-22 22:05:49 +0000210 // Only apply the following transformation if that operand requires
211 // a packed immediate.
212 switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +0000213 case AMDGPU::OPERAND_REG_IMM_V2FP16:
214 case AMDGPU::OPERAND_REG_IMM_V2INT16:
Michael Liao389d5a32019-04-22 22:05:49 +0000215 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
216 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
217 // If upper part is all zero we do not need op_sel_hi.
218 if (!isUInt<16>(Fold.ImmToFold)) {
219 if (!(Fold.ImmToFold & 0xffff)) {
220 Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
221 Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
222 Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
223 return true;
224 }
Stanislav Mekhanoshin160f8572018-04-19 21:16:50 +0000225 Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +0000226 Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
227 return true;
Stanislav Mekhanoshin160f8572018-04-19 21:16:50 +0000228 }
Michael Liao389d5a32019-04-22 22:05:49 +0000229 break;
230 default:
231 break;
Stanislav Mekhanoshin160f8572018-04-19 21:16:50 +0000232 }
Stanislav Mekhanoshin8b20b7d2018-04-17 23:09:05 +0000233 }
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000234 }
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000235
Nicolai Haehnle27101712019-06-25 11:52:30 +0000236 if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000237 MachineBasicBlock *MBB = MI->getParent();
238 auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
239 if (Liveness != MachineBasicBlock::LQR_Dead)
240 return false;
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000241
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000242 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
243 int Op32 = Fold.getShrinkOpcode();
244 MachineOperand &Dst0 = MI->getOperand(0);
245 MachineOperand &Dst1 = MI->getOperand(1);
246 assert(Dst0.isDef() && Dst1.isDef());
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000247
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000248 bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
Matt Arsenault44a8a752018-08-28 18:44:16 +0000249
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000250 const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
251 unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000252
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000253 MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000254
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000255 if (HaveNonDbgCarryUse) {
256 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
257 .addReg(AMDGPU::VCC, RegState::Kill);
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000258 }
259
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000260 // Keep the old instruction around to avoid breaking iterators, but
261 // replace it with a dummy instruction to remove uses.
262 //
263 // FIXME: We should not invert how this pass looks at operands to avoid
264 // this. Should track set of foldable movs instead of looking for uses
265 // when looking at a use.
266 Dst0.setReg(NewReg0);
267 for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
268 MI->RemoveOperand(I);
269 MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
270
271 if (Fold.isCommuted())
272 TII.commuteInstruction(*Inst32, false);
Tom Stellard6596ba72014-11-21 22:06:37 +0000273 return true;
274 }
275
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000276 assert(!Fold.needsShrink() && "not handled");
277
Matt Arsenaultcfd0ca32019-05-03 15:21:53 +0000278 if (Fold.isImm()) {
279 Old.ChangeToImmediate(Fold.ImmToFold);
280 return true;
281 }
282
Nicolai Haehnle27101712019-06-25 11:52:30 +0000283 if (Fold.isGlobal()) {
284 Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
285 Fold.OpToFold->getTargetFlags());
286 return true;
287 }
288
Matt Arsenault2bc198a2016-09-14 15:51:33 +0000289 if (Fold.isFI()) {
290 Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
291 return true;
292 }
293
Tom Stellardbb763e62015-01-07 17:42:16 +0000294 MachineOperand *New = Fold.OpToFold;
Matt Arsenaulte75e1972019-06-18 12:23:45 +0000295 Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
296 Old.setIsUndef(New->isUndef());
297 return true;
Tom Stellard6596ba72014-11-21 22:06:37 +0000298}
299
Matt Arsenault51818c12017-01-10 23:32:04 +0000300static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
Tom Stellarddb5a11f2015-07-13 15:47:57 +0000301 const MachineInstr *MI) {
302 for (auto Candidate : FoldList) {
303 if (Candidate.UseMI == MI)
304 return true;
305 }
306 return false;
307}
308
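// Try to record folding OpToFold into operand OpNo of MI. If the operand is
// not directly legal there, this attempts the mac->mad / fmac->fma rewrite,
// the s_setreg_b32 -> s_setreg_imm32_b32 rewrite, or commuting MI (possibly
// marking a commuted add/sub for shrinking to its 32-bit form) before
// giving up.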
Matt Arsenault51818c12017-01-10 23:32:04 +0000309static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
Tom Stellard05992972015-01-07 22:44:19 +0000310 MachineInstr *MI, unsigned OpNo,
311 MachineOperand *OpToFold,
312 const SIInstrInfo *TII) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +0000313 if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000314 // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
Tom Stellarddb5a11f2015-07-13 15:47:57 +0000315 unsigned Opc = MI->getOpcode();
Matt Arsenault0084adc2018-04-30 19:08:16 +0000316 if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
317 Opc == AMDGPU::V_FMAC_F32_e64) &&
Tom Stellarddb5a11f2015-07-13 15:47:57 +0000318 (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
Matt Arsenault0084adc2018-04-30 19:08:16 +0000319 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
Matt Arsenault69e30012017-01-11 22:00:02 +0000320 bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
Matt Arsenault0084adc2018-04-30 19:08:16 +0000321 unsigned NewOpc = IsFMA ?
322 AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000323
324 // Check if changing this to a v_mad_{f16, f32} instruction will allow us
325 // to fold the operand.
Matt Arsenault0084adc2018-04-30 19:08:16 +0000326 MI->setDesc(TII->get(NewOpc));
Tom Stellarddb5a11f2015-07-13 15:47:57 +0000327 bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
328 if (FoldAsMAD) {
329 MI->untieRegOperand(OpNo);
330 return true;
331 }
332 MI->setDesc(TII->get(Opc));
333 }
334
Tom Stellard8485fa02016-12-07 02:42:15 +0000335 // Special case for s_setreg_b32
336 if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
337 MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
338 FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
339 return true;
340 }
341
Tom Stellarddb5a11f2015-07-13 15:47:57 +0000342 // If we are already folding into another operand of MI, then
343 // we can't commute the instruction, otherwise we risk making the
344 // other fold illegal.
345 if (isUseMIInFoldList(FoldList, MI))
346 return false;
347
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000348 unsigned CommuteOpNo = OpNo;
349
Tom Stellard05992972015-01-07 22:44:19 +0000350 // Operand is not legal, so try to commute the instruction to
351 // see if this makes it possible to fold.
Andrew Kaylor16c4da02015-09-28 20:33:22 +0000352 unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
353 unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +0000354 bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
Tom Stellard05992972015-01-07 22:44:19 +0000355
356 if (CanCommute) {
357 if (CommuteIdx0 == OpNo)
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000358 CommuteOpNo = CommuteIdx1;
Tom Stellard05992972015-01-07 22:44:19 +0000359 else if (CommuteIdx1 == OpNo)
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000360 CommuteOpNo = CommuteIdx0;
Tom Stellard05992972015-01-07 22:44:19 +0000361 }
362
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000363
Andrew Kaylor16c4da02015-09-28 20:33:22 +0000364 // One of the operands might be an Imm operand, and OpNo may refer to it after
 365 // the call to commuteInstruction() below. Such situations are avoided
366 // here explicitly as OpNo must be a register operand to be a candidate
367 // for memory folding.
368 if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
369 !MI->getOperand(CommuteIdx1).isReg()))
370 return false;
371
372 if (!CanCommute ||
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +0000373 !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
Tom Stellard05992972015-01-07 22:44:19 +0000374 return false;
375
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000376 if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
377 if ((Opc == AMDGPU::V_ADD_I32_e64 ||
378 Opc == AMDGPU::V_SUB_I32_e64 ||
379 Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
Nicolai Haehnle27101712019-06-25 11:52:30 +0000380 (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000381 MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
382
383 // Verify the other operand is a VGPR, otherwise we would violate the
384 // constant bus restriction.
385 unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
386 MachineOperand &OtherOp = MI->getOperand(OtherIdx);
387 if (!OtherOp.isReg() ||
388 !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
389 return false;
390
Fangrui Song9cca2272018-08-28 19:19:03 +0000391 assert(MI->getOperand(1).isDef());
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000392
Matt Arsenault2c8936f2019-05-03 13:42:56 +0000393 // Make sure to get the 32-bit version of the commuted opcode.
394 unsigned MaybeCommutedOpc = MI->getOpcode();
395 int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
396
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000397 FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
398 Op32));
399 return true;
400 }
401
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +0000402 TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
Tom Stellard05992972015-01-07 22:44:19 +0000403 return false;
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +0000404 }
405
Matt Arsenaultde6c4212018-08-28 18:34:24 +0000406 FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +0000407 return true;
Tom Stellard05992972015-01-07 22:44:19 +0000408 }
409
410 FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
411 return true;
412}
413
Matt Arsenault5e63a042016-10-06 18:12:13 +0000414// If the use operand doesn't care about the value, this may be an operand only
415// used for register indexing, in which case it is unsafe to fold.
Stanislav Mekhanoshin56ea4882017-05-30 16:49:24 +0000416static bool isUseSafeToFold(const SIInstrInfo *TII,
417 const MachineInstr &MI,
Matt Arsenault5e63a042016-10-06 18:12:13 +0000418 const MachineOperand &UseMO) {
Stanislav Mekhanoshin56ea4882017-05-30 16:49:24 +0000419 return !UseMO.isUndef() && !TII->isSDWA(MI);
Matt Arsenault5e63a042016-10-06 18:12:13 +0000420 //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
421}
422
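// Try to fold an inline immediate (possibly provided as a REG_SEQUENCE of
// identical, splat immediates) into an operand that takes an inline
// AGPR/accumulator constant (OPERAND_REG_INLINE_AC_*).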
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000423static bool tryToFoldACImm(const SIInstrInfo *TII,
424 const MachineOperand &OpToFold,
425 MachineInstr *UseMI,
426 unsigned UseOpIdx,
427 SmallVectorImpl<FoldCandidate> &FoldList) {
428 const MCInstrDesc &Desc = UseMI->getDesc();
429 const MCOperandInfo *OpInfo = Desc.OpInfo;
430 if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
431 return false;
432
433 uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
434 if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
435 OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
436 return false;
437
438 if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy)) {
439 UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
440 return true;
441 }
442
443 if (!OpToFold.isReg())
444 return false;
445
446 unsigned UseReg = OpToFold.getReg();
447 if (!TargetRegisterInfo::isVirtualRegister(UseReg))
448 return false;
449
450 if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
451 return FC.UseMI == UseMI; }) != FoldList.end())
452 return false;
453
454 MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
455 const MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
456 if (!Def || !Def->isRegSequence())
457 return false;
458
459 int64_t Imm;
460 MachineOperand *Op;
461 for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
462 const MachineOperand &Sub = Def->getOperand(I);
463 if (!Sub.isReg() || Sub.getSubReg())
464 return false;
465 MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub.getReg());
466 while (SubDef && !SubDef->isMoveImmediate() &&
467 !SubDef->getOperand(1).isImm() && TII->isFoldableCopy(*SubDef))
468 SubDef = MRI.getUniqueVRegDef(SubDef->getOperand(1).getReg());
469 if (!SubDef || !SubDef->isMoveImmediate() || !SubDef->getOperand(1).isImm())
470 return false;
471 Op = &SubDef->getOperand(1);
472 auto SubImm = Op->getImm();
473 if (I == 1) {
474 if (!TII->isInlineConstant(SubDef->getOperand(1), OpTy))
475 return false;
476
477 Imm = SubImm;
478 continue;
479 }
480 if (Imm != SubImm)
481 return false; // Can only fold splat constants
482 }
483
484 FoldList.push_back(FoldCandidate(UseMI, UseOpIdx, Op));
485 return true;
486}
487
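// Try to fold OpToFold into the use at (UseMI, UseOpIdx), adding candidates
// to FoldList. This recurses through REG_SEQUENCE and copy uses, rewrites
// foldable copies into mov instructions (collected in CopiesToReplace so
// implicit exec uses can be added afterwards), folds frame indexes into
// MUBUF/scratch addressing, handles V_READFIRSTLANE/V_READLANE of constants,
// and splits 64-bit immediates when only a 32-bit subregister is used.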
Matt Arsenault51818c12017-01-10 23:32:04 +0000488void SIFoldOperands::foldOperand(
489 MachineOperand &OpToFold,
490 MachineInstr *UseMI,
Matt Arsenault60957cb2019-06-24 14:53:56 +0000491 int UseOpIdx,
Matt Arsenault51818c12017-01-10 23:32:04 +0000492 SmallVectorImpl<FoldCandidate> &FoldList,
493 SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000494 const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
495
Stanislav Mekhanoshin56ea4882017-05-30 16:49:24 +0000496 if (!isUseSafeToFold(TII, *UseMI, UseOp))
Matt Arsenault5e63a042016-10-06 18:12:13 +0000497 return;
498
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000499 // FIXME: Fold operands with subregs.
Matt Arsenault3661e902016-08-15 16:18:36 +0000500 if (UseOp.isReg() && OpToFold.isReg()) {
501 if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
502 return;
503
 504 // Don't fold subregister extracts into tied operands unless it is a full
 505 // copy, since a subregister use tied to a full register def doesn't really
 506 // make sense. e.g. don't fold:
507 //
Francis Visoiu Mistrih93ef1452017-11-30 12:12:19 +0000508 // %1 = COPY %0:sub1
509 // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
Matt Arsenault3661e902016-08-15 16:18:36 +0000510 //
511 // into
Francis Visoiu Mistrih93ef1452017-11-30 12:12:19 +0000512 // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
Matt Arsenault3661e902016-08-15 16:18:36 +0000513 if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
514 return;
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000515 }
516
Tom Stellard9a197672015-09-09 15:43:26 +0000517 // Special case for REG_SEQUENCE: We can't fold literals into
518 // REG_SEQUENCE instructions, so we have to fold them into the
519 // uses of REG_SEQUENCE.
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000520 if (UseMI->isRegSequence()) {
Tom Stellard9a197672015-09-09 15:43:26 +0000521 unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
522 unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
523
Matt Arsenault4d000d22019-06-19 20:44:15 +0000524 MachineRegisterInfo::use_iterator Next;
Tom Stellard9a197672015-09-09 15:43:26 +0000525 for (MachineRegisterInfo::use_iterator
Matt Arsenault51818c12017-01-10 23:32:04 +0000526 RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
Matt Arsenault4d000d22019-06-19 20:44:15 +0000527 RSUse != RSE; RSUse = Next) {
528 Next = std::next(RSUse);
Tom Stellard9a197672015-09-09 15:43:26 +0000529
530 MachineInstr *RSUseMI = RSUse->getParent();
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000531
532 if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
533 RSUse.getOperandNo(), FoldList))
534 continue;
535
Tom Stellard9a197672015-09-09 15:43:26 +0000536 if (RSUse->getSubReg() != RegSeqDstSubReg)
537 continue;
538
539 foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
Matt Arsenault51818c12017-01-10 23:32:04 +0000540 CopiesToReplace);
Tom Stellard9a197672015-09-09 15:43:26 +0000541 }
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000542
Tom Stellard9a197672015-09-09 15:43:26 +0000543 return;
544 }
545
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000546 if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
547 return;
548
Matt Arsenault60957cb2019-06-24 14:53:56 +0000549 if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
550 // Sanity check that this is a stack access.
551 // FIXME: Should probably use stack pseudos before frame lowering.
552 MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
553 if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
554 SOff->getReg() != MFI->getStackPtrOffsetReg()))
555 return;
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000556
Matt Arsenault60957cb2019-06-24 14:53:56 +0000557 if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
558 MFI->getScratchRSrcReg())
559 return;
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000560
Matt Arsenault60957cb2019-06-24 14:53:56 +0000561 // A frame index will resolve to a positive constant, so it should always be
562 // safe to fold the addressing mode, even pre-GFX9.
563 UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
564 SOff->setReg(MFI->getStackPtrOffsetReg());
565 return;
566 }
567
Nicolai Haehnle27101712019-06-25 11:52:30 +0000568 bool FoldingImmLike =
569 OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
Matt Arsenault60957cb2019-06-24 14:53:56 +0000570
571 if (FoldingImmLike && UseMI->isCopy()) {
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000572 unsigned DestReg = UseMI->getOperand(0).getReg();
573 const TargetRegisterClass *DestRC
574 = TargetRegisterInfo::isVirtualRegister(DestReg) ?
Matt Arsenault51818c12017-01-10 23:32:04 +0000575 MRI->getRegClass(DestReg) :
576 TRI->getPhysRegClass(DestReg);
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000577
Alexander Timofeev201f8922018-08-30 13:55:04 +0000578 unsigned SrcReg = UseMI->getOperand(1).getReg();
579 if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
580 TargetRegisterInfo::isVirtualRegister(SrcReg)) {
581 const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000582 if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
Alexander Timofeev201f8922018-08-30 13:55:04 +0000583 MachineRegisterInfo::use_iterator NextUse;
584 SmallVector<FoldCandidate, 4> CopyUses;
585 for (MachineRegisterInfo::use_iterator
586 Use = MRI->use_begin(DestReg), E = MRI->use_end();
587 Use != E; Use = NextUse) {
588 NextUse = std::next(Use);
589 FoldCandidate FC = FoldCandidate(Use->getParent(),
590 Use.getOperandNo(), &UseMI->getOperand(1));
591 CopyUses.push_back(FC);
592 }
593 for (auto & F : CopyUses) {
594 foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
595 FoldList, CopiesToReplace);
596 }
597 }
598 }
599
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000600 if (DestRC == &AMDGPU::AGPR_32RegClass &&
601 TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
602 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
603 UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
604 CopiesToReplace.push_back(UseMI);
605 return;
606 }
607
Alexander Timofeev201f8922018-08-30 13:55:04 +0000608 // In order to fold immediates into copies, we need to change the
609 // copy to a MOV.
610
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000611 unsigned MovOp = TII->getMovOpcode(DestRC);
612 if (MovOp == AMDGPU::COPY)
613 return;
614
615 UseMI->setDesc(TII->get(MovOp));
616 CopiesToReplace.push_back(UseMI);
617 } else {
Stanislav Mekhanoshinb080adf2018-09-27 18:55:20 +0000618 if (UseMI->isCopy() && OpToFold.isReg() &&
619 TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000620 TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
621 TRI->isVectorRegister(*MRI, UseMI->getOperand(1).getReg()) &&
Stanislav Mekhanoshinb080adf2018-09-27 18:55:20 +0000622 !UseMI->getOperand(1).getSubReg()) {
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000623 unsigned Size = TII->getOpSize(*UseMI, 1);
Stanislav Mekhanoshinb080adf2018-09-27 18:55:20 +0000624 UseMI->getOperand(1).setReg(OpToFold.getReg());
625 UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
626 UseMI->getOperand(1).setIsKill(false);
627 CopiesToReplace.push_back(UseMI);
628 OpToFold.setIsKill(false);
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +0000629 if (Size != 4)
630 return;
631 if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
632 TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
633 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
634 else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
635 TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
636 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
Stanislav Mekhanoshinb080adf2018-09-27 18:55:20 +0000637 return;
638 }
639
Matt Arsenaultbcb5ea02019-06-18 12:23:46 +0000640 unsigned UseOpc = UseMI->getOpcode();
641 if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
642 (UseOpc == AMDGPU::V_READLANE_B32 &&
643 (int)UseOpIdx ==
644 AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
645 // %vgpr = V_MOV_B32 imm
646 // %sgpr = V_READFIRSTLANE_B32 %vgpr
647 // =>
648 // %sgpr = S_MOV_B32 imm
Matt Arsenault60957cb2019-06-24 14:53:56 +0000649 if (FoldingImmLike) {
Matt Arsenaultf39f3bd2019-06-18 12:48:36 +0000650 if (execMayBeModifiedBeforeUse(*MRI,
651 UseMI->getOperand(UseOpIdx).getReg(),
652 *OpToFold.getParent(),
653 UseMI))
Matt Arsenaultbcb5ea02019-06-18 12:23:46 +0000654 return;
655
656 UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
Matt Arsenault4d000d22019-06-19 20:44:15 +0000657
658 // FIXME: ChangeToImmediate should clear subreg
659 UseMI->getOperand(1).setSubReg(0);
Matt Arsenault60957cb2019-06-24 14:53:56 +0000660 if (OpToFold.isImm())
661 UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
662 else
663 UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
Matt Arsenaultbcb5ea02019-06-18 12:23:46 +0000664 UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
665 return;
666 }
667
668 if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
Matt Arsenaultf39f3bd2019-06-18 12:48:36 +0000669 if (execMayBeModifiedBeforeUse(*MRI,
670 UseMI->getOperand(UseOpIdx).getReg(),
671 *OpToFold.getParent(),
672 UseMI))
Matt Arsenaultbcb5ea02019-06-18 12:23:46 +0000673 return;
674
675 // %vgpr = COPY %sgpr0
676 // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
677 // =>
678 // %sgpr1 = COPY %sgpr0
679 UseMI->setDesc(TII->get(AMDGPU::COPY));
680 UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
681 return;
682 }
683 }
684
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000685 const MCInstrDesc &UseDesc = UseMI->getDesc();
686
687 // Don't fold into target independent nodes. Target independent opcodes
688 // don't have defined register classes.
689 if (UseDesc.isVariadic() ||
Matt Arsenaultc908e3f2018-02-08 01:12:46 +0000690 UseOp.isImplicit() ||
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000691 UseDesc.OpInfo[UseOpIdx].RegClass == -1)
692 return;
693 }
694
Matt Arsenault60957cb2019-06-24 14:53:56 +0000695 if (!FoldingImmLike) {
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000696 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
697
698 // FIXME: We could try to change the instruction from 64-bit to 32-bit
 699 // to enable more folding opportunities. The shrink operands pass
700 // already does this.
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000701 return;
702 }
703
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000704
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000705 const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
706 const TargetRegisterClass *FoldRC =
Matt Arsenault51818c12017-01-10 23:32:04 +0000707 TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000708
709 // Split 64-bit constants into 32-bits for folding.
710 if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
711 unsigned UseReg = UseOp.getReg();
Matt Arsenaulte75e1972019-06-18 12:23:45 +0000712 const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000713
714 if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
715 return;
716
Matt Arsenaulteb522e62017-02-27 22:15:25 +0000717 APInt Imm(64, OpToFold.getImm());
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000718 if (UseOp.getSubReg() == AMDGPU::sub0) {
719 Imm = Imm.getLoBits(32);
720 } else {
721 assert(UseOp.getSubReg() == AMDGPU::sub1);
722 Imm = Imm.getHiBits(32);
723 }
Matt Arsenaulteb522e62017-02-27 22:15:25 +0000724
725 MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
726 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
727 return;
Matt Arsenaulta24d84b2016-11-23 21:51:07 +0000728 }
729
732 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
Tom Stellardb8ce14c2015-08-28 23:45:19 +0000733}
734
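// Evaluate a 32-bit bitwise or shift instruction whose two sources are known
// constants. Shift amounts are masked to the low 5 bits to match hardware
// behavior for out-of-bounds shifts. Returns false for unhandled opcodes.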
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000735static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
Matt Arsenault51818c12017-01-10 23:32:04 +0000736 uint32_t LHS, uint32_t RHS) {
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000737 switch (Opcode) {
738 case AMDGPU::V_AND_B32_e64:
Matt Arsenault51818c12017-01-10 23:32:04 +0000739 case AMDGPU::V_AND_B32_e32:
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000740 case AMDGPU::S_AND_B32:
741 Result = LHS & RHS;
742 return true;
743 case AMDGPU::V_OR_B32_e64:
Matt Arsenault51818c12017-01-10 23:32:04 +0000744 case AMDGPU::V_OR_B32_e32:
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000745 case AMDGPU::S_OR_B32:
746 Result = LHS | RHS;
747 return true;
748 case AMDGPU::V_XOR_B32_e64:
Matt Arsenault51818c12017-01-10 23:32:04 +0000749 case AMDGPU::V_XOR_B32_e32:
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000750 case AMDGPU::S_XOR_B32:
751 Result = LHS ^ RHS;
752 return true;
Matt Arsenault51818c12017-01-10 23:32:04 +0000753 case AMDGPU::V_LSHL_B32_e64:
754 case AMDGPU::V_LSHL_B32_e32:
755 case AMDGPU::S_LSHL_B32:
756 // The instruction ignores the high bits for out of bounds shifts.
757 Result = LHS << (RHS & 31);
758 return true;
759 case AMDGPU::V_LSHLREV_B32_e64:
760 case AMDGPU::V_LSHLREV_B32_e32:
761 Result = RHS << (LHS & 31);
762 return true;
763 case AMDGPU::V_LSHR_B32_e64:
764 case AMDGPU::V_LSHR_B32_e32:
765 case AMDGPU::S_LSHR_B32:
766 Result = LHS >> (RHS & 31);
767 return true;
768 case AMDGPU::V_LSHRREV_B32_e64:
769 case AMDGPU::V_LSHRREV_B32_e32:
770 Result = RHS >> (LHS & 31);
771 return true;
772 case AMDGPU::V_ASHR_I32_e64:
773 case AMDGPU::V_ASHR_I32_e32:
774 case AMDGPU::S_ASHR_I32:
775 Result = static_cast<int32_t>(LHS) >> (RHS & 31);
776 return true;
777 case AMDGPU::V_ASHRREV_I32_e64:
778 case AMDGPU::V_ASHRREV_I32_e32:
779 Result = static_cast<int32_t>(RHS) >> (LHS & 31);
780 return true;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000781 default:
782 return false;
783 }
784}
785
786static unsigned getMovOpc(bool IsScalar) {
787 return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
788}
789
Matt Arsenaultc2ee42c2016-10-06 17:54:30 +0000790/// Remove any leftover implicit operands from mutating the instruction. e.g.
791/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
792/// anymore.
793static void stripExtraCopyOperands(MachineInstr &MI) {
794 const MCInstrDesc &Desc = MI.getDesc();
795 unsigned NumOps = Desc.getNumOperands() +
796 Desc.getNumImplicitUses() +
797 Desc.getNumImplicitDefs();
798
799 for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
800 MI.RemoveOperand(I);
801}
802
803static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
804 MI.setDesc(NewDesc);
805 stripExtraCopyOperands(MI);
806}
807
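// If Op is a virtual register defined by a move-immediate (and has no
// subregister), return the defining instruction's immediate operand so it can
// be constant folded; otherwise return Op itself.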
Matt Arsenault51818c12017-01-10 23:32:04 +0000808static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
809 MachineOperand &Op) {
810 if (Op.isReg()) {
811 // If this has a subregister, it obviously is a register source.
Matt Arsenaultcbda7ff2018-03-10 16:05:35 +0000812 if (Op.getSubReg() != AMDGPU::NoSubRegister ||
813 !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
Matt Arsenault51818c12017-01-10 23:32:04 +0000814 return &Op;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000815
Matt Arsenault51818c12017-01-10 23:32:04 +0000816 MachineInstr *Def = MRI.getVRegDef(Op.getReg());
Matt Arsenault7f67b352017-06-20 18:28:02 +0000817 if (Def && Def->isMoveImmediate()) {
Matt Arsenault51818c12017-01-10 23:32:04 +0000818 MachineOperand &ImmSrc = Def->getOperand(1);
819 if (ImmSrc.isImm())
820 return &ImmSrc;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000821 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000822 }
823
Matt Arsenault51818c12017-01-10 23:32:04 +0000824 return &Op;
825}
826
827// Try to simplify operations with a constant that may appear after instruction
828// selection.
829// TODO: See if a frame index with a fixed offset can fold.
830static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
831 const SIInstrInfo *TII,
832 MachineInstr *MI,
833 MachineOperand *ImmOp) {
834 unsigned Opc = MI->getOpcode();
835 if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
836 Opc == AMDGPU::S_NOT_B32) {
837 MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
838 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
839 return true;
840 }
841
842 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
843 if (Src1Idx == -1)
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000844 return false;
845
846 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
Matt Arsenault51818c12017-01-10 23:32:04 +0000847 MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
848 MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000849
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000850 if (!Src0->isImm() && !Src1->isImm())
851 return false;
852
Matt Arsenault0d1b3932018-08-06 15:40:20 +0000853 if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
854 if (Src0->isImm() && Src0->getImm() == 0) {
855 // v_lshl_or_b32 0, X, Y -> copy Y
856 // v_lshl_or_b32 0, X, K -> v_mov_b32 K
857 bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
858 MI->RemoveOperand(Src1Idx);
859 MI->RemoveOperand(Src0Idx);
860
861 MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
862 return true;
863 }
864 }
865
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000866 // and k0, k1 -> v_mov_b32 (k0 & k1)
867 // or k0, k1 -> v_mov_b32 (k0 | k1)
868 // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
869 if (Src0->isImm() && Src1->isImm()) {
870 int32_t NewImm;
871 if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
872 return false;
873
874 const SIRegisterInfo &TRI = TII->getRegisterInfo();
875 bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
876
Matt Arsenault51818c12017-01-10 23:32:04 +0000877 // Be careful to change the right operand, src0 may belong to a different
878 // instruction.
879 MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000880 MI->RemoveOperand(Src1Idx);
Matt Arsenaultc2ee42c2016-10-06 17:54:30 +0000881 mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000882 return true;
883 }
884
Matt Arsenault51818c12017-01-10 23:32:04 +0000885 if (!MI->isCommutable())
886 return false;
887
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000888 if (Src0->isImm() && !Src1->isImm()) {
889 std::swap(Src0, Src1);
890 std::swap(Src0Idx, Src1Idx);
891 }
892
893 int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
Matt Arsenault51818c12017-01-10 23:32:04 +0000894 if (Opc == AMDGPU::V_OR_B32_e64 ||
895 Opc == AMDGPU::V_OR_B32_e32 ||
896 Opc == AMDGPU::S_OR_B32) {
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000897 if (Src1Val == 0) {
898 // y = or x, 0 => y = copy x
899 MI->RemoveOperand(Src1Idx);
Matt Arsenaultc2ee42c2016-10-06 17:54:30 +0000900 mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000901 } else if (Src1Val == -1) {
902 // y = or x, -1 => y = v_mov_b32 -1
903 MI->RemoveOperand(Src1Idx);
Matt Arsenaultc2ee42c2016-10-06 17:54:30 +0000904 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000905 } else
906 return false;
907
908 return true;
909 }
910
911 if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
Matt Arsenault51818c12017-01-10 23:32:04 +0000912 MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000913 MI->getOpcode() == AMDGPU::S_AND_B32) {
914 if (Src1Val == 0) {
915 // y = and x, 0 => y = v_mov_b32 0
916 MI->RemoveOperand(Src0Idx);
Matt Arsenaultc2ee42c2016-10-06 17:54:30 +0000917 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000918 } else if (Src1Val == -1) {
919 // y = and x, -1 => y = copy x
920 MI->RemoveOperand(Src1Idx);
Matt Arsenaultc2ee42c2016-10-06 17:54:30 +0000921 mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
922 stripExtraCopyOperands(*MI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000923 } else
924 return false;
925
926 return true;
927 }
928
929 if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
Matt Arsenault51818c12017-01-10 23:32:04 +0000930 MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000931 MI->getOpcode() == AMDGPU::S_XOR_B32) {
932 if (Src1Val == 0) {
933 // y = xor x, 0 => y = copy x
934 MI->RemoveOperand(Src1Idx);
Matt Arsenaultc2ee42c2016-10-06 17:54:30 +0000935 mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
Matt Arsenault51818c12017-01-10 23:32:04 +0000936 return true;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +0000937 }
938 }
939
940 return false;
941}
942
Stanislav Mekhanoshin70603dc2017-03-24 18:55:20 +0000943// Try to fold an instruction into a simpler one, e.g. a V_CNDMASK whose two
 // source operands are identical (and have no modifiers) becomes a copy or mov.
944static bool tryFoldInst(const SIInstrInfo *TII,
945 MachineInstr *MI) {
946 unsigned Opc = MI->getOpcode();
947
948 if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
949 Opc == AMDGPU::V_CNDMASK_B32_e64 ||
950 Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
951 const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
952 const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
Tim Renouf2e94f6e2019-03-18 19:25:39 +0000953 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
954 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
955 if (Src1->isIdenticalTo(*Src0) &&
956 (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
957 (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +0000958 LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
Tim Renouf2e94f6e2019-03-18 19:25:39 +0000959 auto &NewDesc =
960 TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
Stanislav Mekhanoshin70603dc2017-03-24 18:55:20 +0000961 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
962 if (Src2Idx != -1)
963 MI->RemoveOperand(Src2Idx);
964 MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
Tim Renouf2e94f6e2019-03-18 19:25:39 +0000965 if (Src1ModIdx != -1)
966 MI->RemoveOperand(Src1ModIdx);
967 if (Src0ModIdx != -1)
968 MI->RemoveOperand(Src0ModIdx);
969 mutateCopyOp(*MI, NewDesc);
Nicola Zaghend34e60c2018-05-14 12:53:11 +0000970 LLVM_DEBUG(dbgs() << *MI << '\n');
Stanislav Mekhanoshin70603dc2017-03-24 18:55:20 +0000971 return true;
972 }
973 }
974
975 return false;
976}
977
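// Fold the value OpToFold (the source of the foldable definition MI) into all
// uses of MI's destination register, then apply the collected fold candidates.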
Matt Arsenault51818c12017-01-10 23:32:04 +0000978void SIFoldOperands::foldInstOperand(MachineInstr &MI,
979 MachineOperand &OpToFold) const {
 980 // We need to mutate the operands of new mov instructions to add implicit
981 // uses of EXEC, but adding them invalidates the use_iterator, so defer
982 // this.
983 SmallVector<MachineInstr *, 4> CopiesToReplace;
984 SmallVector<FoldCandidate, 4> FoldList;
985 MachineOperand &Dst = MI.getOperand(0);
986
Nicolai Haehnle27101712019-06-25 11:52:30 +0000987 bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
Matt Arsenault51818c12017-01-10 23:32:04 +0000988 if (FoldingImm) {
989 unsigned NumLiteralUses = 0;
990 MachineOperand *NonInlineUse = nullptr;
991 int NonInlineUseOpNo = -1;
992
Vitaly Buka74503982017-10-15 05:35:02 +0000993 MachineRegisterInfo::use_iterator NextUse;
Matt Arsenault51818c12017-01-10 23:32:04 +0000994 for (MachineRegisterInfo::use_iterator
995 Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
996 Use != E; Use = NextUse) {
997 NextUse = std::next(Use);
998 MachineInstr *UseMI = Use->getParent();
999 unsigned OpNo = Use.getOperandNo();
1000
1001 // Folding the immediate may reveal operations that can be constant
1002 // folded or replaced with a copy. This can happen for example after
1003 // frame indices are lowered to constants or from splitting 64-bit
1004 // constants.
1005 //
1006 // We may also encounter cases where one or both operands are
1007 // immediates materialized into a register, which would ordinarily not
1008 // be folded due to multiple uses or operand constraints.
1009
1010 if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001011 LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');
Matt Arsenault51818c12017-01-10 23:32:04 +00001012
1013 // Some constant folding cases change the same immediate's use to a new
1014 // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
1015 // again. The same constant folded instruction could also have a second
1016 // use operand.
1017 NextUse = MRI->use_begin(Dst.getReg());
Nicolai Haehnlea253e4c2017-07-18 14:54:41 +00001018 FoldList.clear();
Matt Arsenault51818c12017-01-10 23:32:04 +00001019 continue;
1020 }
1021
1022 // Try to fold any inline immediate uses, and then only fold other
1023 // constants if they have one use.
1024 //
1025 // The legality of the inline immediate must be checked based on the use
1026 // operand, not the defining instruction, because 32-bit instructions
1027 // with 32-bit inline immediate sources may be used to materialize
1028 // constants used in 16-bit operands.
1029 //
1030 // e.g. it is unsafe to fold:
1031 // s_mov_b32 s0, 1.0 // materializes 0x3f800000
1032 // v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
1033
1034 // Folding immediates with more than one use will increase program size.
1035 // FIXME: This will also reduce register usage, which may be better
1036 // in some cases. A better heuristic is needed.
Matt Arsenault69e30012017-01-11 22:00:02 +00001037 if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
Matt Arsenault51818c12017-01-10 23:32:04 +00001038 foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
Matt Arsenault60957cb2019-06-24 14:53:56 +00001039 } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
1040 foldOperand(OpToFold, UseMI, OpNo, FoldList,
1041 CopiesToReplace);
Matt Arsenault51818c12017-01-10 23:32:04 +00001042 } else {
1043 if (++NumLiteralUses == 1) {
1044 NonInlineUse = &*Use;
1045 NonInlineUseOpNo = OpNo;
1046 }
1047 }
1048 }
1049
1050 if (NumLiteralUses == 1) {
1051 MachineInstr *UseMI = NonInlineUse->getParent();
1052 foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
1053 }
1054 } else {
1055 // Folding register.
Alexander Timofeev993e2792019-01-03 19:55:32 +00001056 SmallVector <MachineRegisterInfo::use_iterator, 4> UsesToProcess;
Matt Arsenault51818c12017-01-10 23:32:04 +00001057 for (MachineRegisterInfo::use_iterator
1058 Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
1059 Use != E; ++Use) {
Alexander Timofeev993e2792019-01-03 19:55:32 +00001060 UsesToProcess.push_back(Use);
1061 }
1062 for (auto U : UsesToProcess) {
1063 MachineInstr *UseMI = U->getParent();
Matt Arsenault51818c12017-01-10 23:32:04 +00001064
Alexander Timofeev993e2792019-01-03 19:55:32 +00001065 foldOperand(OpToFold, UseMI, U.getOperandNo(),
1066 FoldList, CopiesToReplace);
Matt Arsenault51818c12017-01-10 23:32:04 +00001067 }
1068 }
1069
1070 MachineFunction *MF = MI.getParent()->getParent();
1071 // Make sure we add EXEC uses to any new v_mov instructions created.
1072 for (MachineInstr *Copy : CopiesToReplace)
1073 Copy->addImplicitDefUseOperands(*MF);
1074
1075 for (FoldCandidate &Fold : FoldList) {
Stanislav Mekhanoshin5cf81672019-05-02 04:01:39 +00001076 if (updateOperand(Fold, *TII, *TRI, *ST)) {
Matt Arsenault51818c12017-01-10 23:32:04 +00001077 // Clear kill flags.
1078 if (Fold.isReg()) {
1079 assert(Fold.OpToFold && Fold.OpToFold->isReg());
1080 // FIXME: Probably shouldn't bother trying to fold if not an
1081 // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
1082 // copies.
1083 MRI->clearKillFlags(Fold.OpToFold->getReg());
1084 }
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001085 LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
1086 << static_cast<int>(Fold.UseOpNo) << " of "
1087 << *Fold.UseMI << '\n');
Stanislav Mekhanoshin70603dc2017-03-24 18:55:20 +00001088 tryFoldInst(TII, Fold.UseMI);
Stanislav Mekhanoshinf154b4f2017-06-03 00:41:52 +00001089 } else if (Fold.isCommuted()) {
1090 // Restoring instruction's original operand order if fold has failed.
1091 TII->commuteInstruction(*Fold.UseMI, false);
Matt Arsenault51818c12017-01-10 23:32:04 +00001092 }
1093 }
1094}
1095
Matt Arsenaultf48e5c92017-10-05 00:13:20 +00001096// Clamp patterns are canonically selected to v_max_* instructions, so only
1097// handle them.
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001098const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
1099 unsigned Op = MI.getOpcode();
1100 switch (Op) {
1101 case AMDGPU::V_MAX_F32_e64:
Matt Arsenault79a45db2017-02-22 23:53:37 +00001102 case AMDGPU::V_MAX_F16_e64:
Matt Arsenaultab4a5cd2017-08-31 23:53:50 +00001103 case AMDGPU::V_MAX_F64:
1104 case AMDGPU::V_PK_MAX_F16: {
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001105 if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
1106 return nullptr;
1107
1108 // Make sure sources are identical.
1109 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1110 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
Stanislav Mekhanoshin286a4222017-06-05 01:03:04 +00001111 if (!Src0->isReg() || !Src1->isReg() ||
Matt Arsenaultaafff872017-10-05 00:13:17 +00001112 Src0->getReg() != Src1->getReg() ||
Stanislav Mekhanoshin286a4222017-06-05 01:03:04 +00001113 Src0->getSubReg() != Src1->getSubReg() ||
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001114 Src0->getSubReg() != AMDGPU::NoSubRegister)
1115 return nullptr;
1116
1117 // Can't fold up if we have modifiers.
Matt Arsenaultab4a5cd2017-08-31 23:53:50 +00001118 if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1119 return nullptr;
1120
1121 unsigned Src0Mods
1122 = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
1123 unsigned Src1Mods
1124 = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
1125
1126 // Having a 0 op_sel_hi would require swizzling the output in the source
1127 // instruction, which we can't do.
Stanislav Mekhanoshinda644c02019-03-13 21:15:52 +00001128 unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
1129 : 0u;
Matt Arsenaultab4a5cd2017-08-31 23:53:50 +00001130 if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001131 return nullptr;
1132 return Src0;
1133 }
1134 default:
1135 return nullptr;
1136 }
1137}
1138
1139// We obviously have multiple uses in a clamp since the register is used twice
1140// in the same instruction.
1141static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
1142 int Count = 0;
1143 for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
1144 I != E; ++I) {
1145 if (++Count > 1)
1146 return false;
1147 }
1148
1149 return true;
1150}
1151
Matt Arsenault8cbb4882017-09-20 21:01:24 +00001152// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
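// Fold a clamp-only max into the instruction defining its source when that
// source has a single use and the clamp kinds match, e.g.:
//
//   %x = V_ADD_F32 ...                  ; clamp bit is set here instead
//   %y = V_MAX_F32 %x, %x, clamp        ; erased; uses of %y become uses of %x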
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001153bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
1154 const MachineOperand *ClampSrc = isClamp(MI);
1155 if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
1156 return false;
1157
1158 MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
Matt Arsenaultab4a5cd2017-08-31 23:53:50 +00001159
1160 // The type of clamp must be compatible.
1161 if (TII->getClampMask(*Def) != TII->getClampMask(MI))
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001162 return false;
Matt Arsenaultab4a5cd2017-08-31 23:53:50 +00001163
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001164 MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1165 if (!DefClamp)
1166 return false;
1167
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001168 LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
1169 << '\n');
Matt Arsenaultd5c65152017-02-22 23:27:53 +00001170
1171 // Clamp is applied after omod, so it is OK if omod is set.
1172 DefClamp->setImm(1);
1173 MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1174 MI.eraseFromParent();
1175 return true;
1176}
1177
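// Map a multiplier immediate to the equivalent output-modifier encoding:
// 0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4; anything else returns NONE.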
Matt Arsenault3cb39042017-02-27 19:35:42 +00001178static int getOModValue(unsigned Opc, int64_t Val) {
1179 switch (Opc) {
1180 case AMDGPU::V_MUL_F32_e64: {
1181 switch (static_cast<uint32_t>(Val)) {
1182 case 0x3f000000: // 0.5
1183 return SIOutMods::DIV2;
1184 case 0x40000000: // 2.0
1185 return SIOutMods::MUL2;
1186 case 0x40800000: // 4.0
1187 return SIOutMods::MUL4;
1188 default:
1189 return SIOutMods::NONE;
1190 }
1191 }
1192 case AMDGPU::V_MUL_F16_e64: {
1193 switch (static_cast<uint16_t>(Val)) {
1194 case 0x3800: // 0.5
1195 return SIOutMods::DIV2;
1196 case 0x4000: // 2.0
1197 return SIOutMods::MUL2;
1198 case 0x4400: // 4.0
1199 return SIOutMods::MUL4;
1200 default:
1201 return SIOutMods::NONE;
1202 }
1203 }
1204 default:
1205 llvm_unreachable("invalid mul opcode");
1206 }
1207}
1208
1209// FIXME: Does this really not support denormals with f16?
1210// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
1211// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
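// A rough before/after sketch (illustrative operand lists):
//   %a = V_ADD_F32_e64 0, %x, 0, %y, 0, 0
//   %b = V_MUL_F32_e64 0, 2.0, 0, %a, 0, 0
// becomes
//   %a = V_ADD_F32_e64 0, %x, 0, %y, 0, 1   ; omod = MUL2 on the def
// with the multiply erased and uses of %b rewritten to %a.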
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

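  // depth_first visits only blocks reachable from the entry block, so dead
  // blocks are never scanned.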
  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards through the function. For example,
      // the COPY below must not be folded to a mov of 1, because %vgpr0 only
      // receives that value later:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}