//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

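// A deferred fold request: the instruction that uses the value, the operand
// slot to rewrite, and the value (register, immediate, or frame index) that
// should be placed there once the fold is known to be legal.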
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const SISubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned Opc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

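// Apply a previously recorded fold in place, rewriting the use operand to the
// folded immediate, frame index, or register. Returns false if the rewrite
// cannot be performed (e.g. a packed operand whose op_sel bits are already
// taken, or an operand involving physical registers).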
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // If upper part is all zero we do not need op_sel_hi.
      if (!isUInt<16>(Fold.ImmToFold)) {
        if (!(Fold.ImmToFold & 0xffff)) {
          Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
          return true;
        }
        Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
      }
    }
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

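// Record a candidate fold of OpToFold into operand OpNo of MI. If the operand
// is not immediately legal, this tries the mac -> mad / fmac -> fma rewrite,
// the s_setreg_b32 immediate form, and commuting the instruction before
// giving up.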
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 and v_fmac_f32_e64 if we are
    // trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32: fold an immediate by switching to the
    // immediate form of the instruction.
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

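// Collect fold candidates for a single use of OpToFold, recursing through
// REG_SEQUENCE users and splitting 64-bit immediates into 32-bit halves when
// only a subregister of the value is used.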
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

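// Constant fold a 32-bit bitwise or shift operation with two known operands.
// Returns true and sets Result if the opcode is handled.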
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
    Desc.getNumImplicitUses() +
    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
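//
// A simplified example (virtual register numbers are made up):
//   %0 = V_MOV_B32_e32 0
//   %1 = V_AND_B32_e32 %0, %2
// becomes
//   %1 = V_MOV_B32_e32 0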
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
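// Currently this only handles v_cndmask with identical source operands, which
// degenerates into a plain copy (or materialization) of that source.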
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

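// Fold OpToFold, the source of a foldable copy or mov defined by MI, into
// every use of MI's destination register. Candidates are collected first and
// only applied afterwards so that the use iterators stay valid.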
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
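// For example (illustrative), in
//   %1 = V_ADD_F32_e64 %x, %y
//   %2 = V_MAX_F32_e64 %1, %1 clamp
// the max only exists to apply the clamp, so the clamp bit can instead be set
// on the defining add and the max removed (see tryFoldClamp below).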
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 : 0;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

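// Map a multiplier immediate to the corresponding output modifier encoding
// (0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4), or NONE if there is no match.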
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
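// Fold an omod-expressible multiply or add (as identified by isOMod) into the
// instruction defining its source, provided that definition does not already
// have omod or clamp set and the source has a single use.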
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

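// Walk the function in depth-first block order and, for each foldable copy or
// mov, try to propagate its source into the instructions that use its result.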
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this case:
      //
      //   %3 = COPY %vgpr0; VGPR_32:%3
      //   ...
      //   %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}