//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

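// A deferred operand fold: records which operand of which use instruction
// should be rewritten, and whether the replacement is a register, an
// immediate, or a frame index. Candidates are collected into a list while
// scanning uses and applied afterwards.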
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const SISubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      const MCInstrDesc &MadDesc
        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

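// Apply a recorded fold to its use operand: rewrite the operand in place to
// the folded immediate, frame index, or virtual register. Returns false if
// the fold cannot be applied (e.g. physical register sources are not handled
// yet).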
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

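// Record a fold of OpToFold into operand OpNo of MI if it is legal, or can be
// made legal by converting v_mac to v_mad, using the immediate form of
// s_setreg_b32, or commuting the instruction.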
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

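// Try to fold OpToFold into the use at UseOpIdx of UseMI, recursing through
// REG_SEQUENCE uses and splitting 64-bit immediates that are used through a
// sub0/sub1 subregister. Legal candidates are appended to FoldList; copies
// rewritten to mov instructions are recorded in CopiesToReplace so their
// implicit operands can be added afterwards.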
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only a full copy
    // may be folded there, since a subregister use tied to a full register
    // def doesn't really make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

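// Evaluate a 32-bit bitwise or shift operation whose two source operands are
// known constants. Returns true and sets Result if the opcode is handled.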
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister)
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one, e.g. a v_cndmask whose two
// value sources are identical becomes a copy or mov of that source.
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

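// Fold OpToFold, the source of the foldable copy or mov MI, into the uses of
// MI's destination register, then apply the collected fold candidates.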
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse, NextInstUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

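// Match the clamp pattern: a v_max with the clamp bit set whose sources are
// plain register operands (no subregisters, no source modifiers, no omod).
// Returns src0 on a match, nullptr otherwise.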
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

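// If MI is a clamping max whose source value has this as its only use and is
// produced by an instruction that supports FP clamp, set the clamp bit on the
// defining instruction and delete the max.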
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
  if (!TII->hasFPClamp(*Def))
    return false;
  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

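// Map a multiplication constant (0.5, 2.0 or 4.0 in the relevant float
// encoding) to the corresponding output-modifier value.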
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

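// If MI scales a single-use value by 0.5, 2.0, or 4.0 (either as a multiply
// by a constant or as an add of the value to itself), fold the scaling into
// the omod field of the value's defining instruction and delete MI.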
// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

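// Visit blocks in depth-first order. Each instruction is first given a chance
// to simplify in place; foldable copies and movs then have their source
// operand propagated into their uses, while other instructions are tried for
// the clamp and omod folds.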
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //    ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}