//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

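// A deferred fold of an immediate, frame index, or register operand into a
// particular operand of a use instruction.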
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const SISubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      const MCInstrDesc &MadDesc
        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

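// Apply a queued fold: rewrite the use operand of Fold.UseMI in place with the
// immediate, frame index, or register being folded.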
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

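// Check whether OpToFold can legally replace operand OpNo of MI, commuting MI
// or rewriting it to an equivalent opcode (e.g. v_mac -> v_mad) when that is
// what makes the fold legal, and record the resulting candidate in FoldList.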
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32.
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an immediate, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

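// Queue folds of OpToFold into operand UseOpIdx of UseMI. Handles folding
// through REG_SEQUENCE uses, turning copies of immediates into moves, and
// splitting 64-bit immediates across sub-register uses.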
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless the fold is a
    // full copy, since a subregister use tied to a full register def doesn't
    // really make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

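// Constant-fold a two-source bitwise or shift instruction given both source
// values, writing the folded value to Result. Returns false for opcodes this
// does not know how to evaluate.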
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

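// If Op is a register whose definition is a move-immediate, return the
// immediate operand of that definition; otherwise return Op itself.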
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister)
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

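// Fold OpToFold, the source of a foldable copy MI, into all uses of MI's
// destination register, constant-folding or simplifying users where possible.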
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse, NextInstUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

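// If MI is a clamping max (v_max x, x with the clamp bit set and no other
// modifiers), return the source operand being clamped; otherwise return
// nullptr.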
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

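// Fold the clamp of MI into the instruction defining its source (if that
// instruction supports an FP clamp), then erase MI.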
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
  if (!TII->hasFPClamp(*Def))
    return false;
  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

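// Map a multiplier immediate (0.5, 2.0 or 4.0 in the width matching Opc) to
// the corresponding output-modifier encoding, or SIOutMods::NONE if it does
// not correspond to one.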
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

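// Try to fold an omod-style multiply or add MI into the output-modifier field
// of the instruction that defines its register source, then erase MI.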
// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //    ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}