//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

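// Tracks a single pending fold: the use instruction and operand index being
// rewritten, plus the value to substitute (register, immediate, or frame
// index, discriminated by Kind).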
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const SISubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      const MCInstrDesc &MadDesc
        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

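// Returns true if MI is a plain move/copy whose single source operand can be
// propagated into its uses. Moves carrying extra implicit operands (e.g. for
// register indexing) are rejected.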
static bool isFoldableCopy(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    // If there are additional implicit register operands, this may be used for
    // register indexing so the source register operand isn't simply copied.
    unsigned NumOps = MI.getDesc().getNumOperands() +
                      MI.getDesc().getNumImplicitUses();

    return MI.getNumOperands() == NumOps;
  }
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

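// Apply a previously collected fold: rewrite the use operand in place with the
// recorded immediate, frame index, or virtual register.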
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

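// Record a fold of OpToFold into operand OpNo of MI if it is (or can be made)
// legal. When the operand is illegal in place, this tries converting
// v_mac -> v_mad, s_setreg_b32 -> s_setreg_imm32_b32, or commuting the
// instruction before giving up.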
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of operands might be an Imm operand, and OpNo may refer to it after
    // the call of commuteInstruction() below. Such situations are avoided
    // here explicitly as OpNo must be a register operand to be a candidate
    // for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef();
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

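// Collect fold candidates for a single use of OpToFold. REG_SEQUENCE uses are
// folded into the users of the sequence, copies of immediates are rewritten to
// the appropriate mov opcode, and a 64-bit immediate is split when the use
// only reads one 32-bit half of it.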
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands, only if it is a full
    // copy since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

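// Evaluate a 32-bit bitwise or shift operation with both inputs known.
// Returns false for opcodes this folder does not understand.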
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

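// Look through a register operand to the immediate that materialized it, if
// the defining instruction is a move-immediate; otherwise return the operand
// unchanged.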
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister)
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

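// Fold OpToFold (the source of a foldable copy/mov) into the uses of MI's
// destination register. Inline immediates are tried on every use; a literal
// constant is only folded when it has a single non-inline use, to avoid
// growing code size.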
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse, NextInstUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    }
  }
}

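// Match the canonical clamp pattern: a v_max with the clamp bit set whose two
// sources are the same unmodified register. Returns that source operand, or
// null if MI is not such a max.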
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

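// If MI is a clamping max and the defining instruction of its single-use
// source supports the clamp bit, set clamp on the definition and erase the
// max.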
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
  if (!TII->hasFPClamp(*Def))
    return false;
  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

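// Map a multiplier constant (0.5, 2.0 or 4.0 in the appropriate FP format) to
// the corresponding output-modifier encoding, or SIOutMods::NONE.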
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

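// Pass driver: walk every instruction, first trying the generic instruction
// simplification, then either folding omod/clamp modifiers (for non-copies) or
// propagating the source of a foldable copy into its uses.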
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      // %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      // ...
      // %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}