//===-- SIFoldOperands.cpp - Fold operands ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

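// Tracks a single deferred fold: the instruction and operand index to
// rewrite, what to fold in (a register, immediate, or frame index), whether
// the instruction had to be commuted to make the fold legal, and the 32-bit
// opcode to shrink to if completing the fold requires shrinking.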
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false, int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
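//
// A rough illustration (MIR-style, operand details elided): folding an
// immediate into src2 of
//   %5 = V_MAC_F32_e64 %0, %1, %2
// turns the mac into a V_MAD_F32, so the inline-constant check must be made
// against the mad's operand description rather than the mac's.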
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned MadOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(MadOpc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // If the upper half is all zero we do not need op_sel_hi.
      if (!isUInt<16>(Fold.ImmToFold)) {
        if (!(Fold.ImmToFold & 0xffff)) {
          Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
          return true;
        }
        Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
      }
    }

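    // Folding the immediate here requires shrinking to the 32-bit VOP2
    // encoding, which writes its carry-out to VCC, so the fold is only legal
    // when VCC is known to be dead at this point.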
    if (Fold.needsShrink()) {
      MachineBasicBlock *MBB = MI->getParent();
      auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
      if (Liveness != MachineBasicBlock::LQR_Dead)
        return false;

      MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
      int Op32 = Fold.getShrinkOpcode();
      MachineOperand &Dst0 = MI->getOperand(0);
      MachineOperand &Dst1 = MI->getOperand(1);
      assert(Dst0.isDef() && Dst1.isDef());

      bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

      const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
      unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
      const TargetRegisterClass *Dst1RC = MRI.getRegClass(Dst1.getReg());
      unsigned NewReg1 = MRI.createVirtualRegister(Dst1RC);

      MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

      if (HaveNonDbgCarryUse) {
        BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY),
                Dst1.getReg())
          .addReg(AMDGPU::VCC, RegState::Kill);
      }

      // Keep the old instruction around to avoid breaking iterators, but
      // replace the outputs with dummy registers.
      Dst0.setReg(NewReg0);
      Dst1.setReg(NewReg1);

      if (Fold.isCommuted())
        TII.commuteInstruction(*Inst32, false);
      return true;
    }

    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of operands might be an Imm operand, and OpNo may refer to it after
    // the call of commuteInstruction() below. Such situations are avoided
    // here explicitly as OpNo must be a register operand to be a candidate
    // for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          OpToFold->isImm()) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1
                                                       : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        int Op32 = AMDGPU::getVOPe32(Opc);
        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only fold a full
    // copy, since a subregister use tied to a full register def doesn't
    // really make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {
      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

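    // If this is a copy from an SGPR to a VGPR class, also try forwarding the
    // copy's SGPR source directly into the users of the copy's destination
    // before rewriting the copy itself.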
    unsigned SrcReg = UseMI->getOperand(1).getReg();
    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(1).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      return;
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
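  // e.g. a use through the sub1 subregister of a 64-bit immediate def only
  // folds the high 32 bits of the constant.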
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

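// Constant-fold a 32-bit binary ALU opcode over two known operand values.
// Returns false for opcodes this pass does not evaluate.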
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

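// If Op is a virtual register defined by a move-immediate, return the
// materialized immediate operand; otherwise return Op itself.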
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
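// e.g. a v_cndmask_b32 whose two value sources are identical reduces to a
// copy of the common source, regardless of the condition.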
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
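// e.g. clamp(x) is selected as v_max_f32 x, x with the clamp bit set, so a
// match requires identical, unmodified sources.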
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

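// Map a multiplier immediate to the corresponding output-modifier encoding
// (divide by 2, multiply by 2, or multiply by 4), or SIOutMods::NONE if the
// constant is not one of the supported values.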
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  bool IsIEEEMode = ST->enableIEEEBit(MF);
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}