//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

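// Describes one deferred fold: the instruction using the value, the operand
// slot to rewrite, and the register, immediate, or frame index to put there.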
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }
};

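// Pass that folds the source operands of move and copy instructions into
// their uses, constant folding where the result becomes known.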
class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      const MCInstrDesc &MadDesc
        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool isSafeToFold(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    // If there are additional implicit register operands, this may be used for
    // register indexing so the source register operand isn't simply copied.
    unsigned NumOps = MI.getDesc().getNumOperands() +
                      MI.getDesc().getNumImplicitUses();

    return MI.getNumOperands() == NumOps;
  }
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

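// Write the folded value into the use operand described by \p Fold: an
// immediate, a frame index, or a virtual register substitution. Returns true
// if the operand was rewritten.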
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

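// Return true if \p MI already has a pending fold recorded in \p FoldList.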
static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

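// Try to record folding \p OpToFold into operand \p OpNo of \p MI. If the
// operand is not legal as-is, try switching v_mac to v_mad, s_setreg_b32 to
// its immediate form, or commuting the instruction. Returns true if a
// candidate was added to \p FoldList.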
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef();
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  APInt Imm(TII->operandBitWidth(FoldDesc.OpInfo[1].OperandType),
            OpToFold.getImm());

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    assert(Imm.getBitWidth() == 64);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }
  }

  MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
}

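// Constant fold a 32-bit bitwise or shift operation whose source values are
// both known. Returns true and sets \p Result if \p Opcode is handled.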
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

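// Return the plain 32-bit move opcode for the scalar or vector register file.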
static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

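// If \p Op is a virtual register defined by a move-immediate, return the
// defining instruction's immediate source; otherwise return \p Op unchanged.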
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister)
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1  -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

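// Fold \p OpToFold, the source of \p MI, into every use of \p MI's destination
// register, constant folding users or rewriting copies into movs where
// possible.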
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse, NextInstUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
    }
  }
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  MRI = &MF.getRegInfo();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI))
        continue;

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //    ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}