//===-- SIFoldOperands.cpp - Fold operands ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
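/// Fold the source operands of simple moves and copies (immediates, frame
/// indexes, and virtual registers) directly into the instructions that use
/// them, then constant-fold any operations that become trivial as a result.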
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

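// A FoldCandidate records one pending fold: the instruction whose operand will
// be rewritten, the index of that operand, and the value to substitute. Kind
// discriminates the union (register operand, immediate, or frame index).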
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

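// Returns true if MI is a move or copy whose single source operand is a
// candidate for folding into the uses of its destination register.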
static bool isSafeToFold(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    // If there are additional implicit register operands, this may be used for
    // register indexing so the source register operand isn't simply copied.
    unsigned NumOps = MI.getDesc().getNumOperands() +
      MI.getDesc().getNumImplicitUses();

    return MI.getNumOperands() == NumOps;
  }
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

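// Rewrite the use operand recorded in Fold in place, changing it to an
// immediate, a frame index, or a substituted virtual register as appropriate.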
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

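// Try to record folding OpToFold into operand OpNo of MI. If the operand is
// not legal as-is, attempt instruction-specific rewrites (v_mac -> v_mad,
// s_setreg_b32 -> s_setreg_imm32_b32) or commuting the operands before
// giving up.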
static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef();
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

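// Collect fold candidates for a single use of OpToFold. REG_SEQUENCE uses are
// handled by recursing into the uses of the REG_SEQUENCE result, copies of
// immediates are converted into movs, and 64-bit immediates are split per
// subregister before being queued.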
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless the fold is a
    // full copy, since a subregister use tied to a full register def doesn't
    // really make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI.use_begin(RegSeqDstReg), RSE = MRI.use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI.getRegClass(DestReg) :
      TRI.getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  APInt Imm(64, OpToFold.getImm());

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI.getRegClass(UseReg) :
      TRI.getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }
  }

  MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
}

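// Evaluate the 32-bit bitwise operations this pass knows how to constant fold;
// returns false for any other opcode.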
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  int32_t LHS, int32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  default:
    return false;
  }
}

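// Select the 32-bit move opcode for the result: s_mov_b32 for scalar (SGPR)
// destinations, v_mov_b32 otherwise.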
static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
    Desc.getNumImplicitUses() +
    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

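// Swap in a new opcode for MI and drop any implicit operands the new opcode no
// longer needs.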
static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MachineOperand &Src0 = MI->getOperand(1);
    if (Src0.isImm()) {
      Src0.setImm(~Src0.getImm());
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
      return true;
    }

    return false;
  }

  if (!MI->isCommutable())
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);

  MachineOperand *Src0 = &MI->getOperand(Src0Idx);
  MachineOperand *Src1 = &MI->getOperand(Src1Idx);
  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    Src0->setImm(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 || Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    }
  }

  return false;
}

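// Pass driver: for every foldable def (see isSafeToFold), gather fold
// candidates across all uses of the destination register, apply them, and then
// constant-fold any uses that became trivial. Immediates that are not inline
// constants are only folded when they have a single literal use, to avoid
// growing code size.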
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI))
        continue;

      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      // %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      // ...
      // %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      // We need to mutate the operands of new mov instructions to add implicit
      // uses of EXEC, but adding them invalidates the use_iterator, so defer
      // this.
      SmallVector<MachineInstr *, 4> CopiesToReplace;

      std::vector<FoldCandidate> FoldList;
      if (FoldingImm) {
        unsigned NumLiteralUses = 0;
        MachineOperand *NonInlineUse = nullptr;
        int NonInlineUseOpNo = -1;

        // Try to fold any inline immediate uses, and then only fold other
        // constants if they have one use.
        //
        // The legality of the inline immediate must be checked based on the use
        // operand, not the defining instruction, because 32-bit instructions
        // with 32-bit inline immediate sources may be used to materialize
        // constants used in 16-bit operands.
        //
        // e.g. it is unsafe to fold:
        //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
        //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

        // Folding immediates with more than one use will increase program size.
        // FIXME: This will also reduce register usage, which may be better
        // in some cases. A better heuristic is needed.
        for (MachineRegisterInfo::use_iterator
               Use = MRI.use_begin(Dst.getReg()), E = MRI.use_end();
             Use != E; ++Use) {
          MachineInstr *UseMI = Use->getParent();

          if (TII->isInlineConstant(OpToFold, OpSize)) {
            foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                        CopiesToReplace, TII, TRI, MRI);
          } else {
            if (++NumLiteralUses == 1) {
              NonInlineUse = &*Use;
              NonInlineUseOpNo = Use.getOperandNo();
            }
          }
        }

        if (NumLiteralUses == 1) {
          MachineInstr *UseMI = NonInlineUse->getParent();
          foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList,
                      CopiesToReplace, TII, TRI, MRI);
        }
      } else {
        // Folding register.
        for (MachineRegisterInfo::use_iterator
               Use = MRI.use_begin(Dst.getReg()), E = MRI.use_end();
             Use != E; ++Use) {
          MachineInstr *UseMI = Use->getParent();

          foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                      CopiesToReplace, TII, TRI, MRI);
        }
      }

      // Make sure we add EXEC uses to any new v_mov instructions created.
      for (MachineInstr *Copy : CopiesToReplace)
        Copy->addImplicitDefUseOperands(MF);

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (Fold.isReg()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            // FIXME: Probably shouldn't bother trying to fold if not an
            // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
            // copies.
            MRI.clearKillFlags(Fold.OpToFold->getReg());
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');

          // Folding the immediate may reveal operations that can be constant
          // folded or replaced with a copy. This can happen for example after
          // frame indices are lowered to constants or from splitting 64-bit
          // constants.
          tryConstantFoldOp(MRI, TII, Fold.UseMI);
        }
      }
    }
  }
  return false;
}