//===-- SIFoldOperands.cpp - Fold operands ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

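// A deferred fold of OpToFold into operand UseOpNo of UseMI. The union holds
// what will be folded, discriminated by Kind: a register operand, an
// immediate value, or a frame index.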
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

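// Folding is only attempted out of simple move-like instructions: moves of a
// register, immediate, or frame index, and plain COPYs. As a simplified,
// illustrative example (not taken from a real test):
//
//   %vreg0 = S_MOV_B32 1
//   %vreg1 = S_AND_B32 %vreg0, %vreg2
//
// can be rewritten, when the immediate is legal for the use, as:
//
//   %vreg1 = S_AND_B32 1, %vreg2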
static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

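// Apply a collected fold, rewriting the use operand in place as an
// immediate, a frame index, or a (possibly sub-register) virtual register.
// Returns false for cases that are not handled yet, such as physical
// registers.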
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

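// Check whether MI is already the target of a pending fold in FoldList.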
static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (const FoldCandidate &Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

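// Record a fold of OpToFold into operand OpNo of MI, if it is (or can be
// made) legal. When the operand is illegal as-is, two rewrites are tried:
// converting v_mac_f32 into v_mad_f32 so that src2 becomes foldable, and
// commuting the instruction so the value lands in an operand slot that does
// accept it.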
static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_f32_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if (Opc == AMDGPU::V_MAC_F32_e64 &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      // Check if changing this to a v_mad_f32 instruction will allow us to
      // fold the operand.
      MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // If we are already folding into another operand of MI, we can't commute
    // the instruction; doing so would risk making the other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // The operand is not legal as-is, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

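// Try to fold OpToFold into the operand at UseOpIdx of UseMI, queueing a
// FoldCandidate on success. 64-bit immediates may be split into 32-bit
// halves, COPY users of an immediate are turned into the matching mov, and
// folds into a REG_SEQUENCE are forwarded to the users of its result
// instead, since literals are not legal REG_SEQUENCE operands.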
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only fold a full
    // copy, since a subregister use tied to a full register def doesn't
    // really make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_F32 %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_F32 %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI.getRegClass(UseReg) :
      TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
      TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI.getRegClass(DestReg) :
        TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI.use_begin(RegSeqDstReg),
           RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target independent nodes. Target independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities. The shrink operands pass
  // already does this.
}

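// Constant-fold a bitwise binary operation whose two source values are known
// immediates. Returns false for opcodes this does not know how to evaluate.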
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  int32_t LHS, int32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  default:
    return false;
  }
}

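// Pick the 32-bit move opcode matching the destination register bank.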
static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
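// Returns true if MI was rewritten into a mov or a copy.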
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MachineOperand &Src0 = MI->getOperand(1);
    if (Src0.isImm()) {
      Src0.setImm(~Src0.getImm());
      MI->setDesc(TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
      return true;
    }

    return false;
  }

  if (!MI->isCommutable())
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);

  MachineOperand *Src0 = &MI->getOperand(Src0Idx);
  MachineOperand *Src1 = &MI->getOperand(Src1Idx);
  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    Src0->setImm(NewImm);
    MI->RemoveOperand(Src1Idx);
    MI->setDesc(TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  // Canonicalize the immediate into Src1 so the identities below only need
  // to check one side.
  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 || Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      MI->setDesc(TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src0Idx);
      MI->setDesc(TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (Opc == AMDGPU::V_AND_B32_e64 || Opc == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      MI->setDesc(TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      MI->setDesc(TII->get(AMDGPU::COPY));
    } else
      return false;

    return true;
  }

  if (Opc == AMDGPU::V_XOR_B32_e64 || Opc == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      MI->setDesc(TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

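// Walk every block and try to fold the source of each move-like instruction
// into all users of its destination. Folds are collected first and applied
// afterwards, so use lists are not mutated while they are being iterated.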
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

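      // Every opcode accepted by isSafeToFold() carries its single source in
      // operand 1; that is the value we try to propagate to the uses of the
      // destination register.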
      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //  ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      // We need to mutate the operands of new mov instructions to add
      // implicit uses of EXEC, but adding them invalidates the use_iterator,
      // so defer this.
      SmallVector<MachineInstr *, 4> CopiesToReplace;

      std::vector<FoldCandidate> FoldList;
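      // Queue up a fold for every use of the destination register; the
      // rewrites are applied below, after iteration over the use list is
      // done.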
      for (MachineRegisterInfo::use_iterator
             Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();

        foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                    CopiesToReplace, TII, TRI, MRI);
      }

      // Make sure we add EXEC uses to any new v_mov instructions created.
      for (MachineInstr *Copy : CopiesToReplace)
        Copy->addImplicitDefUseOperands(MF);

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (Fold.isReg()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            // FIXME: Probably shouldn't bother trying to fold if not an
            // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
            // copies.
            MRI.clearKillFlags(Fold.OpToFold->getReg());
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');

          // Folding the immediate may reveal operations that can be constant
          // folded or replaced with a copy. This can happen for example after
          // frame indices are lowered to constants or from splitting 64-bit
          // constants.
          tryConstantFoldOp(MRI, TII, Fold.UseMI);
        }
      }
    }
  }
  return false;
}