//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fold Operands";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

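// A deferred fold of OpToFold into operand UseOpNo of UseMI. Immediates are
// recorded by value in ImmToFold (with OpToFold left null), since the operand
// being folded may itself be rewritten before the fold is applied; isImm()
// distinguishes the two cases.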
struct FoldCandidate {
  MachineInstr *UseMI;
  unsigned UseOpNo;
  MachineOperand *OpToFold;
  uint64_t ImmToFold;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
                UseMI(MI), UseOpNo(OpNo) {
    if (FoldOp->isImm()) {
      OpToFold = nullptr;
      ImmToFold = FoldOp->getImm();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isImm() const {
    return !OpToFold;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

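// Only plain moves and copies are safe sources to fold from: they have no
// side effects, and their single source value reaches the destination
// unmodified.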
static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

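// Rewrite the use operand described by Fold in place, either to the folded
// immediate or to the folded virtual register. Returns false if the fold
// cannot be applied (currently, folds involving physical registers are
// unhandled).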
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

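// Check whether MI is already the target of some pending fold in FoldList.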
static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (const FoldCandidate &Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

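// Try to record a fold of OpToFold into operand OpNo of MI. If the operand is
// not legal in that position as-is, first try rewriting V_MAC_F32 to
// V_MAD_F32 (V_MAC's src2 is tied to the destination, which restricts what
// can be folded there; V_MAD's is not), and failing that, try to commute the
// instruction so the operand lands in a slot where it is legal.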
static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {

    // Special case for v_mac_f32_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if (Opc == AMDGPU::V_MAC_F32_e64 &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      // Check if changing this to a v_mad_f32 instruction will allow us to
      // fold the operand.
      MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an immediate, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

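// Attempt to fold OpToFold (the source of a mov/copy) into the use of it at
// operand UseOpIdx of UseMI, accumulating successful candidates in FoldList.
// Copies that are converted to movs so that immediates can be folded into
// them are recorded in CopiesToReplace, so their implicit operands can be
// fixed up afterwards.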
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
      UseOp.isImplicit())) {
    return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
        MRI.getRegClass(UseReg) :
        TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
      TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
          MRI.getRegClass(DestReg) :
          TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
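  // Illustrative example (not from the source):
  //   %vreg1 = REG_SEQUENCE %vreg0, sub0, %vreg2, sub1
  // A literal feeding %vreg0 cannot replace the REG_SEQUENCE source operand,
  // but it may still fold into the instructions that read %vreg1's subregs.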
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
         RSUse = MRI.use_begin(RegSeqDstReg),
         RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target-independent nodes. Target-independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities. The shrink operands pass
  // already does this.
  return;
}

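// Walk each basic block and, for each safe mov/copy, try to fold its source
// operand into every use of its destination register.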
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //   %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //   ...
      //   %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      // We need to mutate the operands of new mov instructions to add
      // implicit uses of EXEC, but adding them invalidates the use_iterator,
      // so defer this.
      SmallVector<MachineInstr *, 4> CopiesToReplace;

      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
           Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();

        foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                    CopiesToReplace, TII, TRI, MRI);
      }

      // Make sure we add EXEC uses to any new v_mov instructions created.
      for (MachineInstr *Copy : CopiesToReplace)
        Copy->addImplicitDefUseOperands(MF);

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (!Fold.isImm()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            // FIXME: Probably shouldn't bother trying to fold if not an
            // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
            // copies.
            MRI.clearKillFlags(Fold.OpToFold->getReg());
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
        }
      }
    }
  }
  return false;
}