//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
/// \brief Fold the source operands of mov and copy instructions into their
/// uses when it is legal to do so.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"

using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fold Operands";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

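// A deferred fold: the instruction and operand number to rewrite, together
// with either the register operand or the immediate value to fold into it.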
struct FoldCandidate {
  MachineInstr *UseMI;
  unsigned UseOpNo;
  MachineOperand *OpToFold;
  uint64_t ImmToFold;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), UseOpNo(OpNo) {
    if (FoldOp->isImm()) {
      OpToFold = nullptr;
      ImmToFold = FoldOp->getImm();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isImm() const {
    return !OpToFold;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIFoldOperands, DEBUG_TYPE,
                      "SI Fold Operands", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFoldOperands, DEBUG_TYPE,
                    "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

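// Mov-like opcodes whose single source operand can safely be propagated into
// the instructions that use their result.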
static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

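// Rewrite the use operand described by \p Fold, replacing it with the folded
// immediate or register. Returns false for operand kinds that are not yet
// handled (e.g. physical registers).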
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.
  return false;
}

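// Return true if \p MI is already the use instruction of a pending fold in
// \p FoldList.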
static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (const FoldCandidate &Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

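// Try to record a fold of \p OpToFold into operand \p OpNo of \p MI. If the
// operand is not legal in that position, attempt a v_mac -> v_mad rewrite or
// an operand commute to legalize it first.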
static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
    // Special case for v_mac_f32_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if (Opc == AMDGPU::V_MAC_F32_e64 &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      // Check if changing this to a v_mad_f32 instruction will allow us to
      // fold the operand.
      MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0;
    unsigned CommuteIdx1;
    bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    if (!CanCommute || !TII->commuteInstruction(MI))
      return false;

    if (!TII->isOperandLegal(MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

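// For each foldable mov/copy, gather the legal folds of its source operand
// into the users of its result, then rewrite those uses in place.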
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      // FIXME: Fold operands with subregs.
      if (OpToFold.isReg() &&
          (!TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()) ||
           OpToFold.getSubReg()))
        continue;

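      // Collect every legal fold of this def's uses first; the candidates are
      // only applied once the whole use list has been scanned.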
      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
           Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();
        const MachineOperand &UseOp = UseMI->getOperand(Use.getOperandNo());

        // FIXME: Fold operands with subregs.
        if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
            UseOp.isImplicit())) {
          continue;
        }

        APInt Imm;

        if (FoldingImm) {
          unsigned UseReg = UseOp.getReg();
          const TargetRegisterClass *UseRC
            = TargetRegisterInfo::isVirtualRegister(UseReg) ?
              MRI.getRegClass(UseReg) :
              TRI.getPhysRegClass(UseReg);

          Imm = APInt(64, OpToFold.getImm());

          // Split 64-bit constants into 32 bits for folding.
          if (UseOp.getSubReg()) {
            if (UseRC->getSize() != 8)
              continue;

            if (UseOp.getSubReg() == AMDGPU::sub0) {
              Imm = Imm.getLoBits(32);
            } else {
              assert(UseOp.getSubReg() == AMDGPU::sub1);
              Imm = Imm.getHiBits(32);
            }
          }

          // In order to fold immediates into copies, we need to change the
          // copy to a MOV.
          if (UseMI->getOpcode() == AMDGPU::COPY) {
            unsigned DestReg = UseMI->getOperand(0).getReg();
            const TargetRegisterClass *DestRC
              = TargetRegisterInfo::isVirtualRegister(DestReg) ?
                MRI.getRegClass(DestReg) :
                TRI.getPhysRegClass(DestReg);

            unsigned MovOp = TII->getMovOpcode(DestRC);
            if (MovOp == AMDGPU::COPY)
              continue;

            UseMI->setDesc(TII->get(MovOp));
          }
        }

        const MCInstrDesc &UseDesc = UseMI->getDesc();

        // Don't fold into target-independent nodes. Target-independent opcodes
        // don't have defined register classes.
        if (UseDesc.isVariadic() ||
            UseDesc.OpInfo[Use.getOperandNo()].RegClass == -1)
          continue;

        if (FoldingImm) {
          MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
          tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &ImmOp, TII);
          continue;
        }

        tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &OpToFold, TII);

        // FIXME: We could try to change the instruction from 64-bit to 32-bit
        // to enable more folding opportunities. The shrink operands pass
        // already does this.
      }

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (!Fold.isImm()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            Fold.OpToFold->setIsKill(false);
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
        }
      }
    }
  }
  return false;
}