//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

namespace llvm {
  void initializeSIShrinkInstructionsPass(PassRegistry&);
}

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Shrink Instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
                      "SI Shrink Instructions", false, false)
INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
                    "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

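/// Return true if \p MO is a register operand whose register belongs to a
/// VGPR register class, handling both virtual and physical registers.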
static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}

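/// Return true if \p MI can be rewritten to its 32-bit (e32) encoding: any
/// third operand must be shrinkable, src1 must be a VGPR, and no source or
/// output modifiers may be set, since only the 64-bit (VOP3) encoding has
/// room to express them.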
static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {

  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instructions with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then doing the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
      default: return false;

      case AMDGPU::V_MAC_F32_e64:
        if (!isVGPR(Src2, TRI, MRI) ||
            TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
          return false;
        break;

      case AMDGPU::V_CNDMASK_B32_e64:
        break;
    }
  }

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0; all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
    return false;

  return true;
}

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
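///
/// An illustrative sketch (the opcode and virtual register numbers are
/// hypothetical):
///   %vreg0 = V_MOV_B32_e32 0x1234
///   %vreg1 = V_ADD_F32_e32 %vreg0, %vreg2
/// becomes, when %vreg0 has no other uses:
///   %vreg1 = V_ADD_F32_e32 0x1234, %vreg2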
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {

  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI.getOpcode()) || TII->isVOP2(MI.getOpcode()) ||
         TII->isVOPC(MI.getOpcode()));

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI.getOperand(Src0Idx);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (Src0.isImm() &&
      TII->isLiteralConstant(Src0, TII->getOpSize(MI, Src0Idx)))
    return;

  // Literal constants and SGPRs can only be used in src0. If src0 is an
  // SGPR, we cannot commute the instruction to free src0, so we can't fold
  // any literal constants.
  if (Src0.isReg() && !isVGPR(&Src0, TRI, MRI))
    return;

  // Try to fold Src0.
  if (Src0.isReg() && MRI.hasOneUse(Src0.getReg())) {
    unsigned Reg = Src0.getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
        Src0.ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
    foldImmediates(MI, TII, MRI, false);
}

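/// Walk every instruction in the function: first try to shrink s_mov_b32
/// with a small immediate to s_movk_i32, then rewrite shrinkable
/// VOP3-encoded VALU instructions to their 32-bit (e32) forms and fold
/// literal constants into them.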
bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
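      // MI may be erased or replaced below, so grab the next iterator before
      // touching it.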
      Next = std::next(I);
      MachineInstr &MI = *I;

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
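      // Illustrative example (encoding sizes are the usual SOP1/SOPK sizes):
      //   s_mov_b32  s0, 0x1234    ; 8 bytes: opcode + 32-bit literal
      //   s_movk_i32 s0, 0x1234    ; 4 bytes: 16-bit signed immediate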
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm()) {
          if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
        }

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to
        // shrink it.
        if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

Tom Stellard1aaad692014-07-21 16:55:33 +0000236 if (TII->isVOPC(Op32)) {
237 unsigned DstReg = MI.getOperand(0).getReg();
238 if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
239 // VOPC instructions can only write to the VCC register. We can't
Matt Arsenault5d26d042014-09-13 19:58:27 +0000240 // force them to use VCC here, because the register allocator has
241 // trouble with sequences like this, which cause the allocator to run
242 // out of registers if vreg0 and vreg1 belong to the VCCReg register
243 // class:
Tom Stellard1aaad692014-07-21 16:55:33 +0000244 // vreg0 = VOPC;
245 // vreg1 = VOPC;
246 // S_AND_B64 vreg0, vreg1
247 //
Matt Arsenaulta9627ae2014-09-21 17:27:32 +0000248 // So, instead of forcing the instruction to write to VCC, we provide
249 // a hint to the register allocator to use VCC and then we we will run
250 // this pass again after RA and shrink it if it outputs to VCC.
Tom Stellard1aaad692014-07-21 16:55:33 +0000251 MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
252 continue;
253 }
254 if (DstReg != AMDGPU::VCC)
255 continue;
256 }
257
      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for
        // VOPC instructions.
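        // The e32 form reads its select mask implicitly from VCC, so src2
        // must end up allocated to VCC for the shrink to be valid.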
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        unsigned SReg = Src2->getReg();
        if (TargetRegisterInfo::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
          continue;
        }
        if (SReg != AMDGPU::VCC)
          continue;
      }

      // We can shrink this instruction.
      DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << '\n';);

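      // Rebuild the instruction with the e32 opcode, copying across only dst
      // and the named source operands; the VOP3-only modifier operands are
      // dropped (canShrink has already verified they are unset).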
      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // dst
      Inst32.addOperand(MI.getOperand(0));

      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.addOperand(*Src1);

      const MachineOperand *Src2 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src2);
      if (Src2)
        Inst32.addOperand(*Src2);

      ++NumInstructionsShrunk;
      MI.eraseFromParent();

      foldImmediates(*Inst32, TII, MRI);
      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}