blob: 362a5c1e4e07b68df0beeccfe6bd19f1d6aafaac [file] [log] [blame]
//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//
12#include "AMDGPU.h"
13#include "SIInstrInfo.h"
14#include "llvm/ADT/Statistic.h"
15#include "llvm/CodeGen/MachineFunctionPass.h"
16#include "llvm/CodeGen/MachineInstrBuilder.h"
17#include "llvm/CodeGen/MachineRegisterInfo.h"
18#include "llvm/IR/LLVMContext.h"
19#include "llvm/IR/Function.h"
20#include "llvm/Support/Debug.h"
21#include "llvm/Target/TargetMachine.h"
22
23#define DEBUG_TYPE "si-shrink-instructions"
24
// Counts how many 64-bit encoded instructions were rewritten to their 32-bit
// encoding by this pass (reported by -stats).
STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instruction reduced to 32-bit.");

// Forward declaration of the initializer referenced by the INITIALIZE_PASS
// macros below; the definition is generated by those macros.
namespace llvm {
  void initializeSIShrinkInstructionsPass(PassRegistry&);
}
31
32using namespace llvm;
33
34namespace {
35
36class SIShrinkInstructions : public MachineFunctionPass {
37public:
38 static char ID;
39
40public:
41 SIShrinkInstructions() : MachineFunctionPass(ID) {
42 }
43
44 virtual bool runOnMachineFunction(MachineFunction &MF) override;
45
46 virtual const char *getPassName() const override {
47 return "SI Shrink Instructions";
48 }
49
50 virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
51 AU.setPreservesCFG();
52 MachineFunctionPass::getAnalysisUsage(AU);
53 }
54};
55
56} // End anonymous namespace.
57
58INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
59 "SI Lower il Copies", false, false)
60INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
61 "SI Lower il Copies", false, false)
62
63char SIShrinkInstructions::ID = 0;
64
65FunctionPass *llvm::createSIShrinkInstructionsPass() {
66 return new SIShrinkInstructions();
67}
68
69static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
70 const MachineRegisterInfo &MRI) {
71 if (!MO->isReg())
72 return false;
73
74 if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
75 return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));
76
77 return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
78}
79
80static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
81 const SIRegisterInfo &TRI,
82 const MachineRegisterInfo &MRI) {
83
84 const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
85 // Can't shrink instruction with three operands.
86 if (Src2)
87 return false;
88
89 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
90 const MachineOperand *Src1Mod =
91 TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
92
93 if (Src1 && (!isVGPR(Src1, TRI, MRI) || Src1Mod->getImm() != 0))
94 return false;
95
96 // We don't need to check src0, all input types are legal, so just make
97 // sure src0 isn't using any modifiers.
98 const MachineOperand *Src0Mod =
99 TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
100 if (Src0Mod && Src0Mod->getImm() != 0)
101 return false;
102
103 // Check output modifiers
104 const MachineOperand *Omod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
105 if (Omod && Omod->getImm() != 0)
106 return false;
107
108 const MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
109 return !Clamp || Clamp->getImm() == 0;
110}
111
112bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
113 MachineRegisterInfo &MRI = MF.getRegInfo();
114 const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
115 MF.getTarget().getInstrInfo());
116 const SIRegisterInfo &TRI = TII->getRegisterInfo();
117 std::vector<unsigned> I1Defs;
118
119 for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
120 BI != BE; ++BI) {
121
122 MachineBasicBlock &MBB = *BI;
123 MachineBasicBlock::iterator I, Next;
124 for (I = MBB.begin(); I != MBB.end(); I = Next) {
125 Next = std::next(I);
126 MachineInstr &MI = *I;
127
128 int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
129
130 if (Op32 == -1)
131 continue;
132
133 if (!canShrink(MI, TII, TRI, MRI)) {
134 // Try commtuing the instruction and see if that enables us to shrink
135 // it.
136 if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
137 !canShrink(MI, TII, TRI, MRI))
138 continue;
139 }
140
141 if (TII->isVOPC(Op32)) {
142 unsigned DstReg = MI.getOperand(0).getReg();
143 if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
144 // VOPC instructions can only write to the VCC register. We can't
145 // force them to use VCC here, because the register allocator
146 // has trouble with sequences like this, which cause the allocator
147 // to run out of registes if vreg0 and vreg1 belong to the VCCReg
148 // register class:
149 // vreg0 = VOPC;
150 // vreg1 = VOPC;
151 // S_AND_B64 vreg0, vreg1
152 //
153 // So, instead of forcing the instruction to write to VCC, we provide a
154 // hint to the register allocator to use VCC and then we
155 // we will run this pass again after RA and shrink it if it outpus to
156 // VCC.
157 MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
158 continue;
159 }
160 if (DstReg != AMDGPU::VCC)
161 continue;
162 }
163
164 // We can shrink this instruction
165 DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << "\n";);
166
167 MachineInstrBuilder MIB =
168 BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
169
170 // dst
171 MIB.addOperand(MI.getOperand(0));
172
173 MIB.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
174
175 const MachineOperand *Src1 =
176 TII->getNamedOperand(MI, AMDGPU::OpName::src1);
177 if (Src1)
178 MIB.addOperand(*Src1);
179
180 for (const MachineOperand &MO : MI.implicit_operands())
181 MIB.addOperand(MO);
182
183 DEBUG(dbgs() << "e32 MI = "; MI.dump(); dbgs() << "\n";);
184 ++NumInstructionsShrunk;
185 MI.eraseFromParent();
186 }
187 }
188 return false;
189}