//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their addresses on the fly; it also sets the STACK_SIZE info.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include <algorithm>
#include <set>
#include <utility>
#include <vector>

namespace llvm {

class R600ControlFlowFinalizer : public MachineFunctionPass {

private:
  static char ID;
  const R600InstrInfo *TII;
  // Maximum number of fetch instructions allowed in a single fetch clause.
  unsigned MaxFetchInst;

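  /// \returns true if \p MI is a texture or vertex fetch instruction, i.e. an
  /// instruction that must be grouped into a fetch clause.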
  bool isFetch(const MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::TEX_VTX_CONSTBUF:
    case AMDGPU::TEX_VTX_TEXBUF:
    case AMDGPU::TEX_LD:
    case AMDGPU::TEX_GET_TEXTURE_RESINFO:
    case AMDGPU::TEX_GET_GRADIENTS_H:
    case AMDGPU::TEX_GET_GRADIENTS_V:
    case AMDGPU::TEX_SET_GRADIENTS_H:
    case AMDGPU::TEX_SET_GRADIENTS_V:
    case AMDGPU::TEX_SAMPLE:
    case AMDGPU::TEX_SAMPLE_C:
    case AMDGPU::TEX_SAMPLE_L:
    case AMDGPU::TEX_SAMPLE_C_L:
    case AMDGPU::TEX_SAMPLE_LB:
    case AMDGPU::TEX_SAMPLE_C_LB:
    case AMDGPU::TEX_SAMPLE_G:
    case AMDGPU::TEX_SAMPLE_C_G:
    case AMDGPU::TXD:
    case AMDGPU::TXD_SHADOW:
      return true;
    default:
      return false;
    }
  }

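  /// \returns true if \p MI is a pseudo instruction (KILL, RETURN) that is
  /// skipped when scanning for the end of a fetch clause.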
  bool IsTrivialInst(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
      return true;
    default:
      return false;
    }
  }

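  /// Groups the consecutive fetch instructions starting at \p I into a single
  /// clause and emits a CF_TC control flow instruction in front of them.
  /// \returns an iterator to the first instruction not included in the clause.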
  MachineBasicBlock::iterator
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
      unsigned CfAddress) const {
    MachineBasicBlock::iterator ClauseHead = I;
    unsigned AluInstCount = 0;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(I))
        continue;
      if (!isFetch(I))
        break;
      // Do not let the clause grow past the subtarget's maximum size.
      if (AluInstCount >= MaxFetchInst)
        break;
      AluInstCount++;
    }
    BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
        TII->get(AMDGPU::CF_TC))
        .addImm(CfAddress) // ADDR
        .addImm(AluInstCount); // COUNT
    return I;
  }
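  /// Patches the address operand of \p MI with the now-known target \p Addr
  /// (WHILE_LOOP receives \p Addr + 1, i.e. the slot after the END_LOOP).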
  void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
    switch (MI->getOpcode()) {
    case AMDGPU::WHILE_LOOP:
      MI->getOperand(0).setImm(Addr + 1);
      break;
    default:
      MI->getOperand(0).setImm(Addr);
      break;
    }
  }
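  /// Applies CounterPropagateAddr to every instruction in \p MIs.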
  void CounterPropagateAddr(const std::set<MachineInstr *> &MIs, unsigned Addr)
      const {
    for (std::set<MachineInstr *>::const_iterator It = MIs.begin(),
        E = MIs.end(); It != E; ++It) {
      MachineInstr *MI = *It;
      CounterPropagateAddr(MI, Addr);
    }
  }

public:
  R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
      TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) {
    const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
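    // Fetch clauses are limited to 8 instructions on older parts
    // (up to HD4XXX) and to 16 instructions on later generations.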
    if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
      MaxFetchInst = 8;
    else
      MaxFetchInst = 16;
  }

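  /// Lowers every control flow pseudo instruction in \p MF to its native
  /// counterpart, groups fetch instructions into clauses and emits a
  /// STACK_SIZE instruction holding the maximum control flow stack depth.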
  virtual bool runOnMachineFunction(MachineFunction &MF) {
    unsigned MaxStack = 0;
    unsigned CurrentStack = 0;
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
        ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
      std::vector<std::pair<unsigned, MachineInstr *> > IfThenElseStack;
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
      if (MFI->ShaderType == 1) {
        // Vertex shaders start by calling the fetch shader.
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
            TII->get(AMDGPU::CF_CALL_FS));
        CfCount++;
      }
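      // Lower the pseudo instructions in program order, tracking the index of
      // the next control flow instruction (CfCount) and the stack depth.
      // LoopStack and IfThenElseStack record the instructions whose jump
      // targets still need to be patched.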
      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
          I != E;) {
        if (isFetch(I)) {
          I = MakeFetchClause(MBB, I, 0);
          CfCount++;
          continue;
        }

        MachineBasicBlock::iterator MI = I;
        I++;
        switch (MI->getOpcode()) {
        case AMDGPU::CF_ALU_PUSH_BEFORE:
          CurrentStack++;
          MaxStack = std::max(MaxStack, CurrentStack);
          // Fall through: both ALU clause markers occupy one CF slot.
        case AMDGPU::CF_ALU:
          CfCount++;
          break;
        case AMDGPU::WHILELOOP: {
          CurrentStack++;
          MaxStack = std::max(MaxStack, CurrentStack);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::WHILE_LOOP))
              .addImm(0);
          // The jump target is unknown until the matching ENDLOOP, so record
          // the WHILE_LOOP for later patching.
          std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
              std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(Pair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDLOOP: {
          CurrentStack--;
          // Patch every instruction recorded for this loop (WHILE_LOOP,
          // breaks and continues) now that the END_LOOP address is known.
          std::pair<unsigned, std::set<MachineInstr *> > Pair =
              LoopStack.back();
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::IF_PREDICATE_SET: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::CF_JUMP))
              .addImm(0)
              .addImm(0);
          // The jump target is patched at the matching ELSE or ENDIF.
          std::pair<unsigned, MachineInstr *> Pair(CfCount, MIb);
          IfThenElseStack.push_back(Pair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ELSE: {
          std::pair<unsigned, MachineInstr *> Pair = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          // Point the pending CF_JUMP at this CF_ELSE; the CF_ELSE itself is
          // patched at the ENDIF.
          CounterPropagateAddr(Pair.second, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::CF_ELSE))
              .addImm(0)
              .addImm(1);
          std::pair<unsigned, MachineInstr *> NewPair(CfCount, MIb);
          IfThenElseStack.push_back(NewPair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDIF: {
          CurrentStack--;
          std::pair<unsigned, MachineInstr *> Pair = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          // Point the pending CF_JUMP/CF_ELSE past the POP emitted below.
          CounterPropagateAddr(Pair.second, CfCount + 1);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::POP))
              .addImm(CfCount + 1)
              .addImm(1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::PREDICATED_BREAK: {
          CurrentStack--;
          // A predicated break expands into three CF instructions:
          // a CF_JUMP, a LOOP_BREAK and a POP.
          CfCount += 3;
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_JUMP))
              .addImm(CfCount)
              .addImm(1);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::LOOP_BREAK))
              .addImm(0);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::POP))
              .addImm(CfCount)
              .addImm(1);
          // The LOOP_BREAK target is patched at the matching ENDLOOP.
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::CF_CONTINUE))
              .addImm(CfCount);
          // The CF_CONTINUE target is patched at the matching ENDLOOP.
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        default:
          break;
        }
      }
      // Record the maximum control flow stack depth at the top of the block.
      BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
          TII->get(AMDGPU::STACK_SIZE))
          .addImm(MaxStack);
    }

    return false;
  }

  const char *getPassName() const {
    return "R600 Control Flow Finalizer Pass";
  }
};

char R600ControlFlowFinalizer::ID = 0;

} // end namespace llvm

llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
  return new R600ControlFlowFinalizer(TM);
}