//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their addresses on the fly; it also sets the STACK_SIZE info.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include <algorithm>
#include <set>
#include <vector>

namespace llvm {

class R600ControlFlowFinalizer : public MachineFunctionPass {

private:
  static char ID;
  const R600InstrInfo *TII;
  unsigned MaxFetchInst;

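  // Returns true for texture/vertex fetch instructions. Consecutive fetches
  // are grouped into a single CF_TC clause by MakeFetchClause() below.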
  bool isFetch(const MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::TEX_VTX_CONSTBUF:
    case AMDGPU::TEX_VTX_TEXBUF:
    case AMDGPU::TEX_LD:
    case AMDGPU::TEX_GET_TEXTURE_RESINFO:
    case AMDGPU::TEX_GET_GRADIENTS_H:
    case AMDGPU::TEX_GET_GRADIENTS_V:
    case AMDGPU::TEX_SET_GRADIENTS_H:
    case AMDGPU::TEX_SET_GRADIENTS_V:
    case AMDGPU::TEX_SAMPLE:
    case AMDGPU::TEX_SAMPLE_C:
    case AMDGPU::TEX_SAMPLE_L:
    case AMDGPU::TEX_SAMPLE_C_L:
    case AMDGPU::TEX_SAMPLE_LB:
    case AMDGPU::TEX_SAMPLE_C_LB:
    case AMDGPU::TEX_SAMPLE_G:
    case AMDGPU::TEX_SAMPLE_C_G:
    case AMDGPU::TXD:
    case AMDGPU::TXD_SHADOW:
      return true;
    default:
      return false;
    }
  }

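  // Instructions that do not produce a native CF entry and are therefore
  // skipped when sizing a fetch clause.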
  bool IsTrivialInst(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
      return true;
    default:
      return false;
    }
  }

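  // Count the consecutive fetch instructions starting at I (trivial
  // instructions are skipped, at most MaxFetchInst are taken) and emit a
  // CF_TC clause header covering them. Returns the first instruction that is
  // not part of the clause.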
  MachineBasicBlock::iterator
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
      unsigned CfAddress) const {
    MachineBasicBlock::iterator ClauseHead = I;
    unsigned AluInstCount = 0;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(I))
        continue;
      if (!isFetch(I))
        break;
      if (AluInstCount >= MaxFetchInst)
        break;
      AluInstCount++;
    }
    BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
        TII->get(AMDGPU::CF_TC))
        .addImm(CfAddress) // ADDR
        .addImm(AluInstCount); // COUNT
    return I;
  }
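  // Patch the target address of a control flow instruction that was emitted
  // with a placeholder. WHILE_LOOP records the entry one past the given
  // address; everything else records the address as-is.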
  void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
    switch (MI->getOpcode()) {
    case AMDGPU::WHILE_LOOP:
      MI->getOperand(0).setImm(Addr + 1);
      break;
    default:
      MI->getOperand(0).setImm(Addr);
      break;
    }
  }

  void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
      const {
    for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
        It != E; ++It) {
      MachineInstr *MI = *It;
      CounterPropagateAddr(MI, Addr);
    }
  }

public:
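  // The per-clause fetch limit depends on the subtarget generation: 8 for
  // HD4XXX and older, 16 for newer parts.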
  R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
      TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) {
    const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
    if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
      MaxFetchInst = 8;
    else
      MaxFetchInst = 16;
  }

  virtual bool runOnMachineFunction(MachineFunction &MF) {
    unsigned MaxStack = 0;
    unsigned CurrentStack = 0;
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
        ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
      std::vector<std::pair<unsigned, MachineInstr *> > IfThenElseStack;
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
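      // Shaders of type 1 begin with a CF_CALL_FS (fetch shader call), which
      // occupies a CF entry of its own.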
      if (MFI->ShaderType == 1) {
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
            TII->get(AMDGPU::CF_CALL_FS));
        CfCount++;
      }
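      // Walk the block, turning control flow pseudos into native CF
      // instructions. CfCount tracks the index of the next CF entry and is
      // used to compute jump targets; CurrentStack tracks the nesting depth.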
      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
          I != E;) {
        if (isFetch(I)) {
          I = MakeFetchClause(MBB, I, 0);
          CfCount++;
          continue;
        }

        MachineBasicBlock::iterator MI = I;
        I++;
        switch (MI->getOpcode()) {
        case AMDGPU::CF_ALU_PUSH_BEFORE:
          CurrentStack++;
          MaxStack = std::max(MaxStack, CurrentStack);
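          // Intentional fallthrough: CF_ALU_PUSH_BEFORE also occupies a CF
          // entry, so count it like KILLGT and CF_ALU.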
        case AMDGPU::KILLGT:
        case AMDGPU::CF_ALU:
          CfCount++;
          break;
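        // Loop header: emit WHILE_LOOP with a placeholder target and remember
        // it so the matching ENDLOOP can patch the address.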
        case AMDGPU::WHILELOOP: {
          CurrentStack++;
          MaxStack = std::max(MaxStack, CurrentStack);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::WHILE_LOOP))
              .addImm(0);
          std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
              std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(Pair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
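        // Loop end: patch every pending instruction of this loop (WHILE_LOOP,
        // breaks, continues) with the current address and emit END_LOOP
        // pointing just past the matching WHILE_LOOP entry.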
        case AMDGPU::ENDLOOP: {
          CurrentStack--;
          std::pair<unsigned, std::set<MachineInstr *> > Pair =
              LoopStack.back();
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
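        // If: emit a CF_JUMP with a placeholder target; the matching ELSE or
        // ENDIF patches it.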
        case AMDGPU::IF_PREDICATE_SET: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::CF_JUMP))
              .addImm(0)
              .addImm(0);
          std::pair<unsigned, MachineInstr *> Pair(CfCount, MIb);
          IfThenElseStack.push_back(Pair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
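        // Else: patch the pending CF_JUMP of the matching if to the current
        // address, then emit CF_ELSE with a placeholder for ENDIF to fill.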
        case AMDGPU::ELSE: {
          std::pair<unsigned, MachineInstr *> Pair = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::CF_ELSE))
              .addImm(0)
              .addImm(1);
          std::pair<unsigned, MachineInstr *> NewPair(CfCount, MIb);
          IfThenElseStack.push_back(NewPair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
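        // Endif: patch the pending CF_JUMP/CF_ELSE to the entry after the POP
        // emitted here, and pop one level off the stack.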
        case AMDGPU::ENDIF: {
          CurrentStack--;
          std::pair<unsigned, MachineInstr *> Pair = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount + 1);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::POP))
              .addImm(CfCount + 1)
              .addImm(1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
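        // A predicated break expands to three CF entries: a CF_JUMP over the
        // break, a LOOP_BREAK whose target is patched at ENDLOOP, and a POP.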
        case AMDGPU::PREDICATED_BREAK: {
          CurrentStack--;
          CfCount += 3;
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_JUMP))
              .addImm(CfCount)
              .addImm(1);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::LOOP_BREAK))
              .addImm(0);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::POP))
              .addImm(CfCount)
              .addImm(1);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
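        // Continue becomes CF_CONTINUE; its final target is patched when the
        // matching ENDLOOP is reached.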
        case AMDGPU::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              TII->get(AMDGPU::CF_CONTINUE))
              .addImm(CfCount);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        default:
          break;
        }
      }
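      // Record the deepest nesting reached so the hardware stack can be
      // sized accordingly.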
      BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
          TII->get(AMDGPU::STACK_SIZE))
          .addImm(MaxStack);
    }

    return false;
  }

  const char *getPassName() const {
    return "R600 Control Flow Finalizer Pass";
  }
};

char R600ControlFlowFinalizer::ID = 0;

}


llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
  return new R600ControlFlowFinalizer(TM);
}
