//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDIL.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

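// AMDGPUGenInstrInfo.inc is generated by TableGen from the target's
// instruction definitions; defining GET_INSTRINFO_CTOR before including it
// pulls in the generated AMDGPUGenInstrInfo constructor used below.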
#define GET_INSTRINFO_CTOR
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm, *this), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}
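
/// \brief Find the next branch instruction at or after \p iter in \p MBB.
///
/// Returns true with \p iter pointing at the branch if one of the target's
/// branch opcodes (BRANCH, BRANCH_COND_i32, BRANCH_COND_f32) is found;
/// otherwise returns false and leaves \p iter at MBB.end().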
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

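// Walk backwards from the end of MBB, skipping over any trailing flow-control
// instructions (ENDLOOP, ENDIF, ELSE), and return the position just after the
// last non-flow-control instruction, i.e. where new code should be inserted
// so it executes before the block's closing flow control.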
static MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator tmp = MBB->end();
  if (!MBB->size()) {
    return MBB->end();
  }
  while (--tmp) {
    if (tmp->getOpcode() == AMDGPU::ENDLOOP
        || tmp->getOpcode() == AMDGPU::ENDIF
        || tmp->getOpcode() == AMDGPU::ELSE) {
      if (tmp == MBB->begin()) {
        return tmp;
      } else {
        continue;
      }
    } else {
      return ++tmp;
    }
  }
  return MBB->end();
}

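// Spill code is not implemented for this target yet, so the spill/restore
// hooks below assert if the register allocator ever reaches them.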
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

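/// \brief Heuristic for clustering loads during scheduling.
///
/// For example, two loads at offsets 0 and 12 (NumLoads == 2) are scheduled
/// near each other, while loads at offsets 0 and 32 are not.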
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1
         && "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16
  // bytes, then schedule them together.
  // TODO: Cluster loads whenever they fall within the same cacheline.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

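// TargetInstrInfo's contract is to return false once the condition has been
// reversed; returning true tells callers the condition cannot be reversed,
// which is the safe default while this is unimplemented.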
bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
    const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
    const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

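// Some register classes used during instruction selection are placeholders
// the final ISA does not support; convertToISA rewrites every virtual
// register an instruction defines to the equivalent ISA register class
// reported by AMDGPURegisterInfo::getISARegClass().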
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}