//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}

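// Scan forward from iter to the first branch pseudo-instruction (BRANCH,
// BRANCH_COND_i32 or BRANCH_COND_f32) in MBB. Returns true with iter left
// pointing at the branch, or false once the end of the block is reached.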
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

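// Stack-slot spills and reloads are not implemented here, so reaching either
// of the following hooks indicates a bug (hence the asserts).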
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16 of
  // each other, then schedule them together.
  // TODO: Schedule the loads near each other if they fit in a cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

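// Per the TargetInstrInfo contract, returning true means the branch condition
// could not be reversed, so report failure until this is implemented.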
bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
    const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
    const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

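// Register load/store pseudo-instructions carry the
// AMDGPU_FLAG_REGISTER_LOAD/STORE bits in their TSFlags; these helpers simply
// test those bits.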
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

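// Rewrite every virtual register defined by MI to the register class the ISA
// actually supports, as reported by AMDGPURegisterInfo::getISARegClass().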
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}