//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

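// Pull in the TableGen-generated pieces of the instruction info:
// GET_INSTRINFO_CTOR provides the AMDGPUGenInstrInfo constructor,
// GET_INSTRINFO_NAMED_OPS provides the named-operand index helpers
// (e.g. AMDGPU::getNamedOperandIdx), and GET_INSTRMAP_INFO provides the
// instruction mapping tables.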
23#define GET_INSTRINFO_CTOR
Tom Stellard02661d92013-06-25 21:22:18 +000024#define GET_INSTRINFO_NAMED_OPS
Christian Konigf741fbf2013-02-26 17:52:42 +000025#define GET_INSTRMAP_INFO
Tom Stellard75aadc22012-12-11 21:25:42 +000026#include "AMDGPUGenInstrInfo.inc"
27
28using namespace llvm;
29
30AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
Bill Wendling37e9adb2013-06-07 20:28:55 +000031 : AMDGPUGenInstrInfo(0,0), RI(tm), TM(tm) { }
Tom Stellard75aadc22012-12-11 21:25:42 +000032
33const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
34 return RI;
35}
36
37bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
38 unsigned &SrcReg, unsigned &DstReg,
39 unsigned &SubIdx) const {
40// TODO: Implement this function
41 return false;
42}
43
44unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
45 int &FrameIndex) const {
46// TODO: Implement this function
47 return 0;
48}
49
50unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
51 int &FrameIndex) const {
52// TODO: Implement this function
53 return 0;
54}
55
56bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
57 const MachineMemOperand *&MMO,
58 int &FrameIndex) const {
59// TODO: Implement this function
60 return false;
61}
62unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
63 int &FrameIndex) const {
64// TODO: Implement this function
65 return 0;
66}
67unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
68 int &FrameIndex) const {
69// TODO: Implement this function
70 return 0;
71}
72bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
73 const MachineMemOperand *&MMO,
74 int &FrameIndex) const {
75// TODO: Implement this function
76 return false;
77}
78
79MachineInstr *
80AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
81 MachineBasicBlock::iterator &MBBI,
82 LiveVariables *LV) const {
83// TODO: Implement this function
84 return NULL;
85}
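
// Advance the iterator to the next AMDGPU branch pseudo instruction (BRANCH,
// BRANCH_COND_i32 or BRANCH_COND_f32) in MBB. Returns true and leaves the
// iterator pointing at that instruction, or false if none is found.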
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

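// Spilling registers to and reloading them from stack slots is not supported
// by this common base class, so these hooks simply assert if they are ever
// reached.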
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 16,
  // then schedule together.
  // TODO: Make the loads schedule near if it fits in a cacheline
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
                                                                        const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
                                                                        const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

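// RegisterLoad and RegisterStore pseudo instructions are identified by
// target-specific bits (AMDGPU_FLAG_REGISTER_LOAD/STORE) in the TSFlags of
// their instruction descriptions.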
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

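// Rewrite every virtual register defined by MI to a register class that the
// target ISA supports, as given by AMDGPURegisterInfo::getISARegClass().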
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}