//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

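// Pull in the TableGen-generated parts of AMDGPUGenInstrInfo.inc:
// GET_INSTRINFO_CTOR_DTOR provides the generated constructor and destructor,
// GET_INSTRINFO_NAMED_OPS provides AMDGPU::getNamedOperandIdx(), and
// GET_INSTRMAP_INFO provides instruction mapping tables such as
// AMDGPU::getMaskedMIMGOp().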
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(-1, -1), RI(tm), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return nullptr;
}
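
// Advance iter to the next branch instruction in MBB. Returns true with iter
// pointing at the branch if one is found, false with iter at MBB.end()
// otherwise.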
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}
105
Tom Stellard75aadc22012-12-11 21:25:42 +0000106void
107AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
108 MachineBasicBlock::iterator MI,
109 unsigned SrcReg, bool isKill,
110 int FrameIndex,
111 const TargetRegisterClass *RC,
112 const TargetRegisterInfo *TRI) const {
Matt Arsenaulteaa3a7e2013-12-10 21:37:42 +0000113 llvm_unreachable("Not Implemented");
Tom Stellard75aadc22012-12-11 21:25:42 +0000114}
115
116void
117AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
118 MachineBasicBlock::iterator MI,
119 unsigned DestReg, int FrameIndex,
120 const TargetRegisterClass *RC,
121 const TargetRegisterInfo *TRI) const {
Matt Arsenaulteaa3a7e2013-12-10 21:37:42 +0000122 llvm_unreachable("Not Implemented");
Tom Stellard75aadc22012-12-11 21:25:42 +0000123}
124
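// Expand the RegisterLoad and RegisterStore pseudo instructions, which model
// indirect register access. When the offset operand is the
// INDIRECT_BASE_ADDR sentinel, the index is known at compile time and the
// pseudo collapses to a plain move to or from the register in the indirect
// address register class; otherwise an indirect read or write sequence is
// built via the target hooks buildIndirectRead()/buildIndirectWrite().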
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                             AMDGPU::OpName::chan);
  if (isRegisterLoad(*MI)) {
    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         Address, OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return nullptr;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return nullptr;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

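// Scheduler hook deciding whether two loads should be clustered together.
// For example, with NumLoads == 2, loads at offsets 0 and 8 are scheduled
// near each other, while loads at offsets 0 and 32 are not.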
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16,
  // then schedule together.
  // TODO: Make the loads schedule near if they fit in a cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
    const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
    const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

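// REGISTER_STORE/REGISTER_LOAD are target-specific TSFlags bits set on the
// pseudo instruction definitions in TableGen; they let expandPostRAPseudo()
// recognize the pseudos without enumerating opcodes.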
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

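// Return the first register index of the indirect address register class
// that is free for indirect addressing: live-in registers (kernel inputs)
// occupy the low indices, so the result is one past the highest live-in
// register found in that class.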
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

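// Return one past the last register index used for indirect addressing.
// This assumes the target's frame lowering maps stack objects onto indirect
// registers, so the offset computed for the pseudo frame index -1 counts the
// registers consumed by stack objects.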
int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

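// Rewrite the register class of every virtual register defined by MI to a
// class supported by the ISA, as determined by getISARegClass().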
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *OldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *NewRegClass = RI.getISARegClass(OldRegClass);

        assert(NewRegClass);

        MRI.setRegClass(MO.getReg(), NewRegClass);
      }
    }
  }
}

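// Map a MIMG opcode to the equivalent variant that writes only the requested
// number of channels. The mapping table is generated by TableGen and pulled
// in through GET_INSTRMAP_INFO above.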
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}
358}