//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

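// Pull in the TableGen-generated parts of the instruction info:
// GET_INSTRINFO_CTOR_DTOR supplies the generated AMDGPUGenInstrInfo
// constructor/destructor, GET_INSTRINFO_NAMED_OPS supplies
// AMDGPU::getNamedOperandIdx() for named-operand lookup, and
// GET_INSTRMAP_INFO supplies the instruction mapping tables (used by
// getMaskedMIMGOp below).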
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(-1, -1), RI(tm), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}

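// Advance iter to the next branch instruction (conditional or unconditional)
// in MBB, returning true and leaving iter pointing at it if one is found.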
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

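// Spilling to stack slots is not implemented for this target at this point,
// so these hooks fail loudly with llvm_unreachable instead of silently
// miscompiling.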
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

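// Expand the RegisterLoad/RegisterStore pseudos that survive register
// allocation. When the offset register is the constant INDIRECT_BASE_ADDR,
// the access reduces to a plain move to or from an indirect address
// register; otherwise it becomes a real indirect read or write through the
// offset register.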
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);

  if (isRegisterLoad(*MI)) {
    int DstOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         Address, OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

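// Scheduling hook that clusters nearby loads. Note the DAG nodes themselves
// are not inspected here; only the offset distance and the number of loads
// already clustered are considered.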
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 16,
  // then schedule together.
  // TODO: Make the loads schedule near if it fits in a cacheline
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

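// Per the TargetInstrInfo contract, returning true from
// ReverseBranchCondition means the condition could not be reversed, so the
// unimplemented stub below is a conservative default.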
bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
    const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
    const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

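// RegisterStore/RegisterLoad pseudo instructions are tagged in TableGen with
// bits in TSFlags; these helpers test for those bits.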
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

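// First indirect register index that is free for allocation: -1 if the
// function has no stack objects (indirect addressing is unused), 0 if there
// are no live-ins to step over, otherwise one past the highest live-in
// register in the indirect address register class.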
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

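// One past the last indirect register index in use, or -1 if there are no
// stack objects. The frame lowering's offset for the sentinel frame index -1
// is used as the count of indirect registers the stack objects occupy.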
int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

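// Retarget the register classes of MI's virtual-register defs to equivalent
// classes the concrete ISA supports, as provided by
// AMDGPURegisterInfo::getISARegClass.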
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}

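// Switch a MIMG opcode to the variant that writes only the requested number
// of channels, using the TableGen-generated instruction map; channel counts
// other than 1-3 leave the opcode unchanged.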
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}
359}