//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

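// The TableGen-generated AMDGPUGenInstrInfo.inc included above provides the
// instruction info constructor/destructor, the named-operand helpers
// (AMDGPU::getNamedOperandIdx) used by expandPostRAPseudo below, and the
// instruction mapping tables behind getMaskedMIMGOp and getMCOpcode.
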
// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUGenInstrInfo(-1, -1), RI(st), ST(st) {}

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

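// Most of the generic TargetInstrInfo queries below are still unimplemented
// for AMD GPUs; they return conservative defaults.
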
bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return nullptr;
}

bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

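// Spilling registers to stack slots is not implemented for AMD GPUs, so
// reaching either of the two hooks below is an error.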
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

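// Expand the pseudo register load/store instructions used for indirect
// register addressing. When the offset operand is the INDIRECT_BASE_ADDR
// placeholder, the access becomes a plain move to or from the indirect
// address register selected by (register index, channel); otherwise a
// target-specific indirect read or write is emitted via
// buildIndirectRead/buildIndirectWrite.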
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                             AMDGPU::OpName::chan);
  if (isRegisterLoad(*MI)) {
    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         calculateIndirectAddress(RegIndex, Channel),
                         OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return nullptr;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return nullptr;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16,
  // then schedule together.
  // TODO: Make the loads schedule near if they fit in a cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

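// The AMDGPU_FLAG_REGISTER_LOAD/STORE bits are expected to be set on the
// register load/store pseudo instructions (via TSFlags in their TableGen
// definitions); the helpers below just test those bits.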
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

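// getIndirectIndexBegin/getIndirectIndexEnd bound the range of indirect
// address register indices a function may use: the begin index is one past
// the highest live-in register of the indirect address register class, and
// the end index adds the frame lowering's offset for frame index -1 to that
// begin index.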
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = MF.getTarget().getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}

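// Map a MIMG opcode to the variant that writes only the requested number of
// channels. For example, a sample that only needs its first two components
// could be switched to the 2-channel form; 4 channels (or any other count)
// returns the opcode unchanged.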
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}

// Wrapper for the TableGen-generated mapping function. The Subtarget enum is
// not defined in any header file, so wrap it in a function that takes an
// unsigned instead.
namespace llvm {
namespace AMDGPU {
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcode(Opcode);
}
}
}