//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

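// Pull in the TableGen-generated pieces of AMDGPUGenInstrInfo.inc:
// GET_INSTRINFO_CTOR_DTOR provides the AMDGPUGenInstrInfo constructor and
// destructor, GET_INSTRINFO_NAMED_OPS provides getNamedOperandIdx and the
// OpName enum, and GET_INSTRMAP_INFO provides the instruction mapping
// tables (such as getMCOpcodeGen) used below.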
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUGenInstrInfo(-1, -1), ST(st) {}

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return nullptr;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

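// Lower the REGISTER_LOAD and REGISTER_STORE pseudo instructions after
// register allocation.  When the offset register is the special
// INDIRECT_BASE_ADDR, the access becomes a plain move to or from the
// corresponding register in the indirect address register class; any other
// offset goes through the buildIndirectRead/buildIndirectWrite hooks that
// the subclasses provide.  Returns false for any other instruction so the
// generic expansion logic can handle it.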
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                             AMDGPU::OpName::chan);
  if (isRegisterLoad(*MI)) {
    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         calculateIndirectAddress(RegIndex, Channel),
                         OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}

MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  // TODO: Implement this function
  return nullptr;
}

MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return nullptr;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::enableClusterLoads() const {
  return true;
}

// FIXME: This behaves strangely. If, for example, you have 32 loads and
// stores, the first 16 loads will be interleaved with the stores, and the
// next 16 will be clustered as expected. It should really split them into
// two batches of 16 stores.
//
// Loads are clustered until this returns false, rather than trying to
// schedule groups of stores. This also means we end up saying that loads
// from different address spaces should be clustered, as should loads that
// might cause bank conflicts.
//
// This hook might be deprecated, so it may not be worth much effort to fix.
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                              int64_t Offset0, int64_t Offset1,
                                              unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

bool AMDGPUInstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                        ArrayRef<MachineOperand> Pred2) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

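// Returns the first register index available for indirect addressing in
// this function.  Live-in registers that belong to the indirect address
// register class are skipped over: the result is one past the highest
// live-in index, 0 if there are no live-ins, or -1 if the function has no
// stack objects at all.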
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

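// Returns the last register index that indirect addressing may use in this
// function: the begin index plus the frame offset that the target frame
// lowering reports for the function's stack objects, or -1 when there are
// no stack objects.  Variable-sized objects are not supported.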
int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = MF.getSubtarget().getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

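// Maps a MIMG opcode to the variant that writes the requested number of
// channels, using the TableGen-generated AMDGPU::getMaskedMIMGOp tables.
// Any other channel count returns the opcode unchanged.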
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}

// Wrapper for the TableGen'd function.  enum Subtarget is not defined in
// any header file, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
}
}
}

// This must be kept in sync with the SISubtarget class in SIInstrInfo.td.
enum SISubtarget {
  SI = 0,
  VI = 1
};

static enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
  switch (Gen) {
  default:
    return SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
    return VI;
  }
}

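// Resolves a pseudo opcode to the real MC opcode for the current subtarget
// generation via the TableGen-generated mapping.  Returns the opcode itself
// if it is already a native instruction, or -1 if the pseudo has no
// encoding on this generation.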
350int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
Eric Christopher6c5b5112015-03-11 18:43:21 +0000351 int MCOp = AMDGPU::getMCOpcode(
352 Opcode, AMDGPUSubtargetToSISubtarget(ST.getGeneration()));
Marek Olsaka93603d2015-01-15 18:42:51 +0000353
354 // -1 means that Opcode is already a native instruction.
355 if (MCOp == -1)
356 return Opcode;
357
358 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
359 // no encoding in the given subtarget generation.
360 if (MCOp == (uint16_t)-1)
361 return -1;
362
363 return MCOp;
364}
Alex Lorenzef5c1962015-07-28 23:02:45 +0000365
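// Names for the target-index operands used by MIR serialization, pairing
// each AMDGPU::TI_* value with the string printed in MIR output.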
ArrayRef<std::pair<int, const char *>>
AMDGPUInstrInfo::getSerializableTargetIndices() const {
  static std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}