//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(-1, -1), RI(tm), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}
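
// Advance \p iter to the next branch instruction in \p MBB. Returns true
// with \p iter pointing at the first BRANCH or BRANCH_COND_* instruction
// found, or false once \p iter reaches the end of the block.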
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

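// Expand the RegisterLoad and RegisterStore pseudo instructions. When the
// offset register is the constant INDIRECT_BASE_ADDR, the access becomes a
// plain move to or from the corresponding indirect address register;
// otherwise it is expanded to a target-specific indirect read or write.
// Returns false for any other instruction, leaving it untouched.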
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);

  if (isRegisterLoad(*MI)) {
    int DstOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         Address, OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr *> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode *> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16,
  // then schedule together.
  // TODO: Make the loads schedule near if it fits in a cacheline.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(
    const SmallVectorImpl<MachineOperand> &Pred1,
    const SmallVectorImpl<MachineOperand> &Pred2) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

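// RegisterLoad and RegisterStore pseudo instructions are identified by
// target-specific flag bits set in the instruction description's TSFlags.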
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

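// Return the first register index in the indirect address register class
// that indirect addressing may use: one past the highest-numbered register
// of that class that is live in to the function. Returns -1 when the
// function has no stack objects, and 0 when there are no such live-ins.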
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

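// Return the end of the register index range used for indirect addressing:
// the begin index plus the frame index offset reported by the target's
// frame lowering. Returns -1 when the function has no stack objects.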
int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable-sized objects are not supported.
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

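// Rewrite the register class of each virtual register defined by \p MI to
// the equivalent class supported by the ISA, as chosen by
// AMDGPURegisterInfo::getISARegClass.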
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA.
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}

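// Return the variant of the MIMG instruction \p Opcode that writes the given
// number of channels; opcodes with no variant for \p Channels are returned
// unchanged.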
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}