blob: f838701b35365660060e1a717fb85322b4358a1c [file] [log] [blame]
Tom Stellard75aadc22012-12-11 21:25:42 +00001//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// \brief SI Implementation of TargetInstrInfo.
12//
13//===----------------------------------------------------------------------===//
14
15
16#include "SIInstrInfo.h"
17#include "AMDGPUTargetMachine.h"
Tom Stellard16a9a202013-08-14 23:24:17 +000018#include "SIDefines.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000019#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/MachineRegisterInfo.h"
21#include "llvm/MC/MCInstrDesc.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000022
23using namespace llvm;
24
25SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
26 : AMDGPUInstrInfo(tm),
Bill Wendling37e9adb2013-06-07 20:28:55 +000027 RI(tm)
Tom Stellard75aadc22012-12-11 21:25:42 +000028 { }
29
/// \brief Return the SI-specific register info object for this target.
const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}
33
/// \brief Emit instructions to copy physical register \p SrcReg into
/// \p DestReg before instruction \p MI.
///
/// 32- and 64-bit scalar copies and 32-bit vector copies are emitted as a
/// single S_MOV/V_MOV.  Wider register classes are expanded into a sequence
/// of 32-bit moves over each 32-bit sub-register.  Redundant copies into M0
/// are elided when M0 already holds the source value.
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  // Zero-terminated sub-register index sequences used to expand wide copies
  // into a series of 32-bit moves.
  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value.  Walk backwards from the
    // insertion point looking for the nearest def of M0.
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      // Only a plain COPY or S_MOV_B32 def of M0 can be reasoned about;
      // anything else may set M0 to an unknown value.
      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      // The def must read the same source register we are about to copy.
      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    // Vector destinations may also be copied from scalar sources.
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    // There is no 96-bit scalar class, so the source must be VReg_96.
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  // Expand the wide copy: one 32-bit move per sub-register, stopping at the
  // zero terminator of the SubIndices list.
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
                                          get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    // All but the last move implicitly define the full destination register
    // so the copy sequence is treated as a unit by liveness analysis.
    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}
169
Christian Konig3c145802013-03-27 09:12:59 +0000170unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
171
172 int NewOpc;
173
174 // Try to map original to commuted opcode
175 if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
176 return NewOpc;
177
178 // Try to map commuted to original opcode
179 if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
180 return NewOpc;
181
182 return Opcode;
183}
184
Christian Konig76edd4f2013-02-26 17:52:29 +0000185MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
186 bool NewMI) const {
187
188 if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
189 !MI->getOperand(2).isReg())
190 return 0;
191
Christian Konig3c145802013-03-27 09:12:59 +0000192 MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
193
194 if (MI)
195 MI->setDesc(get(commuteOpcode(MI->getOpcode())));
196
197 return MI;
Christian Konig76edd4f2013-02-26 17:52:29 +0000198}
199
Tom Stellard75aadc22012-12-11 21:25:42 +0000200MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
201 int64_t Imm) const {
Christian Konigc756cb992013-02-16 11:28:22 +0000202 MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
NAKAMURA Takumi2a0b40f2012-12-20 00:22:11 +0000203 MachineInstrBuilder MIB(*MF, MI);
204 MIB.addReg(DstReg, RegState::Define);
205 MIB.addImm(Imm);
Tom Stellard75aadc22012-12-11 21:25:42 +0000206
207 return MI;
208
209}
210
211bool SIInstrInfo::isMov(unsigned Opcode) const {
212 switch(Opcode) {
213 default: return false;
214 case AMDGPU::S_MOV_B32:
215 case AMDGPU::S_MOV_B64:
216 case AMDGPU::V_MOV_B32_e32:
217 case AMDGPU::V_MOV_B32_e64:
Tom Stellard75aadc22012-12-11 21:25:42 +0000218 return true;
219 }
220}
221
222bool
223SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
224 return RC != &AMDGPU::EXECRegRegClass;
225}
Tom Stellardf3b2a1e2013-02-06 17:32:29 +0000226
Tom Stellard16a9a202013-08-14 23:24:17 +0000227int SIInstrInfo::isMIMG(uint16_t Opcode) const {
228 return get(Opcode).TSFlags & SIInstrFlags::MIMG;
229}
230
Michel Danzer20680b12013-08-16 16:19:24 +0000231int SIInstrInfo::isSMRD(uint16_t Opcode) const {
232 return get(Opcode).TSFlags & SIInstrFlags::SMRD;
233}
234
Tom Stellard93fabce2013-10-10 17:11:55 +0000235bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
236 return get(Opcode).TSFlags & SIInstrFlags::VOP1;
237}
238
239bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
240 return get(Opcode).TSFlags & SIInstrFlags::VOP2;
241}
242
243bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
244 return get(Opcode).TSFlags & SIInstrFlags::VOP3;
245}
246
247bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
248 return get(Opcode).TSFlags & SIInstrFlags::VOPC;
249}
250
251bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
252 if(MO.isImm()) {
253 return MO.getImm() >= -16 && MO.getImm() <= 64;
254 }
255 if (MO.isFPImm()) {
256 return MO.getFPImm()->isExactlyValue(0.0) ||
257 MO.getFPImm()->isExactlyValue(0.5) ||
258 MO.getFPImm()->isExactlyValue(-0.5) ||
259 MO.getFPImm()->isExactlyValue(1.0) ||
260 MO.getFPImm()->isExactlyValue(-1.0) ||
261 MO.getFPImm()->isExactlyValue(2.0) ||
262 MO.getFPImm()->isExactlyValue(-2.0) ||
263 MO.getFPImm()->isExactlyValue(4.0) ||
264 MO.getFPImm()->isExactlyValue(-4.0);
265 }
266 return false;
267}
268
269bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
270 return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
271}
272
273bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
274 StringRef &ErrInfo) const {
275 uint16_t Opcode = MI->getOpcode();
276 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
277 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
278 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
279
280 // Verify VOP*
281 if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
282 unsigned ConstantBusCount = 0;
283 unsigned SGPRUsed = AMDGPU::NoRegister;
284 MI->dump();
285 for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
286 const MachineOperand &MO = MI->getOperand(i);
287 if (MO.isReg() && MO.isUse() &&
288 !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
289
290 // EXEC register uses the constant bus.
291 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
292 ++ConstantBusCount;
293
294 // SGPRs use the constant bus
295 if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
296 (!MO.isImplicit() &&
297 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
298 AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
299 if (SGPRUsed != MO.getReg()) {
300 ++ConstantBusCount;
301 SGPRUsed = MO.getReg();
302 }
303 }
304 }
305 // Literal constants use the constant bus.
306 if (isLiteralConstant(MO))
307 ++ConstantBusCount;
308 }
309 if (ConstantBusCount > 1) {
310 ErrInfo = "VOP* instruction uses the constant bus more than once";
311 return false;
312 }
313 }
314
315 // Verify SRC1 for VOP2 and VOPC
316 if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
317 const MachineOperand &Src1 = MI->getOperand(Src1Idx);
318 if (Src1.isImm()) {
319 ErrInfo = "VOP[2C] src1 cannot be an immediate.";
320 return false;
321 }
322 }
323
324 // Verify VOP3
325 if (isVOP3(Opcode)) {
326 if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
327 ErrInfo = "VOP3 src0 cannot be a literal constant.";
328 return false;
329 }
330 if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
331 ErrInfo = "VOP3 src1 cannot be a literal constant.";
332 return false;
333 }
334 if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
335 ErrInfo = "VOP3 src2 cannot be a literal constant.";
336 return false;
337 }
338 }
339 return true;
340}
341
Tom Stellardf3b2a1e2013-02-06 17:32:29 +0000342//===----------------------------------------------------------------------===//
343// Indirect addressing callbacks
344//===----------------------------------------------------------------------===//
345
346unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
347 unsigned Channel) const {
348 assert(Channel == 0);
349 return RegIndex;
350}
351
352
// Indirect addressing support is not implemented for SI yet.
int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}
356
// Indirect addressing support is not implemented for SI yet.
int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}
360
// Indirect addressing support is not implemented for SI yet.
const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  llvm_unreachable("Unimplemented");
}
365
// Indirect addressing support is not implemented for SI yet.
const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
  llvm_unreachable("Unimplemented");
}
369
// Indirect addressing support is not implemented for SI yet.
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}
377
// Indirect addressing support is not implemented for SI yet.
MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}
385
// Indirect addressing support is not implemented for SI yet.
const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
  llvm_unreachable("Unimplemented");
}