//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm)
    { }

const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
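
  // Copies within the 32-bit and 64-bit register classes below map onto a
  // single hardware mov. The wider classes have no single mov instruction,
  // so they are decomposed into one 32-bit mov per sub-register, driven by
  // these zero-terminated sub-index lists.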
  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

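  // M0 is written often (it is used, for example, when addressing local
  // memory), so before emitting a copy to it, check whether a preceding
  // mov or copy already set it to the requested value.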
  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

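  // Expand the copy into one 32-bit mov per sub-register. Every mov except
  // the last also implicitly defines the full destination register, keeping
  // DestReg live until the final piece of the copy has been written.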
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
                                          get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

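// Instructions with a "reverse" form (e.g. V_SUB_F32 vs. V_SUBREV_F32) read
// src0 and src1 in opposite orders, so commuting one also requires switching
// to its counterpart opcode. The generated tables map each form to the other
// in both directions.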
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {

  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}

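// Commuting is only attempted when both candidate operands are registers;
// several VOP encodings cannot hold an immediate in src1, so swapping an
// immediate into that slot could produce an unencodable instruction.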
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
      !MI->getOperand(2).isReg())
    return 0;

  MI = TargetInstrInfo::commuteInstruction(MI, NewMI);

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  assert(!"Not Implemented");
  return NULL;
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

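// EXEC is the lane mask consulted by every vector instruction, so moving a
// definition of it would change which threads execute the surrounding code.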
bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

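// Encoding-family queries: each instruction definition records its format
// (MIMG, SMRD, VOP1, ...) as a bit in TSFlags, so these helpers are simple
// mask tests.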
int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

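// The hardware can fold a small set of operand values directly into the
// instruction word: integers from -16 to 64 and a handful of floating-point
// constants. Anything else has to be emitted as a separate 32-bit literal.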
bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm()) {
    return MO.getImm() >= -16 && MO.getImm() <= 64;
  }
  if (MO.isFPImm()) {
    return MO.getFPImm()->isExactlyValue(0.0) ||
           MO.getFPImm()->isExactlyValue(0.5) ||
           MO.getFPImm()->isExactlyValue(-0.5) ||
           MO.getFPImm()->isExactlyValue(1.0) ||
           MO.getFPImm()->isExactlyValue(-1.0) ||
           MO.getFPImm()->isExactlyValue(2.0) ||
           MO.getFPImm()->isExactlyValue(-2.0) ||
           MO.getFPImm()->isExactlyValue(4.0) ||
           MO.getFPImm()->isExactlyValue(-4.0);
  }
  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

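  // SGPRs and literal constants are read over a single shared constant bus,
  // so a VALU instruction may use at most one distinct SGPR value or one
  // literal. Count the users and reject instructions that need more.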
  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
             (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
              AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

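  // The 64-bit VOP3 encoding appears to leave no room for a trailing literal
  // dword (its extra bits encode source modifiers), so literal constants are
  // rejected on every source operand.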
  // Verify VOP3
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

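// Unlike R600, where an indirect index selects a channel within a wider
// register, SI register files are flat, so an indirect address is simply the
// register index itself. The remaining indirect-addressing hooks are not
// implemented for SI yet.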
unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}

int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  llvm_unreachable("Unimplemented");
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}
375}