//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm) { }

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

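  // Sub-register index lists, each terminated by a 0 sentinel, used below to
  // expand copies of wide registers into sequences of 32-bit moves.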
  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::M0 == DestReg) {
    // Check whether M0 is already set to this value; if so, the copy is
    // unnecessary.
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary.
      return;
    }
  }

91
Christian Konigd0e3da12013-03-01 09:46:27 +000092 if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
93 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
94 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
95 .addReg(SrcReg, getKillRegState(KillSrc));
96 return;
97
Tom Stellardaac18892013-02-07 19:39:43 +000098 } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
Tom Stellard75aadc22012-12-11 21:25:42 +000099 assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
100 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
101 .addReg(SrcReg, getKillRegState(KillSrc));
Christian Konigd0e3da12013-03-01 09:46:27 +0000102 return;
103
104 } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
105 assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
106 Opcode = AMDGPU::S_MOV_B32;
107 SubIndices = Sub0_3;
108
109 } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
110 assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
111 Opcode = AMDGPU::S_MOV_B32;
112 SubIndices = Sub0_7;
113
114 } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
115 assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
116 Opcode = AMDGPU::S_MOV_B32;
117 SubIndices = Sub0_15;
118
Tom Stellard75aadc22012-12-11 21:25:42 +0000119 } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
120 assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
NAKAMURA Takumi4bb85f92013-10-28 04:07:23 +0000121 AMDGPU::SReg_32RegClass.contains(SrcReg));
Tom Stellard75aadc22012-12-11 21:25:42 +0000122 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
123 .addReg(SrcReg, getKillRegState(KillSrc));
Christian Konigd0e3da12013-03-01 09:46:27 +0000124 return;
125
126 } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
127 assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
NAKAMURA Takumi4bb85f92013-10-28 04:07:23 +0000128 AMDGPU::SReg_64RegClass.contains(SrcReg));
Christian Konigd0e3da12013-03-01 09:46:27 +0000129 Opcode = AMDGPU::V_MOV_B32_e32;
130 SubIndices = Sub0_1;
131
Christian Konig8b1ed282013-04-10 08:39:16 +0000132 } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
133 assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
134 Opcode = AMDGPU::V_MOV_B32_e32;
135 SubIndices = Sub0_2;
136
Christian Konigd0e3da12013-03-01 09:46:27 +0000137 } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
138 assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
NAKAMURA Takumi4bb85f92013-10-28 04:07:23 +0000139 AMDGPU::SReg_128RegClass.contains(SrcReg));
Christian Konigd0e3da12013-03-01 09:46:27 +0000140 Opcode = AMDGPU::V_MOV_B32_e32;
141 SubIndices = Sub0_3;
142
143 } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
144 assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
NAKAMURA Takumi4bb85f92013-10-28 04:07:23 +0000145 AMDGPU::SReg_256RegClass.contains(SrcReg));
Christian Konigd0e3da12013-03-01 09:46:27 +0000146 Opcode = AMDGPU::V_MOV_B32_e32;
147 SubIndices = Sub0_7;
148
149 } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
150 assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
NAKAMURA Takumi4bb85f92013-10-28 04:07:23 +0000151 AMDGPU::SReg_512RegClass.contains(SrcReg));
Christian Konigd0e3da12013-03-01 09:46:27 +0000152 Opcode = AMDGPU::V_MOV_B32_e32;
153 SubIndices = Sub0_15;
154
Tom Stellard75aadc22012-12-11 21:25:42 +0000155 } else {
Christian Konigd0e3da12013-03-01 09:46:27 +0000156 llvm_unreachable("Can't copy register!");
157 }
158
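  // Expand the copy into one 32-bit move per sub-register; e.g., a 128-bit
  // SGPR copy becomes four S_MOV_B32s over sub0..sub3. Every move but the
  // last carries an implicit def of the full DestReg so that liveness of the
  // wide register is tracked correctly.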
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
                                          get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

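// Return the opcode that computes the same result with the operand order
// swapped, mapping in both directions (e.g. between V_SUB_F32 and
// V_SUBREV_F32). Returns the opcode unchanged if no commuted form exists.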
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}

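// Spill SrcReg to the given stack slot. SGPR spills never touch memory here:
// each 32-bit scalar value is written to an unused lane of a reserved VGPR
// with V_WRITELANE_B32, and the SpillTracker records which (VGPR, lane) pair
// holds each frame index. Wider scalar registers are split into 32-bit
// sub-registers and spilled recursively.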
void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned KillFlag = isKill ? RegState::Kill : 0;

  if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
    unsigned Lane = MFI->SpillTracker.getNextLane(MRI);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
            MFI->SpillTracker.LaneVGPR)
            .addReg(SrcReg, KillFlag)
            .addImm(Lane);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
                                    Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, MI, MBB.findDebugLoc(MI), get(AMDGPU::COPY), SubReg)
              .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
      storeRegToStackSlot(MBB, MI, SubReg, isKill, FrameIndex + i,
                          &AMDGPU::SReg_32RegClass, TRI);
    }
  }
}

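// Reload DestReg from the given stack slot. SGPRs come back from the spill
// VGPR lane recorded by the SpillTracker (V_READLANE_B32). Wider registers
// are reassembled 32 bits at a time; the first sub-register write is marked
// undef because it defines only part of DestReg.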
void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  if (TRI->getCommonSubClass(RC, &AMDGPU::SReg_32RegClass)) {
    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);
    assert(Spill.VGPR);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), DestReg)
            .addReg(Spill.VGPR)
            .addImm(Spill.Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned Flags = RegState::Define;
      if (i == 0) {
        Flags |= RegState::Undef;
      }
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      loadRegFromStackSlot(MBB, MI, SubReg, FrameIndex + i,
                           &AMDGPU::SReg_32RegClass, TRI);
      BuildMI(MBB, MI, DL, get(AMDGPU::COPY))
              .addReg(DestReg, Flags, RI.getSubRegFromChannel(i))
              .addReg(SubReg);
    }
  }
}

MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
    return 0;

  // Cannot commute VOP2 if src0 is SGPR.
  if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
      RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
    return 0;

  if (!MI->getOperand(2).isReg()) {
    // XXX: Commute instructions with FPImm operands
    if (NewMI || MI->getOperand(2).isFPImm() ||
        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return 0;
    }

    // XXX: Commute VOP3 instructions with abs and neg set.
    if (isVOP3(MI->getOpcode()) &&
        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::abs)).getImm() ||
         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::neg)).getImm()))
      return 0;

    unsigned Reg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
    MI->getOperand(2).ChangeToRegister(Reg, false);
    MI->getOperand(2).setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
  }

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

bool
SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
    return MI->getOperand(1).isImm();
  }
}

namespace llvm {
namespace AMDGPU {
// Helper function generated by tablegen. We are wrapping this with
// an SIInstrInfo function that returns bool rather than int.
int isDS(uint16_t Opcode);
}
}

bool SIInstrInfo::isDS(uint16_t Opcode) const {
  return ::AMDGPU::isDS(Opcode) != -1;
}

int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}

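// SI inline constants are operands the hardware encodes directly in the
// instruction: integers in [-16, 64] and a small set of floating-point
// values (0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0). Any other immediate must be
// emitted as a 32-bit literal constant.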
bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int32_t Val = Imm.getSExtValue();
  if (Val >= -16 && Val <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  return (APInt::floatToBits(0.0f) == Imm) ||
         (APInt::floatToBits(1.0f) == Imm) ||
         (APInt::floatToBits(-1.0f) == Imm) ||
         (APInt::floatToBits(0.5f) == Imm) ||
         (APInt::floatToBits(-0.5f) == Imm) ||
         (APInt::floatToBits(2.0f) == Imm) ||
         (APInt::floatToBits(-2.0f) == Imm) ||
         (APInt::floatToBits(4.0f) == Imm) ||
         (APInt::floatToBits(-4.0f) == Imm);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm())
    return isInlineConstant(APInt(32, MO.getImm(), true));

  if (MO.isFPImm()) {
    APFloat FpImm = MO.getFPImm()->getValueAPF();
    return isInlineConstant(FpImm.bitcastToAPInt());
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct
  for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      break;
    case MCOI::OPERAND_IMMEDIATE:
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    int RegClass = Desc.OpInfo[i].RegClass;
    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

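  // VOP* instructions can read at most one value over the shared constant
  // bus per instruction; SGPRs, M0, VCC, an explicit EXEC use, and literal
  // constants all count against that limit, which is what the check below
  // enforces.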
  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
             (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
              AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm() || Src1.isFPImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

  // Verify VOP3
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}

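// Map a scalar (SALU) opcode to its vector (VALU) equivalent, or to
// INSTRUCTION_LIST_END if the instruction has no VALU counterpart. An
// S_MOV_B32 whose source is a register is rewritten as a plain COPY.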
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1)
    return MRI.getRegClass(MI.getOperand(OpNo).getReg());

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}

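// Make operand OpIdx legal by moving its value into a fresh virtual register
// of the VGPR class equivalent to the operand's required register class, then
// rewriting the operand to use that register.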
void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg()) {
    Opcode = AMDGPU::COPY;
  } else if (RI.isSGPRClass(RC)) {
    Opcode = AMDGPU::S_MOV_B32;
  }

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  unsigned Reg = MRI.createVirtualRegister(VRC);
  BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
          Reg).addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  assert(SuperReg.isReg());

  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          NewSuperReg)
          .addOperand(SuperReg);

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          SubReg)
          .addReg(NewSuperReg, 0, SubIdx);
  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

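// Expand a move of a 64-bit immediate into two S_MOV_B32s of its low and high
// halves plus a REG_SEQUENCE that reassembles them. The new 32-bit moves are
// pushed onto the worklist so they can be moved to the VALU in turn.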
unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
                                    MachineBasicBlock::iterator MI,
                                    MachineRegisterInfo &MRI,
                                    const TargetRegisterClass *RC,
                                    const MachineOperand &Op) const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned Dst = MRI.createVirtualRegister(RC);

  MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             LoDst)
                     .addImm(Op.getImm() & 0xFFFFFFFF);
  MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             HiDst)
                     .addImm(Op.getImm() >> 32);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
          .addReg(LoDst)
          .addImm(AMDGPU::sub0)
          .addReg(HiDst)
          .addImm(AMDGPU::sub1);

  Worklist.push_back(Lo);
  Worklist.push_back(Hi);

  return Dst;
}

void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  // Legalize VOP2
  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
    MachineOperand &Src0 = MI->getOperand(Src0Idx);
    MachineOperand &Src1 = MI->getOperand(Src1Idx);

    // If the instruction implicitly reads VCC, we can't have any SGPR
    // operands, so move any that are present to VGPRs.
    bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
    if (ReadsVCC && Src0.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
      legalizeOpWithMove(MI, Src0Idx);
      return;
    }

    if (ReadsVCC && Src1.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
      legalizeOpWithMove(MI, Src1Idx);
      return;
    }

    // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
    // be the first operand, and there can only be one.
    if (Src1.isImm() || Src1.isFPImm() ||
        (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
      if (MI->isCommutable()) {
        if (commuteInstruction(MI))
          return;
      }
      legalizeOpWithMove(MI, Src1Idx);
    }
  }

  // XXX - Do any VOP3 instructions read VCC?
  // Legalize VOP3
  if (isVOP3(MI->getOpcode())) {
    int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
    unsigned SGPRReg = AMDGPU::NoRegister;
    for (unsigned i = 0; i < 3; ++i) {
      int Idx = VOP3Idx[i];
      if (Idx == -1)
        continue;
      MachineOperand &MO = MI->getOperand(Idx);

      if (MO.isReg()) {
        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
          continue; // VGPRs are legal

        assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");

        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
          SGPRReg = MO.getReg();
          // We can use one SGPR in each VOP3 instruction.
          continue;
        }
      } else if (!isLiteralConstant(MO)) {
        // If it is not a register and not a literal constant, then it must be
        // an inline constant which is always legal.
        continue;
      }
      // If we make it this far, then the operand is not legal and we must
      // legalize it.
      legalizeOpWithMove(MI, Idx);
    }
  }

  // Legalize REG_SEQUENCE
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be;
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              get(AMDGPU::COPY), DstReg)
              .addOperand(MI->getOperand(i));
      MI->getOperand(i).setReg(DstReg);
    }
  }

  // Legalize MUBUF* instructions
  // FIXME: If we start using the non-addr64 instructions for compute, we
  // may need to legalize them here.

  int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::srsrc);
  int VAddrIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::vaddr);
  if (SRsrcIdx != -1 && VAddrIdx != -1) {
    const TargetRegisterClass *VAddrRC =
        RI.getRegClass(get(MI->getOpcode()).OpInfo[VAddrIdx].RegClass);

    if (VAddrRC->getSize() == 8 &&
        MRI.getRegClass(MI->getOperand(SRsrcIdx).getReg()) != VAddrRC) {
      // We have a MUBUF instruction that uses a 64-bit vaddr register and
      // srsrc has the incorrect register class. In order to fix this, we
      // need to extract the pointer from the resource descriptor (srsrc),
      // add it to the value of vaddr, then store the result in the vaddr
      // operand. Then, we need to set the pointer field of the resource
      // descriptor to zero.

      MachineBasicBlock &MBB = *MI->getParent();
      MachineOperand &SRsrcOp = MI->getOperand(SRsrcIdx);
      MachineOperand &VAddrOp = MI->getOperand(VAddrIdx);
      unsigned SRsrcPtrLo, SRsrcPtrHi, VAddrLo, VAddrHi;
      unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);

      // SRsrcPtrLo = srsrc:sub0
      SRsrcPtrLo = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // SRsrcPtrHi = srsrc:sub1
      SRsrcPtrHi = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // VAddrLo = vaddr:sub0
      VAddrLo = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // VAddrHi = vaddr:sub1
      VAddrHi = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // NewVaddrLo = SRsrcPtrLo + VAddrLo
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
              NewVAddrLo)
              .addReg(SRsrcPtrLo)
              .addReg(VAddrLo)
              .addReg(AMDGPU::VCC, RegState::Define | RegState::Implicit);

      // NewVaddrHi = SRsrcPtrHi + VAddrHi
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
              NewVAddrHi)
              .addReg(SRsrcPtrHi)
              .addReg(VAddrHi)
              .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
              .addReg(AMDGPU::VCC, RegState::Implicit);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
              .addReg(NewVAddrLo)
              .addImm(AMDGPU::sub0)
              .addReg(NewVAddrHi)
              .addImm(AMDGPU::sub1);

      // Zero64 = 0
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
              Zero64)
              .addImm(0);

      // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatLo)
              .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);

      // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatHi)
              .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);

      // NewSRsrc = {Zero64, SRsrcFormat}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewSRsrc)
              .addReg(Zero64)
              .addImm(AMDGPU::sub0_sub1)
              .addReg(SRsrcFormatLo)
              .addImm(AMDGPU::sub2)
              .addReg(SRsrcFormatHi)
              .addImm(AMDGPU::sub3);

      // Update the instruction to use NewVaddr
      MI->getOperand(VAddrIdx).setReg(NewVAddr);
      // Update the instruction to use NewSRsrc
      MI->getOperand(SRsrcIdx).setReg(NewSRsrc);
    }
  }
}

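// Move TopInst from the SALU to the VALU. This works over a worklist: each
// rewritten instruction may produce new 32-bit halves or users that can no
// longer read the VGPR result, and those are pushed back onto the list until
// everything has been legalized.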
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst->getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    // Handle some special cases
    switch (Inst->getOpcode()) {
    case AMDGPU::S_MOV_B64: {
      DebugLoc DL = Inst->getDebugLoc();

      // If the source operand is a register we can replace this with a
      // copy.
      if (Inst->getOperand(1).isReg()) {
        MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
                             .addOperand(Inst->getOperand(0))
                             .addOperand(Inst->getOperand(1));
        Worklist.push_back(Copy);
      } else {
        // Otherwise, we need to split this into two movs, because there is
        // no 64-bit VALU move instruction.
        unsigned Reg = Inst->getOperand(0).getReg();
        unsigned Dst = split64BitImm(Worklist,
                                     Inst,
                                     MRI,
                                     MRI.getRegClass(Reg),
                                     Inst->getOperand(1));
        MRI.replaceRegWith(Reg, Dst);
      }
      Inst->eraseFromParent();
      continue;
    }
    case AMDGPU::S_AND_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_XOR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_NOT_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFE_I64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");
    }

    unsigned NewOpcode = getVALUOp(*Inst);
    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
      legalizeOperands(Inst);
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it,
    // and we're just about to add the implicit use / defs of VCC, and we
    // don't want both.
    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst->getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
        Inst->RemoveOperand(i);
    }

    // Add the implicit and explicit register definitions.
    if (NewDesc.ImplicitUses) {
      for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
        unsigned Reg = NewDesc.ImplicitUses[i];
        Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
      }
    }

    if (NewDesc.ImplicitDefs) {
      for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
        unsigned Reg = NewDesc.ImplicitDefs[i];
        Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }

    legalizeOperands(Inst);

    // Update the destination register class.
    const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);

    switch (Inst->getOpcode()) {
    // For target instructions, getOpRegClass just returns the virtual
    // register class associated with the operand, so we need to find an
    // equivalent VGPR register class in order to move the instruction to the
    // VALU.
    case AMDGPU::COPY:
    case AMDGPU::PHI:
    case AMDGPU::REG_SEQUENCE:
      if (RI.hasVGPRs(NewDstRC))
        continue;
      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        continue;
      break;
    default:
      break;
    }

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
         E = MRI.use_end(); I != E; ++I) {
      MachineInstr &UseMI = *I->getParent();
      if (!canReadVGPR(UseMI, I.getOperandNo())) {
        Worklist.push_back(&UseMI);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VReg_32RegClass;
}

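// Split a 64-bit scalar operation into two 32-bit operations (Opcode) on the
// sub0 and sub1 halves of its operands, then recombine the two results with a
// REG_SEQUENCE. The 32-bit halves go back on the worklist so their operands
// can be legalized.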
void SIInstrInfo::splitScalar64BitOp(SmallVectorImpl<MachineInstr *> &Worklist,
                                     MachineInstr *Inst,
                                     unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  MachineOperand &Src1 = Inst->getOperand(2);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
                                      MRI.getRegClass(Src0.getReg()) :
                                      &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
                                      MRI.getRegClass(Src1.getReg()) :
                                      &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
                         .addOperand(SrcReg0Sub0)
                         .addOperand(SrcReg1Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
                         .addOperand(SrcReg0Sub1)
                         .addOperand(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  Worklist.push_back(LoHalf);
  Worklist.push_back(HiHalf);
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned ValueReg,
                                 unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
          .addReg(IndirectBaseReg, RegState::Define)
          .addOperand(I->getOperand(0))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0)
          .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned ValueReg,
                                 unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
          .addOperand(I->getOperand(0))
          .addOperand(I->getOperand(1))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0);
}

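// Reserve every VGPR tuple that overlaps the index range used for indirect
// addressing. Wider register classes start a few indices earlier so that any
// N-wide register containing an indexed 32-bit lane is also reserved.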
void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}