//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//


#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm) { }

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

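    // On every piece except the last, mark the full destination register as
    // an implicit def so the partially-written super-register is not treated
    // as dead between the sub-register moves.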
    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned KillFlag = isKill ? RegState::Kill : 0;

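  // A 32-bit SGPR is spilled into an unused lane of the tracked spill VGPR
  // with V_WRITELANE_B32; wider scalar classes are split into 32-bit
  // sub-registers and spilled lane by lane through the recursive call below.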
  if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
    unsigned Lane = MFI->SpillTracker.getNextLane(MRI);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
            MFI->SpillTracker.LaneVGPR)
            .addReg(SrcReg, KillFlag)
            .addImm(Lane);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
                                    Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, MI, MBB.findDebugLoc(MI), get(AMDGPU::COPY), SubReg)
              .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
      storeRegToStackSlot(MBB, MI, SubReg, isKill, FrameIndex + i,
                          &AMDGPU::SReg_32RegClass, TRI);
    }
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  if (TRI->getCommonSubClass(RC, &AMDGPU::SReg_32RegClass)) {
    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);
    assert(Spill.VGPR);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), DestReg)
            .addReg(Spill.VGPR)
            .addImm(Spill.Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned Flags = RegState::Define;
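      // The first sub-register write also carries an undef flag, marking the
      // rest of DestReg as undefined so this reload is not treated as a
      // partial redefinition of a live register.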
      if (i == 0) {
        Flags |= RegState::Undef;
      }
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      loadRegFromStackSlot(MBB, MI, SubReg, FrameIndex + i,
                           &AMDGPU::SReg_32RegClass, TRI);
      BuildMI(MBB, MI, DL, get(AMDGPU::COPY))
              .addReg(DestReg, Flags, RI.getSubRegFromChannel(i))
              .addReg(SubReg);
    }
  }
}

MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
    return nullptr;

  // Cannot commute VOP2 if src0 is SGPR.
  if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
      RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
    return nullptr;

  if (!MI->getOperand(2).isReg()) {
    // XXX: Commute instructions with FPImm operands
    if (NewMI || MI->getOperand(2).isFPImm() ||
        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return nullptr;
    }

    // XXX: Commute VOP3 instructions with abs and neg set.
    if (isVOP3(MI->getOpcode()) &&
        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::abs)).getImm() ||
         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::neg)).getImm()))
      return nullptr;

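    // src1 is an immediate, so swap the register and the immediate by hand;
    // the generic commuteInstruction only handles two register operands.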
    unsigned Reg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
    MI->getOperand(2).ChangeToRegister(Reg, false);
    MI->getOperand(2).setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
  }

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

bool
SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  switch(MI->getOpcode()) {
  default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
    return MI->getOperand(1).isImm();
  }
}

namespace llvm {
namespace AMDGPU {
// Helper function generated by tablegen. We are wrapping this with
// an SIInstrInfo function that returns bool rather than int.
int isDS(uint16_t Opcode);
}
}

bool SIInstrInfo::isDS(uint16_t Opcode) const {
  return ::AMDGPU::isDS(Opcode) != -1;
}

int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int32_t Val = Imm.getSExtValue();
  if (Val >= -16 && Val <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  return (APInt::floatToBits(0.0f) == Imm) ||
         (APInt::floatToBits(1.0f) == Imm) ||
         (APInt::floatToBits(-1.0f) == Imm) ||
         (APInt::floatToBits(0.5f) == Imm) ||
         (APInt::floatToBits(-0.5f) == Imm) ||
         (APInt::floatToBits(2.0f) == Imm) ||
         (APInt::floatToBits(-2.0f) == Imm) ||
         (APInt::floatToBits(4.0f) == Imm) ||
         (APInt::floatToBits(-4.0f) == Imm);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm())
    return isInlineConstant(APInt(32, MO.getImm(), true));

  if (MO.isFPImm()) {
    APFloat FpImm = MO.getFPImm()->getValueAPF();
    return isInlineConstant(FpImm.bitcastToAPInt());
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct
  for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      break;
    case MCOI::OPERAND_IMMEDIATE:
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    int RegClass = Desc.OpInfo[i].RegClass;
    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }


  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
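    // A VOP* instruction may read at most one unique value over the constant
    // bus (an SGPR, M0, VCC, EXEC, or a literal constant), so count such
    // reads and reject anything beyond one.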
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
             (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
              AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm() || Src1.isFPImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

  // Verify VOP3
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}

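// Return the opcode to use when moving MI to the VALU, or
// INSTRUCTION_LIST_END if the operation cannot be performed on the VALU.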
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_LOAD_DWORD_SGPR: return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
  case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
  case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1)
    return MRI.getRegClass(MI.getOperand(OpNo).getReg());

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}

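// Legalize the operand at index OpIdx by materializing it into a freshly
// created virtual register of a legal class and rewriting the operand to use
// that register.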
void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg()) {
    Opcode = AMDGPU::COPY;
  } else if (RI.isSGPRClass(RC)) {
    Opcode = AMDGPU::S_MOV_B32;
  }

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  unsigned Reg = MRI.createVirtualRegister(VRC);
  BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
          Reg).addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  assert(SuperReg.isReg());

  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          NewSuperReg)
          .addOperand(SuperReg);

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          SubReg)
          .addReg(NewSuperReg, 0, SubIdx);
  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

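// Split a 64-bit immediate move into two 32-bit S_MOV_B32s feeding a
// REG_SEQUENCE, and queue both halves on the worklist so they can be moved
// to the VALU in turn.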
unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
                                    MachineBasicBlock::iterator MI,
                                    MachineRegisterInfo &MRI,
                                    const TargetRegisterClass *RC,
                                    const MachineOperand &Op) const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned Dst = MRI.createVirtualRegister(RC);

  MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             LoDst)
                     .addImm(Op.getImm() & 0xFFFFFFFF);
  MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             HiDst)
                     .addImm(Op.getImm() >> 32);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
          .addReg(LoDst)
          .addImm(AMDGPU::sub0)
          .addReg(HiDst)
          .addImm(AMDGPU::sub1);

  Worklist.push_back(Lo);
  Worklist.push_back(Hi);

  return Dst;
}

void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  // Legalize VOP2
  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
    MachineOperand &Src0 = MI->getOperand(Src0Idx);
    MachineOperand &Src1 = MI->getOperand(Src1Idx);

    // If the instruction implicitly reads VCC, we can't have any SGPR
    // operands, so move any that are present to VGPRs.
    bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
    if (ReadsVCC && Src0.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
      legalizeOpWithMove(MI, Src0Idx);
      return;
    }

    if (ReadsVCC && Src1.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
      legalizeOpWithMove(MI, Src1Idx);
      return;
    }

    // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
    // be the first operand, and there can only be one.
    if (Src1.isImm() || Src1.isFPImm() ||
        (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
      if (MI->isCommutable()) {
        if (commuteInstruction(MI))
          return;
      }
      legalizeOpWithMove(MI, Src1Idx);
    }
  }

  // XXX - Do any VOP3 instructions read VCC?
  // Legalize VOP3
  if (isVOP3(MI->getOpcode())) {
    int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
    unsigned SGPRReg = AMDGPU::NoRegister;
    for (unsigned i = 0; i < 3; ++i) {
      int Idx = VOP3Idx[i];
      if (Idx == -1)
        continue;
      MachineOperand &MO = MI->getOperand(Idx);

      if (MO.isReg()) {
        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
          continue; // VGPRs are legal

        assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");

        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
          SGPRReg = MO.getReg();
          // We can use one SGPR in each VOP3 instruction.
          continue;
        }
      } else if (!isLiteralConstant(MO)) {
        // If it is not a register and not a literal constant, then it must be
        // an inline constant which is always legal.
        continue;
      }
      // If we make it this far, then the operand is not legal and we must
      // legalize it.
      legalizeOpWithMove(MI, Idx);
    }
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
      MI->getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be,
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);
      MachineBasicBlock *InsertBB;
      MachineBasicBlock::iterator Insert;
      if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
        InsertBB = MI->getParent();
        Insert = MI;
      } else {
        // MI is a PHI instruction.
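        // A copy feeding a PHI operand must be inserted in the corresponding
        // incoming block, before that block's terminator.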
        InsertBB = MI->getOperand(i + 1).getMBB();
        Insert = InsertBB->getFirstTerminator();
      }
      BuildMI(*InsertBB, Insert, MI->getDebugLoc(),
              get(AMDGPU::COPY), DstReg)
              .addOperand(MI->getOperand(i));
      MI->getOperand(i).setReg(DstReg);
    }
  }

  // Legalize MUBUF* instructions
  // FIXME: If we start using the non-addr64 instructions for compute, we
  // may need to legalize them here.

  int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::srsrc);
  int VAddrIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::vaddr);
  if (SRsrcIdx != -1 && VAddrIdx != -1) {
    const TargetRegisterClass *VAddrRC =
        RI.getRegClass(get(MI->getOpcode()).OpInfo[VAddrIdx].RegClass);

    if (VAddrRC->getSize() == 8 &&
        MRI.getRegClass(MI->getOperand(SRsrcIdx).getReg()) != VAddrRC) {
      // We have a MUBUF instruction that uses a 64-bit vaddr register and
      // srsrc has the incorrect register class. In order to fix this, we
      // need to extract the pointer from the resource descriptor (srsrc),
      // add it to the value of vaddr, then store the result in the vaddr
      // operand. Then, we need to set the pointer field of the resource
      // descriptor to zero.

      MachineBasicBlock &MBB = *MI->getParent();
      MachineOperand &SRsrcOp = MI->getOperand(SRsrcIdx);
      MachineOperand &VAddrOp = MI->getOperand(VAddrIdx);
      unsigned SRsrcPtrLo, SRsrcPtrHi, VAddrLo, VAddrHi;
      unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);

      // SRsrcPtrLo = srsrc:sub0
      SRsrcPtrLo = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // SRsrcPtrHi = srsrc:sub1
      SRsrcPtrHi = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // VAddrLo = vaddr:sub0
      VAddrLo = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // VAddrHi = vaddr:sub1
      VAddrHi = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // NewVaddrLo = SRsrcPtrLo + VAddrLo
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
              NewVAddrLo)
              .addReg(SRsrcPtrLo)
              .addReg(VAddrLo)
              .addReg(AMDGPU::VCC, RegState::Define | RegState::Implicit);

      // NewVaddrHi = SRsrcPtrHi + VAddrHi
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
              NewVAddrHi)
              .addReg(SRsrcPtrHi)
              .addReg(VAddrHi)
              .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
              .addReg(AMDGPU::VCC, RegState::Implicit);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
              .addReg(NewVAddrLo)
              .addImm(AMDGPU::sub0)
              .addReg(NewVAddrHi)
              .addImm(AMDGPU::sub1);

      // Zero64 = 0
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
              Zero64)
              .addImm(0);

      // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatLo)
              .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);

      // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatHi)
              .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);

      // NewSRsrc = {Zero64, SRsrcFormat}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewSRsrc)
              .addReg(Zero64)
              .addImm(AMDGPU::sub0_sub1)
              .addReg(SRsrcFormatLo)
              .addImm(AMDGPU::sub2)
              .addReg(SRsrcFormatHi)
              .addImm(AMDGPU::sub3);

      // Update the instruction to use NewVaddr
      MI->getOperand(VAddrIdx).setReg(NewVAddr);
      // Update the instruction to use NewSRsrc
      MI->getOperand(SRsrcIdx).setReg(NewSRsrc);
    }
  }
}

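// Rewrite an SMRD load as the equivalent MUBUF _ADDR64 load: build a buffer
// resource descriptor whose first dword is the old SGPR offset and whose
// upper dwords hold the default RSRC_DATA_FORMAT, then move the old 64-bit
// base into the address operand.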
void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) const {
  MachineBasicBlock *MBB = MI->getParent();
  switch (MI->getOpcode()) {
  case AMDGPU::S_LOAD_DWORD_SGPR:
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
  case AMDGPU::S_LOAD_DWORDX4_SGPR:
    unsigned NewOpcode = getVALUOp(*MI);
    unsigned Offset = MI->getOperand(2).getReg();

    unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
    unsigned DWord0 = Offset;
    unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
            .addImm(0);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
            .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
            .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
            .addReg(DWord0)
            .addImm(AMDGPU::sub0)
            .addReg(DWord1)
            .addImm(AMDGPU::sub1)
            .addReg(DWord2)
            .addImm(AMDGPU::sub2)
            .addReg(DWord3)
            .addImm(AMDGPU::sub3);
    MI->setDesc(get(NewOpcode));
    MI->getOperand(2).setReg(MI->getOperand(1).getReg());
    MI->getOperand(1).setReg(SRsrc);
    MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0));
  }
}

void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst->getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst->getOpcode();
    unsigned NewOpcode = getVALUOp(*Inst);

    // Handle some special cases
    switch (Opcode) {
    default:
      if (isSMRD(Inst->getOpcode())) {
        moveSMRDToVALU(Inst, MRI);
      }
      break;
    case AMDGPU::S_MOV_B64: {
      DebugLoc DL = Inst->getDebugLoc();

      // If the source operand is a register we can replace this with a
      // copy.
      if (Inst->getOperand(1).isReg()) {
        MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
                             .addOperand(Inst->getOperand(0))
                             .addOperand(Inst->getOperand(1));
        Worklist.push_back(Copy);
      } else {
        // Otherwise, we need to split this into two movs, because there is
        // no 64-bit VALU move instruction.
        unsigned Reg = Inst->getOperand(0).getReg();
        unsigned Dst = split64BitImm(Worklist,
                                     Inst,
                                     MRI,
                                     MRI.getRegClass(Reg),
                                     Inst->getOperand(1));
        MRI.replaceRegWith(Reg, Dst);
      }
      Inst->eraseFromParent();
      continue;
    }
    case AMDGPU::S_AND_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_XOR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_NOT_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFE_I64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");
    }

    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
      legalizeOperands(Inst);
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it,
    // and we're just about to add the implicit use / defs of VCC, and we
    // don't want both.
    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst->getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
        Inst->RemoveOperand(i);
    }

    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      // We are converting these to a BFE, so we need to add the missing
      // operands for the size and offset.
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(Size));

      // XXX - Other pointless operands. There are 4, but it seems you only
      // need 3 to not hit an assertion later in MCInstLower.
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    addDescImplicitUseDef(NewDesc, Inst);

    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
      const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
      // If we need to move this to VGPRs, we need to unpack the second operand
      // back into the 2 separate ones for bit offset and width.
      assert(OffsetWidthOp.isImm() &&
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t Imm = OffsetWidthOp.getImm();

      uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

      Inst->RemoveOperand(2); // Remove old immediate.
      Inst->addOperand(MachineOperand::CreateImm(Offset));
      Inst->addOperand(MachineOperand::CreateImm(BitWidth));

      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    // Update the destination register class.

    const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);

    switch (Opcode) {
    // For target instructions, getOpRegClass just returns the virtual
    // register class associated with the operand, so we need to find an
    // equivalent VGPR register class in order to move the instruction to the
    // VALU.
    case AMDGPU::COPY:
    case AMDGPU::PHI:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::INSERT_SUBREG:
      if (RI.hasVGPRs(NewDstRC))
        continue;
      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        continue;
      break;
    default:
      break;
    }

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    // Legalize the operands
    legalizeOperands(Inst);

    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
         E = MRI.use_end(); I != E; ++I) {
      MachineInstr &UseMI = *I->getParent();
      if (!canReadVGPR(UseMI, I.getOperandNo())) {
        Worklist.push_back(&UseMI);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VReg_32RegClass;
}

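// Split a 64-bit scalar operation into 32-bit operations on the sub0 and
// sub1 halves of the operands, then recombine the results with a
// REG_SEQUENCE.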
void SIInstrInfo::splitScalar64BitOp(SmallVectorImpl<MachineInstr *> &Worklist,
                                     MachineInstr *Inst,
                                     unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  MachineOperand &Src1 = Inst->getOperand(2);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
                                      MRI.getRegClass(Src0.getReg()) :
                                      &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
                                      MRI.getRegClass(Src1.getReg()) :
                                      &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
                         .addOperand(SrcReg0Sub0)
                         .addOperand(SrcReg1Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
                         .addOperand(SrcReg0Sub1)
                         .addOperand(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  Worklist.push_back(LoHalf);
  Worklist.push_back(HiHalf);
}

void SIInstrInfo::addDescImplicitUseDef(const MCInstrDesc &NewDesc,
                                        MachineInstr *Inst) const {
  // Add the implicit and explicit register definitions.
  if (NewDesc.ImplicitUses) {
    for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitUses[i];
      Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
    }
  }

  if (NewDesc.ImplicitDefs) {
    for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitDefs[i];
      Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned ValueReg,
                                 unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                 getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
          .addReg(IndirectBaseReg, RegState::Define)
          .addOperand(I->getOperand(0))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0)
          .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned ValueReg,
                                 unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                 getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
          .addOperand(I->getOperand(0))
          .addOperand(I->getOperand(1))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0);
}

void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));

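  // Wider register tuples that merely overlap the reserved index range must
  // be reserved too, which is why each loop below starts up to N-1 registers
  // below Begin.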
  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}