//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
  : AMDGPUInstrInfo(st),
    RI(st) { }

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value.
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary.
      return;
    }
  }

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

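  // Multi-register copy: emit one 32-bit move per sub-register. Every move
  // except the last also marks DestReg as an implicit def so the full
  // register is treated as live across the whole expansion.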
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
                                          get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode.
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode.
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}

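// SGPR spills are implemented by writing each 32-bit value into a lane of a
// reserved VGPR with V_WRITELANE_B32. Spilling actual VGPRs to memory is not
// implemented yet; we emit an error and a dummy move so code generation can
// continue.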
void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned KillFlag = isKill ? RegState::Kill : 0;

  if (RI.hasVGPRs(RC)) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Can't spill VGPR!");
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), AMDGPU::VGPR0)
            .addReg(SrcReg);
  } else if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
    unsigned Lane = MFI->SpillTracker.reserveLanes(MRI, MF);
    unsigned TgtReg = MFI->SpillTracker.LaneVGPR;

    BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32), TgtReg)
            .addReg(SrcReg, KillFlag)
            .addImm(Lane);
    MFI->SpillTracker.addSpilledReg(FrameIndex, TgtReg, Lane);
  } else if (RI.isSGPRClass(RC)) {
    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for vector
    // registers.
    //
    // Reserve a spot in the spill tracker for each sub-register of
    // the vector register.
    unsigned NumSubRegs = RC->getSize() / 4;
    unsigned FirstLane = MFI->SpillTracker.reserveLanes(MRI, MF, NumSubRegs);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
                                    FirstLane);

    unsigned Opcode;
    switch (RC->getSize() * 8) {
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_SAVE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
    default: llvm_unreachable("Cannot spill register class");
    }

    BuildMI(MBB, MI, DL, get(Opcode), MFI->SpillTracker.LaneVGPR)
            .addReg(SrcReg)
            .addImm(FrameIndex);
  } else {
    llvm_unreachable("VGPR spilling not supported");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);

  if (RI.hasVGPRs(RC)) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Can't retrieve spilled VGPR!");
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addImm(0);
  } else if (RI.isSGPRClass(RC)) {
    unsigned Opcode;
    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_S32_RESTORE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_RESTORE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
    default: llvm_unreachable("Cannot spill register class");
    }

    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);

    BuildMI(MBB, MI, DL, get(Opcode), DestReg)
            .addReg(Spill.VGPR)
            .addImm(FrameIndex);
  } else {
    llvm_unreachable("VGPR spilling not supported");
  }
}

static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

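// Insert Count NOPs. An S_NOP with immediate N behaves like N+1 NOPs (N is
// limited to the range 0..7), so we emit one S_NOP per group of eight.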
void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
                             int Count) const {
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
            .addImm(Arg);
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  SIMachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);

  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
    unsigned FrameIndex = MI->getOperand(2).getImm();

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      SIMachineFunctionInfo::SpilledReg Spill;
      unsigned SubReg = RI.getPhysRegSubReg(MI->getOperand(1).getReg(),
                                            &AMDGPU::SGPR_32RegClass, i);
      Spill = MFI->SpillTracker.getSpilledReg(FrameIndex);

      BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
              MI->getOperand(0).getReg())
              .addReg(SubReg)
              .addImm(Spill.Lane + i);
    }
    MI->eraseFromParent();
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      SIMachineFunctionInfo::SpilledReg Spill;
      unsigned FrameIndex = MI->getOperand(2).getImm();
      unsigned SubReg = RI.getPhysRegSubReg(MI->getOperand(0).getReg(),
                                            &AMDGPU::SGPR_32RegClass, i);
      Spill = MFI->SpillTracker.getSpilledReg(FrameIndex);

      BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), SubReg)
              .addReg(MI->getOperand(1).getReg())
              .addImm(Spill.Lane + i);
    }
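    // Assumption: the SGPRs written by V_READLANE_B32 appear to need a few
    // wait states before they can be read back, so pad with NOPs.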
    insertNOPs(MI, 3);
    MI->eraseFromParent();
    break;
  }
  }
  return true;
}

MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
    return nullptr;

  // Cannot commute VOP2 if src0 is SGPR.
  if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
      RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
    return nullptr;

  if (!MI->getOperand(2).isReg()) {
    // XXX: Commute instructions with FPImm operands.
    if (NewMI || MI->getOperand(2).isFPImm() ||
        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return nullptr;
    }

    // XXX: Commute VOP3 instructions with abs and neg set.
    if (isVOP3(MI->getOpcode()) &&
        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::abs)).getImm() ||
         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::neg)).getImm()))
      return nullptr;

    unsigned Reg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
    MI->getOperand(2).ChangeToRegister(Reg, false);
    MI->getOperand(2).setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
  }

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

bool
SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
    return MI->getOperand(1).isImm();
  }
}

namespace llvm {
namespace AMDGPU {
// Helper function generated by tablegen. We are wrapping this with
// an SIInstrInfo function that returns bool rather than int.
int isDS(uint16_t Opcode);
}
}

bool SIInstrInfo::isDS(uint16_t Opcode) const {
  return ::AMDGPU::isDS(Opcode) != -1;
}

int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int32_t Val = Imm.getSExtValue();
  if (Val >= -16 && Val <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  return (APInt::floatToBits(0.0f) == Imm) ||
         (APInt::floatToBits(1.0f) == Imm) ||
         (APInt::floatToBits(-1.0f) == Imm) ||
         (APInt::floatToBits(0.5f) == Imm) ||
         (APInt::floatToBits(-0.5f) == Imm) ||
         (APInt::floatToBits(2.0f) == Imm) ||
         (APInt::floatToBits(-2.0f) == Imm) ||
         (APInt::floatToBits(4.0f) == Imm) ||
         (APInt::floatToBits(-4.0f) == Imm);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm())
    return isInlineConstant(APInt(32, MO.getImm(), true));

  if (MO.isFPImm()) {
    APFloat FpImm = MO.getFPImm()->getValueAPF();
    return isInlineConstant(FpImm.bitcastToAPInt());
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct.
  for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      break;
    case MCOI::OPERAND_IMMEDIATE:
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    int RegClass = Desc.OpInfo[i].RegClass;
    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
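    // A VALU instruction may read at most one value over the scalar constant
    // bus: a single SGPR, M0, VCC, EXEC, or one literal constant.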
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus.
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
             (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
              AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC.
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm() || Src1.isFPImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

  // Verify VOP3.
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}

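// Return the VALU opcode corresponding to the given SALU opcode, or
// INSTRUCTION_LIST_END if the instruction has no VALU equivalent.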
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR: return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e32;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1)
    return MRI.getRegClass(MI.getOperand(OpNo).getReg());

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
  case AMDGPU::INSERT_SUBREG:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg()) {
    Opcode = AMDGPU::COPY;
  } else if (RI.isSGPRClass(RC)) {
    Opcode = AMDGPU::S_MOV_B32;
  }

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  unsigned Reg = MRI.createVirtualRegister(VRC);
  BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
          Reg).addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  assert(SuperReg.isReg());

  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          NewSuperReg)
          .addOperand(SuperReg);

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          SubReg)
          .addReg(NewSuperReg, 0, SubIdx);
  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

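// Split a 64-bit immediate into two S_MOV_B32 halves joined by a
// REG_SEQUENCE. There is no 64-bit VALU move, so the two halves are pushed
// onto the worklist to be moved to the VALU individually.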
unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
                                    MachineBasicBlock::iterator MI,
                                    MachineRegisterInfo &MRI,
                                    const TargetRegisterClass *RC,
                                    const MachineOperand &Op) const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned Dst = MRI.createVirtualRegister(RC);

  MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             LoDst)
                     .addImm(Op.getImm() & 0xFFFFFFFF);
  MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             HiDst)
                     .addImm(Op.getImm() >> 32);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
          .addReg(LoDst)
          .addImm(AMDGPU::sub0)
          .addReg(HiDst)
          .addImm(AMDGPU::sub1);

  Worklist.push_back(Lo);
  Worklist.push_back(Hi);

  return Dst;
}

void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  // Legalize VOP2
  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
    MachineOperand &Src0 = MI->getOperand(Src0Idx);
    MachineOperand &Src1 = MI->getOperand(Src1Idx);

    // If the instruction implicitly reads VCC, we can't have any SGPR
    // operands, so move any that we find.
    bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
    if (ReadsVCC && Src0.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
      legalizeOpWithMove(MI, Src0Idx);
      return;
    }

    if (ReadsVCC && Src1.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
      legalizeOpWithMove(MI, Src1Idx);
      return;
    }

    // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
    // be the first operand, and there can only be one.
    if (Src1.isImm() || Src1.isFPImm() ||
        (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
      if (MI->isCommutable()) {
        if (commuteInstruction(MI))
          return;
      }
      legalizeOpWithMove(MI, Src1Idx);
    }
  }

  // XXX - Do any VOP3 instructions read VCC?
  // Legalize VOP3
  if (isVOP3(MI->getOpcode())) {
    int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
    unsigned SGPRReg = AMDGPU::NoRegister;
    for (unsigned i = 0; i < 3; ++i) {
      int Idx = VOP3Idx[i];
      if (Idx == -1)
        continue;
      MachineOperand &MO = MI->getOperand(Idx);

      if (MO.isReg()) {
        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
          continue; // VGPRs are legal

        assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");

        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
          SGPRReg = MO.getReg();
          // We can use one SGPR in each VOP3 instruction.
          continue;
        }
      } else if (!isLiteralConstant(MO)) {
        // If it is not a register and not a literal constant, then it must be
        // an inline constant which is always legal.
        continue;
      }
      // If we make it this far, then the operand is not legal and we must
      // legalize it.
      legalizeOpWithMove(MI, Idx);
    }
  }

Tom Stellard82166022013-11-13 23:36:37 +0000889 // The register class of the operands much be the same type as the register
890 // class of the output.
Tom Stellard4f3b04d2014-04-17 21:00:07 +0000891 if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
892 MI->getOpcode() == AMDGPU::PHI) {
Craig Topper062a2ba2014-04-25 05:30:21 +0000893 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
Tom Stellard82166022013-11-13 23:36:37 +0000894 for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
895 if (!MI->getOperand(i).isReg() ||
896 !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
897 continue;
898 const TargetRegisterClass *OpRC =
899 MRI.getRegClass(MI->getOperand(i).getReg());
900 if (RI.hasVGPRs(OpRC)) {
901 VRC = OpRC;
902 } else {
903 SRC = OpRC;
904 }
905 }
906
907 // If any of the operands are VGPR registers, then they all most be
908 // otherwise we will create illegal VGPR->SGPR copies when legalizing
909 // them.
910 if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
911 if (!VRC) {
912 assert(SRC);
913 VRC = RI.getEquivalentVGPRClass(SRC);
914 }
915 RC = VRC;
916 } else {
917 RC = SRC;
918 }
919
920 // Update all the operands so they have the same type.
921 for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
922 if (!MI->getOperand(i).isReg() ||
923 !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
924 continue;
925 unsigned DstReg = MRI.createVirtualRegister(RC);
Tom Stellard4f3b04d2014-04-17 21:00:07 +0000926 MachineBasicBlock *InsertBB;
927 MachineBasicBlock::iterator Insert;
928 if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
929 InsertBB = MI->getParent();
930 Insert = MI;
931 } else {
932 // MI is a PHI instruction.
933 InsertBB = MI->getOperand(i + 1).getMBB();
934 Insert = InsertBB->getFirstTerminator();
935 }
936 BuildMI(*InsertBB, Insert, MI->getDebugLoc(),
Tom Stellard82166022013-11-13 23:36:37 +0000937 get(AMDGPU::COPY), DstReg)
938 .addOperand(MI->getOperand(i));
939 MI->getOperand(i).setReg(DstReg);
940 }
941 }
Tom Stellard15834092014-03-21 15:51:57 +0000942
  // Legalize INSERT_SUBREG
  // src0 must have the same register class as dst.
  if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned Src0 = MI->getOperand(1).getReg();
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
    if (DstRC != Src0RC) {
      MachineBasicBlock &MBB = *MI->getParent();
      unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
              .addReg(Src0);
      MI->getOperand(1).setReg(NewSrc0);
    }
    return;
  }

  // Legalize MUBUF* instructions
  // FIXME: If we start using the non-addr64 instructions for compute, we
  // may need to legalize them here.

  int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::srsrc);
  int VAddrIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::vaddr);
  if (SRsrcIdx != -1 && VAddrIdx != -1) {
    const TargetRegisterClass *VAddrRC =
        RI.getRegClass(get(MI->getOpcode()).OpInfo[VAddrIdx].RegClass);

    if (VAddrRC->getSize() == 8 &&
        MRI.getRegClass(MI->getOperand(SRsrcIdx).getReg()) != VAddrRC) {
      // We have a MUBUF instruction that uses a 64-bit vaddr register and
      // srsrc has the incorrect register class. In order to fix this, we
      // need to extract the pointer from the resource descriptor (srsrc),
      // add it to the value of vaddr, then store the result in the vaddr
      // operand. Then, we need to set the pointer field of the resource
      // descriptor to zero.

      MachineBasicBlock &MBB = *MI->getParent();
      MachineOperand &SRsrcOp = MI->getOperand(SRsrcIdx);
      MachineOperand &VAddrOp = MI->getOperand(VAddrIdx);
      unsigned SRsrcPtrLo, SRsrcPtrHi, VAddrLo, VAddrHi;
      unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);

      // SRsrcPtrLo = srsrc:sub0
      SRsrcPtrLo = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // SRsrcPtrHi = srsrc:sub1
      SRsrcPtrHi = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // VAddrLo = vaddr:sub0
      VAddrLo = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // VAddrHi = vaddr:sub1
      VAddrHi = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // NewVaddrLo = SRsrcPtrLo + VAddrLo
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
              NewVAddrLo)
              .addReg(SRsrcPtrLo)
              .addReg(VAddrLo)
              .addReg(AMDGPU::VCC, RegState::Define | RegState::Implicit);

      // NewVaddrHi = SRsrcPtrHi + VAddrHi
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
              NewVAddrHi)
              .addReg(SRsrcPtrHi)
              .addReg(VAddrHi)
              .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
              .addReg(AMDGPU::VCC, RegState::Implicit);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
              .addReg(NewVAddrLo)
              .addImm(AMDGPU::sub0)
              .addReg(NewVAddrHi)
              .addImm(AMDGPU::sub1);

      // Zero64 = 0
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
              Zero64)
              .addImm(0);

      // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatLo)
              .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);

      // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatHi)
              .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);

      // NewSRsrc = {Zero64, SRsrcFormat}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewSRsrc)
              .addReg(Zero64)
              .addImm(AMDGPU::sub0_sub1)
              .addReg(SRsrcFormatLo)
              .addImm(AMDGPU::sub2)
              .addReg(SRsrcFormatHi)
              .addImm(AMDGPU::sub3);

      // Update the instruction to use NewVaddr.
      MI->getOperand(VAddrIdx).setReg(NewVAddr);
      // Update the instruction to use NewSRsrc.
      MI->getOperand(SRsrcIdx).setReg(NewSRsrc);
    }
  }
}

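// Rewrite an SMRD (scalar memory read) as the equivalent MUBUF ADDR64 load:
// synthesize a 128-bit resource descriptor whose first dword is the offset
// and whose upper dwords are 0 and RSRC_DATA_FORMAT, then move the original
// base pointer into the 64-bit vaddr operand.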
void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI,
                                 MachineRegisterInfo &MRI) const {
  MachineBasicBlock *MBB = MI->getParent();
  switch (MI->getOpcode()) {
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR:
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR:
    unsigned NewOpcode = getVALUOp(*MI);
    unsigned RegOffset;
    unsigned ImmOffset;

    if (MI->getOperand(2).isReg()) {
      RegOffset = MI->getOperand(2).getReg();
      ImmOffset = 0;
    } else {
      assert(MI->getOperand(2).isImm());
      // SMRD instructions take a dword offset, while MUBUF instructions
      // take a byte offset.
      ImmOffset = MI->getOperand(2).getImm() << 2;
      RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      if (isUInt<12>(ImmOffset)) {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
                .addImm(0);
      } else {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
                .addImm(ImmOffset);
        ImmOffset = 0;
      }
    }

    unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
    unsigned DWord0 = RegOffset;
    unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
            .addImm(0);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
            .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
            .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
            .addReg(DWord0)
            .addImm(AMDGPU::sub0)
            .addReg(DWord1)
            .addImm(AMDGPU::sub1)
            .addReg(DWord2)
            .addImm(AMDGPU::sub2)
            .addReg(DWord3)
            .addImm(AMDGPU::sub3);
    MI->setDesc(get(NewOpcode));
    if (MI->getOperand(2).isReg()) {
      MI->getOperand(2).setReg(MI->getOperand(1).getReg());
    } else {
      MI->getOperand(2).ChangeToRegister(MI->getOperand(1).getReg(), false);
    }
    MI->getOperand(1).setReg(SRsrc);
    MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(ImmOffset));
  }
}

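// Move a SALU instruction to the VALU. Uses a worklist because converting
// one instruction's result to a VGPR can force each of its SALU users to be
// converted as well.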
Tom Stellard82166022013-11-13 23:36:37 +00001131void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
1132 SmallVector<MachineInstr *, 128> Worklist;
1133 Worklist.push_back(&TopInst);
1134
1135 while (!Worklist.empty()) {
1136 MachineInstr *Inst = Worklist.pop_back_val();
Tom Stellarde0387202014-03-21 15:51:54 +00001137 MachineBasicBlock *MBB = Inst->getParent();
1138 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1139
Matt Arsenault27cc9582014-04-18 01:53:18 +00001140 unsigned Opcode = Inst->getOpcode();
Tom Stellard0c354f22014-04-30 15:31:29 +00001141 unsigned NewOpcode = getVALUOp(*Inst);
Matt Arsenault27cc9582014-04-18 01:53:18 +00001142
Tom Stellarde0387202014-03-21 15:51:54 +00001143 // Handle some special cases
Matt Arsenault27cc9582014-04-18 01:53:18 +00001144 switch (Opcode) {
Tom Stellard0c354f22014-04-30 15:31:29 +00001145 default:
1146 if (isSMRD(Inst->getOpcode())) {
1147 moveSMRDToVALU(Inst, MRI);
1148 }
1149 break;
Matt Arsenaultbd995802014-03-24 18:26:52 +00001150 case AMDGPU::S_MOV_B64: {
1151 DebugLoc DL = Inst->getDebugLoc();
Tom Stellarde0387202014-03-21 15:51:54 +00001152
Matt Arsenaultbd995802014-03-24 18:26:52 +00001153 // If the source operand is a register we can replace this with a
1154 // copy.
1155 if (Inst->getOperand(1).isReg()) {
1156 MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
1157 .addOperand(Inst->getOperand(0))
1158 .addOperand(Inst->getOperand(1));
1159 Worklist.push_back(Copy);
1160 } else {
1161 // Otherwise, we need to split this into two movs, because there is
1162 // no 64-bit VALU move instruction.
1163 unsigned Reg = Inst->getOperand(0).getReg();
1164 unsigned Dst = split64BitImm(Worklist,
1165 Inst,
1166 MRI,
1167 MRI.getRegClass(Reg),
1168 Inst->getOperand(1));
1169 MRI.replaceRegWith(Reg, Dst);
Tom Stellarde0387202014-03-21 15:51:54 +00001170 }
Matt Arsenaultbd995802014-03-24 18:26:52 +00001171 Inst->eraseFromParent();
1172 continue;
1173 }
Matt Arsenaultf35182c2014-03-24 20:08:05 +00001174 case AMDGPU::S_AND_B64:
Matt Arsenault689f3252014-06-09 16:36:31 +00001175 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00001176 Inst->eraseFromParent();
1177 continue;
1178
1179 case AMDGPU::S_OR_B64:
Matt Arsenault689f3252014-06-09 16:36:31 +00001180 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00001181 Inst->eraseFromParent();
1182 continue;
1183
1184 case AMDGPU::S_XOR_B64:
Matt Arsenault689f3252014-06-09 16:36:31 +00001185 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00001186 Inst->eraseFromParent();
1187 continue;
1188
1189 case AMDGPU::S_NOT_B64:
Matt Arsenault689f3252014-06-09 16:36:31 +00001190 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00001191 Inst->eraseFromParent();
1192 continue;
1193
Matt Arsenault8333e432014-06-10 19:18:24 +00001194 case AMDGPU::S_BCNT1_I32_B64:
1195 splitScalar64BitBCNT(Worklist, Inst);
1196 Inst->eraseFromParent();
1197 continue;
1198
Matt Arsenaultf35182c2014-03-24 20:08:05 +00001199 case AMDGPU::S_BFE_U64:
1200 case AMDGPU::S_BFE_I64:
1201 case AMDGPU::S_BFM_B64:
1202 llvm_unreachable("Moving this op to VALU not implemented");
Tom Stellarde0387202014-03-21 15:51:54 +00001203 }
1204
Tom Stellard15834092014-03-21 15:51:57 +00001205 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
1206 // We cannot move this instruction to the VALU, so we should try to
1207 // legalize its operands instead.
1208 legalizeOperands(Inst);
Tom Stellard82166022013-11-13 23:36:37 +00001209 continue;
Tom Stellard15834092014-03-21 15:51:57 +00001210 }
Tom Stellard82166022013-11-13 23:36:37 +00001211
Tom Stellard82166022013-11-13 23:36:37 +00001212 // Use the new VALU Opcode.
1213 const MCInstrDesc &NewDesc = get(NewOpcode);
1214 Inst->setDesc(NewDesc);
1215
Matt Arsenaultf0b1e3a2013-11-18 20:09:21 +00001216 // Remove any references to SCC. Vector instructions can't read from it, and
1217 // We're just about to add the implicit use / defs of VCC, and we don't want
1218 // both.
1219 for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
1220 MachineOperand &Op = Inst->getOperand(i);
1221 if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
1222 Inst->RemoveOperand(i);
1223 }
1224
Matt Arsenault27cc9582014-04-18 01:53:18 +00001225 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
1226 // We are converting these to a BFE, so we need to add the missing
1227 // operands for the size and offset.
1228 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
Vincent Lejeune94af31f2014-05-10 19:18:33 +00001229 Inst->addOperand(Inst->getOperand(1));
1230 Inst->getOperand(1).ChangeToImmediate(0);
1231 Inst->addOperand(MachineOperand::CreateImm(0));
1232 Inst->addOperand(MachineOperand::CreateImm(0));
Matt Arsenault27cc9582014-04-18 01:53:18 +00001233 Inst->addOperand(MachineOperand::CreateImm(0));
1234 Inst->addOperand(MachineOperand::CreateImm(Size));
1235
1236 // XXX - Other pointless operands. There are 4, but it seems you only need
1237 // 3 to not hit an assertion later in MCInstLower.
1238 Inst->addOperand(MachineOperand::CreateImm(0));
1239 Inst->addOperand(MachineOperand::CreateImm(0));
Matt Arsenaultb5b51102014-06-10 19:18:21 +00001240 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
1241 // The VALU version adds the second operand to the result, so insert an
1242 // extra 0 operand.
1243 Inst->addOperand(MachineOperand::CreateImm(0));
Tom Stellard82166022013-11-13 23:36:37 +00001244 }
1245
Matt Arsenault27cc9582014-04-18 01:53:18 +00001246 addDescImplicitUseDef(NewDesc, Inst);
Tom Stellard82166022013-11-13 23:36:37 +00001247
Matt Arsenault78b86702014-04-18 05:19:26 +00001248 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
1249 const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
1250 // If we need to move this to VGPRs, we need to unpack the second operand
1251 // back into the 2 separate ones for bit offset and width.
1252 assert(OffsetWidthOp.isImm() &&
1253 "Scalar BFE is only implemented for constant width and offset");
1254 uint32_t Imm = OffsetWidthOp.getImm();
1255
1256 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
1257 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
1258
1259 Inst->RemoveOperand(2); // Remove old immediate.
Vincent Lejeune94af31f2014-05-10 19:18:33 +00001260 Inst->addOperand(Inst->getOperand(1));
1261 Inst->getOperand(1).ChangeToImmediate(0);
Matt Arsenault4b0402e2014-05-13 23:45:50 +00001262 Inst->addOperand(MachineOperand::CreateImm(0));
Matt Arsenault78b86702014-04-18 05:19:26 +00001263 Inst->addOperand(MachineOperand::CreateImm(Offset));
Matt Arsenault78b86702014-04-18 05:19:26 +00001264 Inst->addOperand(MachineOperand::CreateImm(0));
Vincent Lejeune94af31f2014-05-10 19:18:33 +00001265 Inst->addOperand(MachineOperand::CreateImm(BitWidth));
Matt Arsenault78b86702014-04-18 05:19:26 +00001266 Inst->addOperand(MachineOperand::CreateImm(0));
1267 Inst->addOperand(MachineOperand::CreateImm(0));
Matt Arsenault78b86702014-04-18 05:19:26 +00001268 }
1269
Tom Stellard82166022013-11-13 23:36:37 +00001270 // Update the destination register class.
Tom Stellarde1a24452014-04-17 21:00:01 +00001271
Tom Stellard82166022013-11-13 23:36:37 +00001272 const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);
1273
Matt Arsenault27cc9582014-04-18 01:53:18 +00001274 switch (Opcode) {
Tom Stellard82166022013-11-13 23:36:37 +00001275 // For target instructions, getOpRegClass just returns the virtual
1276 // register class associated with the operand, so we need to find an
1277 // equivalent VGPR register class in order to move the instruction to the
1278 // VALU.
1279 case AMDGPU::COPY:
1280 case AMDGPU::PHI:
1281 case AMDGPU::REG_SEQUENCE:
Tom Stellard204e61b2014-04-07 19:45:45 +00001282 case AMDGPU::INSERT_SUBREG:
Tom Stellard82166022013-11-13 23:36:37 +00001283 if (RI.hasVGPRs(NewDstRC))
1284 continue;
1285 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
1286 if (!NewDstRC)
1287 continue;
1288 break;
1289 default:
1290 break;
1291 }

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    // Legalize the operands.
    legalizeOperands(Inst);

    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
         E = MRI.use_end(); I != E; ++I) {
      MachineInstr &UseMI = *I->getParent();
      if (!canReadVGPR(UseMI, I.getOperandNo())) {
        Worklist.push_back(&UseMI);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VReg_32RegClass;
}

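// A sketch of the rewrite performed below (opcode and operand spellings are
// illustrative, not verbatim MIR): a 64-bit scalar unary op such as
//   %res = S_NOT_B64 %src
// is split into two 32-bit VALU halves recombined with a REG_SEQUENCE:
//   %lo  = V_NOT_B32 %src:sub0
//   %hi  = V_NOT_B32 %src:sub1
//   %res = REG_SEQUENCE %lo, sub0, %hi, sub1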
void SIInstrInfo::splitScalar64BitUnaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
    .addOperand(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
    .addOperand(SrcReg0Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  Worklist.push_back(LoHalf);
  Worklist.push_back(HiHalf);
}

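// The binary case follows the same sketch (opcode spellings illustrative):
//   %res = S_AND_B64 %a, %b
// becomes
//   %lo  = V_AND_B32 %a:sub0, %b:sub0
//   %hi  = V_AND_B32 %a:sub1, %b:sub1
//   %res = REG_SEQUENCE %lo, sub0, %hi, sub1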
void SIInstrInfo::splitScalar64BitBinaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  MachineOperand &Src1 = Inst->getOperand(2);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
    .addOperand(SrcReg0Sub0)
    .addOperand(SrcReg1Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
    .addOperand(SrcReg0Sub1)
    .addOperand(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  Worklist.push_back(LoHalf);
  Worklist.push_back(HiHalf);
}

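// Sketch of the split below (operand shapes illustrative): V_BCNT_U32_B32
// adds its second operand to the population count of its first, so a 64-bit
// count becomes two accumulating 32-bit counts:
//   %mid = V_BCNT_U32_B32 %src:sub0, 0
//   %res = V_BCNT_U32_B32 %src:sub1, %mid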
void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
                                       MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src = Inst->getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e32);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  MachineInstr *First = BuildMI(MBB, MII, DL, InstDesc, MidReg)
    .addOperand(SrcRegSub0)
    .addImm(0);

  MachineInstr *Second = BuildMI(MBB, MII, DL, InstDesc, ResultReg)
    .addOperand(SrcRegSub1)
    .addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  Worklist.push_back(First);
  Worklist.push_back(Second);
}

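// Illustrative example (not an exhaustive contract): when an SALU opcode is
// replaced with a VALU one that implicitly clobbers VCC, that VCC operand
// exists only in the new MCInstrDesc; this helper copies every implicit use
// and def listed there onto the rebuilt MachineInstr so later passes see
// them.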
void SIInstrInfo::addDescImplicitUseDef(const MCInstrDesc &NewDesc,
                                        MachineInstr *Inst) const {
  // Add the implicit register uses and definitions from the new instruction
  // description.
  if (NewDesc.ImplicitUses) {
    for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitUses[i];
      // CreateReg(Reg, isDef = false, isImp = true): an implicit use.
      Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
    }
  }

  if (NewDesc.ImplicitDefs) {
    for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitDefs[i];
      // CreateReg(Reg, isDef = true, isImp = true): an implicit def.
      Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }
}

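// The two builders below assemble the SI_INDIRECT_DST_V1 and SI_INDIRECT_SRC
// pseudo instructions. A sketch of the operand order produced by the BuildMI
// chains (read off the code below, not from an encoding document):
//   SI_INDIRECT_DST_V1 def IndirectBase, dst, IndirectBase, Offset, 0, Value
//   SI_INDIRECT_SRC    dst, src, IndirectBase, Offset, 0
// The pseudos are expected to be expanded into real move-relative sequences
// elsewhere in the backend.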
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
          .addReg(IndirectBaseReg, RegState::Define)
          .addOperand(I->getOperand(0))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0)
          .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
          .addOperand(I->getOperand(0))
          .addOperand(I->getOperand(1))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0);
}

void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));

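  // Wider register tuples that merely overlap the indirect range must be
  // reserved as well: an N-dword register whose first 32-bit lane starts up
  // to N - 1 indices before Begin still covers index Begin. For example, the
  // VReg_64 pair starting at Begin - 1 spans [Begin - 1, Begin], hence the
  // shifted lower bounds below.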
  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}