//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

// Must be at least 4 to be able to branch over the minimum unconditional
// branch code. This is only for making it possible to write reasonably small
// tests for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
  : AMDGPUInstrInfo(ST), RI(), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

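// Returns the number of operands of \p Node, not counting any trailing glue
// operands.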
static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

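// Returns the chain operand of \p Load, which is expected to be its last
// non-glue operand.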
static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
///        operand \p OpName, or if neither node has this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

179 // getNamedOperandIdx returns the index for MachineInstrs. Since they
180 // inlcude the output in the operand list, but SDNodes don't, we need to
181 // subtract the index by one.
182 --OffIdx0;
183 --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

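// Returns true for the ST64 variants of DS read2/write2, which scale their
// offset operands by 64 elements instead of 1.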
static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
                                        int64_t &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset =
        getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg())
      return false;

    const MachineOperand *AddrReg =
        getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg =
        getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isFLAT(LdSt)) {
    const MachineOperand *AddrReg =
        getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    BaseReg = AddrReg->getReg();
    Offset = 0;
    return true;
  }

  return false;
}

bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                      MachineInstr &SecondLdSt,
                                      unsigned NumLoads) const {
  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt))) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());

  return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
        .addImm(-1)
        .addImm(0);
      return;
    }

    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    if (RC->getSize() > 4) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

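  // Copy one subregister at a time. If the source and destination registers
  // overlap, iterate in the direction that reads each source subregister
  // before an earlier copy in the sequence can clobber it.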
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == SubIndices.size() - 1)
      Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    Builder.addReg(SrcReg, RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (DstRC->getSize() == 4) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

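// Map a spill size in bytes to the matching SGPR spill pseudo instruction.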
static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    // Add the scratch resource registers as implicit uses because we may end
    // up needing them, and need to ensure that the reserved registers are
    // correctly handled.
    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
    }

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg())  // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

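// Map a restore size in bytes to the matching SGPR spill-restore pseudo.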
static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
    PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
    if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
    }

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)              // vaddr
    .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
    .addImm(0)                              // offset
    .addMemOperand(MMO);
}

/// \param FrameOffset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {

      unsigned TIDIGXReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
        TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id.
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset.
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

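// Returns the number of wait states an instruction implies; s_nop N expands
// to N + 1 wait states.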
unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;
  }
  case AMDGPU::S_XOR_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;
  }
  case AMDGPU::S_ANDN2_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;
  }
  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI.getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOVRELD_B32_V1:
  case AMDGPU::V_MOVRELD_B32_V2:
  case AMDGPU::V_MOVRELD_B32_V4:
  case AMDGPU::V_MOVRELD_B32_V8:
  case AMDGPU::V_MOVRELD_B32_V16: {
    const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
    unsigned VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

    MachineInstr *MovRel =
        BuildMI(MBB, MI, DL, MovRelDesc)
            .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
            .addOperand(MI.getOperand(2))
            .addReg(VecReg, RegState::ImplicitDefine)
            .addReg(VecReg,
                    RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx =
        MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    unsigned Reg = MI.getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
                       .addOperand(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
    if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
      MIB.addImm(0);
    else
      MIB.addOperand(MI.getOperand(2));

    Bundler.append(MIB);
    llvm::finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  }
  return true;
}

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
                                      MachineOperand &Src0,
                                      unsigned Src0OpName,
                                      MachineOperand &Src1,
                                      unsigned Src1OpName) const {
  MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
  if (!Src0Mods)
    return false;

  MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
  assert(Src1Mods &&
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  return true;
}

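// Swap a register operand with an immediate or frame-index operand, preserving
// the register flags. Returns null if the non-register operand kind is not
// supported.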
static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
                                             MachineOperand &RegOp,
                                             MachineOperand &NonRegOp) {
  unsigned Reg = RegOp.getReg();
  unsigned SubReg = RegOp.getSubReg();
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
    RegOp.ChangeToImmediate(NonRegOp.getImm());
  else if (NonRegOp.isFI())
    RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
  else
    return nullptr;

  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef,
                            IsDebug);
  NonRegOp.setSubReg(SubReg);

  return &MI;
}

MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                  unsigned Src0Idx,
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr;

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
           static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
           static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  MachineInstr *CommutedMI = nullptr;
  if (Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0)) {
      // Be sure to copy the source modifiers to the right place.
      CommutedMI
        = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
    }
  } else if (Src0.isReg() && !Src1.isReg()) {
    // src0 should always be able to support any operand type, so no need to
    // check operand legality.
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0))
      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else {
    // FIXME: Found two non registers to commute. This does happen.
    return nullptr;
  }

  if (CommutedMI) {
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!MI.isCommutable())
    return false;

  unsigned Opc = MI.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                        int64_t BrOffset) const {
  // BranchRelaxation should never have to check s_setpc_b64 because its dest
  // block is unanalyzable.
  assert(BranchOp != AMDGPU::S_SETPC_B64);

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;

  return isIntN(BranchOffsetBits, BrOffset);
}

MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
  const MachineInstr &MI) const {
  if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
    // This would be a difficult analysis to perform, but can always be legal
    // so there's no need to analyze it.
    return nullptr;
  }

  return MI.getOperand(0).getMBB();
}

unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                           MachineBasicBlock &DestBB,
                                           const DebugLoc &DL,
                                           int64_t BrOffset,
                                           RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // FIXME: Virtual register workaround for RegScavenger not working with empty
  // blocks.
  unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  auto I = MBB.end();

  // We need to compute the offset relative to the instruction immediately
  // after s_getpc_b64. Insert pc arithmetic code before last terminator.
  MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);

  // TODO: Handle > 32-bit block address.
  if (BrOffset >= 0) {
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  } else {
    // Backwards branch.
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  }

  // Insert the indirect branch after the other terminator.
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
    .addReg(PCReg);

  // FIXME: If spilling is necessary, this will fail because this scavenger has
  // no emergency stack slots. It is non-trivial to spill in this situation,
  // because the restore code needs to be specially placed after the
  // jump. BranchRelaxation then needs to be made aware of the newly inserted
  // block.
  //
  // If a spill is needed for the pc register pair, we need to insert a spill
  // restore block right before the destination block, and insert a short
  // branch into the old destination block's fallthrough predecessor.
  // e.g.:
  //
  // s_cbranch_scc0 skip_long_branch:
  //
  // long_branch_bb:
  //   spill s[8:9]
  //   s_getpc_b64 s[8:9]
  //   s_add_u32 s8, s8, restore_bb
  //   s_addc_u32 s9, s9, 0
  //   s_setpc_b64 s[8:9]
  //
  // skip_long_branch:
  //   foo;
  //
  // .....
  //
  // dest_bb_fallthrough_predecessor:
  //   bar;
  //   s_branch dest_bb
  //
  // restore_bb:
  //   restore s[8:9]
  //   fallthrough dest_bb
  //
  // dest_bb:
  //   buzz;

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegister(&AMDGPU::SReg_64RegClass,
                                       MachineBasicBlock::iterator(GetPC), 0);
  MRI.replaceRegWith(PCReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);

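  // Size of the emitted sequence: s_getpc_b64 (4 bytes), s_add_u32/s_sub_u32
  // with a 32-bit literal (8 bytes), s_addc_u32/s_subb_u32 (4 bytes), and
  // s_setpc_b64 (4 bytes).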
  return 4 + 8 + 4 + 4;
}

unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;
  default:
    llvm_unreachable("invalid branch predicate");
  }
}

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
    return SCC_FALSE;
  case AMDGPU::S_CBRANCH_SCC1:
    return SCC_TRUE;
  case AMDGPU::S_CBRANCH_VCCNZ:
    return VCCNZ;
  case AMDGPU::S_CBRANCH_VCCZ:
    return VCCZ;
  case AMDGPU::S_CBRANCH_EXECNZ:
    return EXECNZ;
  case AMDGPU::S_CBRANCH_EXECZ:
    return EXECZ;
  default:
    return INVALID_BR;
  }
}

bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    MachineBasicBlock *&TBB,
                                    MachineBasicBlock *&FBB,
                                    SmallVectorImpl<MachineOperand> &Cond,
                                    bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Unconditional branch.
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  BranchPredicate Pred = getBranchPredicate(I->getOpcode());
  if (Pred == INVALID_BR)
    return true;

  MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
  Cond.push_back(MachineOperand::CreateImm(Pred));
  Cond.push_back(I->getOperand(1)); // Save the branch register.

  ++I;

  if (I == MBB.end()) {
    // Conditional branch followed by fall-through.
    TBB = CondBB;
    return false;
  }

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = CondBB;
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  return true;
}

bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();
  if (I == MBB.end())
    return false;

  if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
    return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);

  ++I;

  // TODO: Should be able to treat as fallthrough?
  if (I == MBB.end())
    return true;

  if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
    return true;

  MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();

  // Specifically handle the case where the conditional branch is to the same
  // destination as the mask branch. e.g.
  //
  // si_mask_branch BB8
  // s_cbranch_execz BB8
  // s_cbranch BB9
  //
  // This is required to understand divergent loops which may need the branches
  // to be relaxed.
  if (TBB != MaskBrDest || Cond.empty())
    return true;

  auto Pred = Cond[0].getImm();
  return (Pred != EXECZ && Pred != EXECNZ);
}

unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();

  unsigned Count = 0;
  unsigned RemovedSize = 0;
  while (I != MBB.end()) {
    MachineBasicBlock::iterator Next = std::next(I);
    if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      I = Next;
      continue;
    }

    RemovedSize += getInstSizeInBytes(*I);
    I->eraseFromParent();
    ++Count;
    I = Next;
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}

Matt Arsenaulte8e0f5c2016-09-14 17:24:15 +00001293unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
Matt Arsenault6d093802016-05-21 00:29:27 +00001294 MachineBasicBlock *TBB,
1295 MachineBasicBlock *FBB,
1296 ArrayRef<MachineOperand> Cond,
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001297 const DebugLoc &DL,
1298 int *BytesAdded) const {
Matt Arsenault6d093802016-05-21 00:29:27 +00001299
1300 if (!FBB && Cond.empty()) {
1301 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1302 .addMBB(TBB);
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001303 if (BytesAdded)
1304 *BytesAdded = 4;
Matt Arsenault6d093802016-05-21 00:29:27 +00001305 return 1;
1306 }
1307
1308 assert(TBB && Cond[0].isImm());
1309
1310 unsigned Opcode
1311 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
1312
1313 if (!FBB) {
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001315    MachineInstr *CondBr =
1316 BuildMI(&MBB, DL, get(Opcode))
Matt Arsenault6d093802016-05-21 00:29:27 +00001317 .addMBB(TBB);
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001318
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001319 // Copy the flags onto the implicit condition register operand.
1320 MachineOperand &CondReg = CondBr->getOperand(1);
1321 CondReg.setIsUndef(Cond[1].isUndef());
1322 CondReg.setIsKill(Cond[1].isKill());
1323
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001324 if (BytesAdded)
1325 *BytesAdded = 4;
Matt Arsenault6d093802016-05-21 00:29:27 +00001326 return 1;
1327 }
1328
1329 assert(TBB && FBB);
1330
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001331 MachineInstr *CondBr =
1332 BuildMI(&MBB, DL, get(Opcode))
Matt Arsenault6d093802016-05-21 00:29:27 +00001333 .addMBB(TBB);
1334 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1335 .addMBB(FBB);
1336
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001337 MachineOperand &CondReg = CondBr->getOperand(1);
1338 CondReg.setIsUndef(Cond[1].isUndef());
1339 CondReg.setIsKill(Cond[1].isKill());
1340
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001341 if (BytesAdded)
1342 *BytesAdded = 8;
1343
Matt Arsenault6d093802016-05-21 00:29:27 +00001344 return 2;
1345}
1346
Matt Arsenault1b9fc8e2016-09-14 20:43:16 +00001347bool SIInstrInfo::reverseBranchCondition(
Matt Arsenault72fcd5f2016-05-21 00:29:34 +00001348 SmallVectorImpl<MachineOperand> &Cond) const {
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001349 assert(Cond.size() == 2);
Matt Arsenault72fcd5f2016-05-21 00:29:34 +00001350 Cond[0].setImm(-Cond[0].getImm());
1351 return false;
1352}
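// Negating the immediate relies on the BranchPredicate enumerators encoding
// opposite predicates as +N/-N pairs (e.g. SCC_TRUE = 1, SCC_FALSE = -1);
// that encoding is assumed here and lives in SIInstrInfo.h.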
1353
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001354static void removeModOperands(MachineInstr &MI) {
1355 unsigned Opc = MI.getOpcode();
1356 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1357 AMDGPU::OpName::src0_modifiers);
1358 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1359 AMDGPU::OpName::src1_modifiers);
1360 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1361 AMDGPU::OpName::src2_modifiers);
1362
1363 MI.RemoveOperand(Src2ModIdx);
1364 MI.RemoveOperand(Src1ModIdx);
1365 MI.RemoveOperand(Src0ModIdx);
1366}
1367
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001368bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001369 unsigned Reg, MachineRegisterInfo *MRI) const {
1370 if (!MRI->hasOneNonDBGUse(Reg))
1371 return false;
1372
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001373 unsigned Opc = UseMI.getOpcode();
Tom Stellard2add8a12016-09-06 20:00:26 +00001374 if (Opc == AMDGPU::COPY) {
1375 bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
1376 switch (DefMI.getOpcode()) {
1377 default:
1378 return false;
1379 case AMDGPU::S_MOV_B64:
1380 // TODO: We could fold 64-bit immediates, but this get compilicated
1381 // when there are sub-registers.
1382 return false;
1383
1384 case AMDGPU::V_MOV_B32_e32:
1385 case AMDGPU::S_MOV_B32:
1386 break;
1387 }
1388 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1389 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
1390 assert(ImmOp);
1391 // FIXME: We could handle FrameIndex values here.
1392 if (!ImmOp->isImm()) {
1393 return false;
1394 }
1395 UseMI.setDesc(get(NewOpc));
1396 UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
1397 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
1398 return true;
1399 }
1400
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001401 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
1402 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
1403 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
1404
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001405 // Don't fold if we are using source modifiers. The new VOP2 instructions
1406 // don't have them.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001407 if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) ||
1408 hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) ||
1409 hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) {
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001410 return false;
1411 }
1412
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001413 const MachineOperand &ImmOp = DefMI.getOperand(1);
Matt Arsenault3d1c1de2016-04-14 21:58:24 +00001414
1415 // If this is a free constant, there's no reason to do this.
1416 // TODO: We could fold this here instead of letting SIFoldOperands do it
1417 // later.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001418 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
1419
1420 // Any src operand can be used for the legality check.
1421 if (isInlineConstant(UseMI, *Src0, ImmOp))
Matt Arsenault3d1c1de2016-04-14 21:58:24 +00001422 return false;
1423
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001424 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
1425 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001426
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001427 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
Matt Arsenaultf0783302015-02-21 21:29:10 +00001428 // We should only expect these to be on src0 due to canonicalizations.
1429 if (Src0->isReg() && Src0->getReg() == Reg) {
Matt Arsenaulta266bd82016-03-02 04:05:14 +00001430 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
Matt Arsenaultf0783302015-02-21 21:29:10 +00001431 return false;
1432
Matt Arsenaulta266bd82016-03-02 04:05:14 +00001433 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
Matt Arsenaultf0783302015-02-21 21:29:10 +00001434 return false;
1435
Nikolay Haustov65607812016-03-11 09:27:25 +00001436 // We need to swap operands 0 and 1 since madmk constant is at operand 1.
Matt Arsenaultf0783302015-02-21 21:29:10 +00001437
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001438 const int64_t Imm = DefMI.getOperand(1).getImm();
Matt Arsenaultf0783302015-02-21 21:29:10 +00001439
1440 // FIXME: This would be a lot easier if we could return a new instruction
1441 // instead of having to modify in place.
1442
1443 // Remove these first since they are at the end.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001444 UseMI.RemoveOperand(
1445 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1446 UseMI.RemoveOperand(
1447 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
Matt Arsenaultf0783302015-02-21 21:29:10 +00001448
1449 unsigned Src1Reg = Src1->getReg();
1450 unsigned Src1SubReg = Src1->getSubReg();
Matt Arsenaultf0783302015-02-21 21:29:10 +00001451 Src0->setReg(Src1Reg);
1452 Src0->setSubReg(Src1SubReg);
Matt Arsenault5e100162015-04-24 01:57:58 +00001453 Src0->setIsKill(Src1->isKill());
1454
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001455 if (Opc == AMDGPU::V_MAC_F32_e64 ||
1456 Opc == AMDGPU::V_MAC_F16_e64)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001457 UseMI.untieRegOperand(
1458 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
Tom Stellarddb5a11f2015-07-13 15:47:57 +00001459
Nikolay Haustov65607812016-03-11 09:27:25 +00001460 Src1->ChangeToImmediate(Imm);
Matt Arsenaultf0783302015-02-21 21:29:10 +00001461
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001462 removeModOperands(UseMI);
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001463 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));
Matt Arsenaultf0783302015-02-21 21:29:10 +00001464
1465 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1466 if (DeleteDef)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001467 DefMI.eraseFromParent();
Matt Arsenaultf0783302015-02-21 21:29:10 +00001468
1469 return true;
1470 }
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001471
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001472 // Added part is the constant: Use v_madak_{f16, f32}.
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001473 if (Src2->isReg() && Src2->getReg() == Reg) {
1474 // Not allowed to use constant bus for another operand.
1475 // We can however allow an inline immediate as src0.
1476 if (!Src0->isImm() &&
1477 (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
1478 return false;
1479
Matt Arsenaulta266bd82016-03-02 04:05:14 +00001480 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001481 return false;
1482
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001483 const int64_t Imm = DefMI.getOperand(1).getImm();
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001484
1485 // FIXME: This would be a lot easier if we could return a new instruction
1486 // instead of having to modify in place.
1487
1488 // Remove these first since they are at the end.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001489 UseMI.RemoveOperand(
1490 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1491 UseMI.RemoveOperand(
1492 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001493
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001494 if (Opc == AMDGPU::V_MAC_F32_e64 ||
1495 Opc == AMDGPU::V_MAC_F16_e64)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001496 UseMI.untieRegOperand(
1497 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
Tom Stellarddb5a11f2015-07-13 15:47:57 +00001498
1499      // ChangeToImmediate() adds Src2 back to the instruction.
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001500 Src2->ChangeToImmediate(Imm);
1501
1502 // These come before src2.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001503 removeModOperands(UseMI);
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001504 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001505
1506 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1507 if (DeleteDef)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001508 DefMI.eraseFromParent();
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001509
1510 return true;
1511 }
1512 }
1513
1514 return false;
1515}
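// A hedged sketch of the two rewrites above, with the modifier operands
// elided and the virtual registers invented for illustration:
//
//   %k = V_MOV_B32_e32 0x41000000             ; DefMI
//   %d = V_MAC_F32_e64 %k, %a, %b             ; constant is the multiplied part
//   --> %d = V_MADMK_F32 %a, 0x41000000, %b   ; src0/src1 swapped first
//
//   %d = V_MAC_F32_e64 %a, %b, %k             ; constant is the added part
//   --> %d = V_MADAK_F32 %a, %b, 0x41000000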
1516
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001517static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
1518 int WidthB, int OffsetB) {
1519 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
1520 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
1521 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1522 return LowOffset + LowWidth <= HighOffset;
1523}
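// Worked example: WidthA = 8, OffsetA = 0, WidthB = 4, OffsetB = 8 gives
// LowOffset = 0, LowWidth = 8, HighOffset = 8; 0 + 8 <= 8 holds, so the two
// accesses are disjoint. With OffsetB = 4 the check fails and they overlap.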
1524
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001525bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
1526 MachineInstr &MIb) const {
Chad Rosierc27a18f2016-03-09 16:00:35 +00001527 unsigned BaseReg0, BaseReg1;
1528 int64_t Offset0, Offset1;
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001529
Sanjoy Dasb666ea32015-06-15 18:44:14 +00001530 if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
1531 getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
Tom Stellardcb6ba622016-04-30 00:23:06 +00001532
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001533 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
Tom Stellardcb6ba622016-04-30 00:23:06 +00001534 // FIXME: Handle ds_read2 / ds_write2.
1535 return false;
1536 }
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001537 unsigned Width0 = (*MIa.memoperands_begin())->getSize();
1538 unsigned Width1 = (*MIb.memoperands_begin())->getSize();
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001539 if (BaseReg0 == BaseReg1 &&
1540 offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
1541 return true;
1542 }
1543 }
1544
1545 return false;
1546}
1547
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001548bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
1549 MachineInstr &MIb,
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001550 AliasAnalysis *AA) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001551 assert((MIa.mayLoad() || MIa.mayStore()) &&
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001552 "MIa must load from or modify a memory location");
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001553 assert((MIb.mayLoad() || MIb.mayStore()) &&
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001554 "MIb must load from or modify a memory location");
1555
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001556 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001557 return false;
1558
1559 // XXX - Can we relax this between address spaces?
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001560 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001561 return false;
1562
Tom Stellard662f3302016-08-29 12:05:32 +00001563 if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) {
1564 const MachineMemOperand *MMOa = *MIa.memoperands_begin();
1565 const MachineMemOperand *MMOb = *MIb.memoperands_begin();
1566 if (MMOa->getValue() && MMOb->getValue()) {
1567 MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo());
1568 MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo());
1569 if (!AA->alias(LocA, LocB))
1570 return true;
1571 }
1572 }
1573
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001574 // TODO: Should we check the address space from the MachineMemOperand? That
1575 // would allow us to distinguish objects we know don't alias based on the
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00001576 // underlying address space, even if it was lowered to a different one,
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001577 // e.g. private accesses lowered to use MUBUF instructions on a scratch
1578 // buffer.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001579 if (isDS(MIa)) {
1580 if (isDS(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001581 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1582
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001583 return !isFLAT(MIb);
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001584 }
1585
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001586 if (isMUBUF(MIa) || isMTBUF(MIa)) {
1587 if (isMUBUF(MIb) || isMTBUF(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001588 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1589
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001590 return !isFLAT(MIb) && !isSMRD(MIb);
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001591 }
1592
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001593 if (isSMRD(MIa)) {
1594 if (isSMRD(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001595 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1596
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001597    return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001598 }
1599
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001600 if (isFLAT(MIa)) {
1601 if (isFLAT(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00001602 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1603
1604 return false;
1605 }
1606
1607 return false;
1608}
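// Example of the reasoning above: a DS (LDS) access can never alias a MUBUF
// or SMRD access because they address different memories, so the DS case only
// needs to rule out FLAT, which can reach either address space.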
1609
Tom Stellarddb5a11f2015-07-13 15:47:57 +00001610MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001611 MachineInstr &MI,
1612 LiveVariables *LV) const {
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001613 bool IsF16 = false;
Tom Stellarddb5a11f2015-07-13 15:47:57 +00001614
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001615 switch (MI.getOpcode()) {
1616 default:
1617 return nullptr;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001618 case AMDGPU::V_MAC_F16_e64:
1619    IsF16 = true; LLVM_FALLTHROUGH;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001620 case AMDGPU::V_MAC_F32_e64:
1621 break;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001622 case AMDGPU::V_MAC_F16_e32:
1623    IsF16 = true; LLVM_FALLTHROUGH;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001624 case AMDGPU::V_MAC_F32_e32: {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001625 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1626 AMDGPU::OpName::src0);
1627 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
1628 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001629 return nullptr;
1630 break;
1631 }
Tom Stellarddb5a11f2015-07-13 15:47:57 +00001632 }
1633
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001634 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
1635 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
1636 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
1637 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
Tom Stellarddb5a11f2015-07-13 15:47:57 +00001638
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001639 return BuildMI(*MBB, MI, MI.getDebugLoc(),
1640 get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001641 .addOperand(*Dst)
1642 .addImm(0) // Src0 mods
1643 .addOperand(*Src0)
1644 .addImm(0) // Src1 mods
1645 .addOperand(*Src1)
1646                  .addImm(0) // Src2 mods
1647 .addOperand(*Src2)
1648 .addImm(0) // clamp
1649 .addImm(0); // omod
Tom Stellarddb5a11f2015-07-13 15:47:57 +00001650}
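// Roughly, with the zero modifier/clamp/omod operands added above:
//
//   %d = V_MAC_F32_e32 %a, %b, %d(tied)
//   --> %d = V_MAD_F32 0, %a, 0, %b, 0, %d, 0, 0
//
// i.e. the MAC's tied accumulator read becomes the MAD's explicit src2.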
1651
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00001652// It's not generally safe to move VALU instructions across these since it will
1653// start using the register as a base index rather than directly.
1654// XXX - Why isn't hasSideEffects sufficient for these?
1655static bool changesVGPRIndexingMode(const MachineInstr &MI) {
1656 switch (MI.getOpcode()) {
1657 case AMDGPU::S_SET_GPR_IDX_ON:
1658 case AMDGPU::S_SET_GPR_IDX_MODE:
1659 case AMDGPU::S_SET_GPR_IDX_OFF:
1660 return true;
1661 default:
1662 return false;
1663 }
1664}
1665
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001666bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
Nicolai Haehnle213e87f2016-03-21 20:28:33 +00001667 const MachineBasicBlock *MBB,
1668 const MachineFunction &MF) const {
Matt Arsenault95c78972016-07-09 01:13:51 +00001669 // XXX - Do we want the SP check in the base implementation?
1670
Nicolai Haehnle213e87f2016-03-21 20:28:33 +00001671 // Target-independent instructions do not have an implicit-use of EXEC, even
1672 // when they operate on VGPRs. Treating EXEC modifications as scheduling
1673 // boundaries prevents incorrect movements of such instructions.
Matt Arsenault95c78972016-07-09 01:13:51 +00001674 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00001675 MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
Tom Stellard8485fa02016-12-07 02:42:15 +00001676 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
1677 MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00001678 changesVGPRIndexingMode(MI);
Nicolai Haehnle213e87f2016-03-21 20:28:33 +00001679}
1680
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001681bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
Matt Arsenault26faed32016-12-05 22:26:17 +00001682 switch (Imm.getBitWidth()) {
1683 case 32:
1684 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
1685 ST.hasInv2PiInlineImm());
1686 case 64:
1687 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
1688 ST.hasInv2PiInlineImm());
Matt Arsenault4bd72362016-12-10 00:39:12 +00001689 case 16:
1690 return AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
1691 ST.hasInv2PiInlineImm());
Matt Arsenault26faed32016-12-05 22:26:17 +00001692 default:
1693 llvm_unreachable("invalid bitwidth");
Matt Arsenault303011a2014-12-17 21:04:08 +00001694 }
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001695}
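// For reference (the authoritative tables live in AMDGPUBaseInfo): the
// inlinable literals are roughly the integers -16..64 and the float constants
// 0.0, +-0.5, +-1.0, +-2.0, +-4.0, plus 1/(2*pi) on subtargets where
// hasInv2PiInlineImm() is true.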
1696
Matt Arsenault11a4d672015-02-13 19:05:03 +00001697bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
Matt Arsenault4bd72362016-12-10 00:39:12 +00001698 uint8_t OperandType) const {
1699 if (!MO.isImm() || OperandType < MCOI::OPERAND_FIRST_TARGET)
1700 return false;
1701
1702 // MachineOperand provides no way to tell the true operand size, since it only
1703 // records a 64-bit value. We need to know the size to determine if a 32-bit
1704 // floating point immediate bit pattern is legal for an integer immediate. It
1705 // would be for any 32-bit integer operand, but would not be for a 64-bit one.
1706
1707 int64_t Imm = MO.getImm();
1708 switch (operandBitWidth(OperandType)) {
1709 case 32: {
1710 int32_t Trunc = static_cast<int32_t>(Imm);
1711 return Trunc == Imm &&
1712 AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
Matt Arsenault11a4d672015-02-13 19:05:03 +00001713 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001714 case 64: {
1715    return AMDGPU::isInlinableLiteral64(Imm,
1716 ST.hasInv2PiInlineImm());
1717 }
1718 case 16: {
1719 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
1720 int16_t Trunc = static_cast<int16_t>(Imm);
1721 return AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
1722 }
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001723
Matt Arsenault4bd72362016-12-10 00:39:12 +00001724 return false;
1725 }
1726 default:
1727 llvm_unreachable("invalid bitwidth");
1728 }
Tom Stellard93fabce2013-10-10 17:11:55 +00001729}
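// Worked 32-bit example: the bit pattern of 1.0f, Imm = 0x3f800000, passes
// the Trunc == Imm check and is an inline constant, so it is accepted even
// though MachineOperand stores it as a 64-bit integer; something like
// 0x100000000 fails Trunc == Imm and is rejected.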
1730
Matt Arsenaultc1ebd822016-08-13 01:43:54 +00001731bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
Matt Arsenault4bd72362016-12-10 00:39:12 +00001732 const MCOperandInfo &OpInfo) const {
Matt Arsenaultc1ebd822016-08-13 01:43:54 +00001733 switch (MO.getType()) {
1734 case MachineOperand::MO_Register:
1735 return false;
1736 case MachineOperand::MO_Immediate:
Matt Arsenault4bd72362016-12-10 00:39:12 +00001737 return !isInlineConstant(MO, OpInfo);
Matt Arsenaultc1ebd822016-08-13 01:43:54 +00001738 case MachineOperand::MO_FrameIndex:
1739 case MachineOperand::MO_MachineBasicBlock:
1740 case MachineOperand::MO_ExternalSymbol:
1741 case MachineOperand::MO_GlobalAddress:
1742 case MachineOperand::MO_MCSymbol:
1743 return true;
1744 default:
1745 llvm_unreachable("unexpected operand type");
1746 }
1747}
1748
Matt Arsenaultbecb1402014-06-23 18:28:31 +00001749static bool compareMachineOp(const MachineOperand &Op0,
1750 const MachineOperand &Op1) {
1751 if (Op0.getType() != Op1.getType())
1752 return false;
1753
1754 switch (Op0.getType()) {
1755 case MachineOperand::MO_Register:
1756 return Op0.getReg() == Op1.getReg();
1757 case MachineOperand::MO_Immediate:
1758 return Op0.getImm() == Op1.getImm();
Matt Arsenaultbecb1402014-06-23 18:28:31 +00001759 default:
1760 llvm_unreachable("Didn't expect to be comparing these operand types");
1761 }
1762}
1763
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001764bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
1765 const MachineOperand &MO) const {
1766 const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo];
Tom Stellardb02094e2014-07-21 15:45:01 +00001767
Tom Stellardfb77f002015-01-13 22:59:41 +00001768 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
Tom Stellardb02094e2014-07-21 15:45:01 +00001769
1770 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
1771 return true;
1772
1773 if (OpInfo.RegClass < 0)
1774 return false;
1775
Matt Arsenault4bd72362016-12-10 00:39:12 +00001776 if (MO.isImm() && isInlineConstant(MO, OpInfo))
1777 return RI.opCanUseInlineConstant(OpInfo.OperandType);
Tom Stellard73ae1cb2014-09-23 21:26:25 +00001778
Matt Arsenault4bd72362016-12-10 00:39:12 +00001779 return RI.opCanUseLiteralConstant(OpInfo.OperandType);
Tom Stellardb02094e2014-07-21 15:45:01 +00001780}
1781
Tom Stellard86d12eb2014-08-01 00:32:28 +00001782bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
Marek Olsaka93603d2015-01-15 18:42:51 +00001783 int Op32 = AMDGPU::getVOPe32(Opcode);
1784 if (Op32 == -1)
1785 return false;
1786
1787 return pseudoToMCOpcode(Op32) != -1;
Tom Stellard86d12eb2014-08-01 00:32:28 +00001788}
1789
Tom Stellardb4a313a2014-08-01 00:32:39 +00001790bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
1791 // The src0_modifier operand is present on all instructions
1792 // that have modifiers.
1793
1794 return AMDGPU::getNamedOperandIdx(Opcode,
1795 AMDGPU::OpName::src0_modifiers) != -1;
1796}
1797
Matt Arsenaultace5b762014-10-17 18:00:43 +00001798bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
1799 unsigned OpName) const {
1800 const MachineOperand *Mods = getNamedOperand(MI, OpName);
1801 return Mods && Mods->getImm();
1802}
1803
Tom Stellard73ae1cb2014-09-23 21:26:25 +00001804bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
Matt Arsenault11a4d672015-02-13 19:05:03 +00001805 const MachineOperand &MO,
Matt Arsenault4bd72362016-12-10 00:39:12 +00001806 const MCOperandInfo &OpInfo) const {
Tom Stellard73ae1cb2014-09-23 21:26:25 +00001807 // Literal constants use the constant bus.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001810  if (MO.isImm())
1811 return !isInlineConstant(MO, OpInfo);
Tom Stellard73ae1cb2014-09-23 21:26:25 +00001812
Matt Arsenault4bd72362016-12-10 00:39:12 +00001813 if (!MO.isReg())
1814 return true; // Misc other operands like FrameIndex
1815
1816 if (!MO.isUse())
Tom Stellard73ae1cb2014-09-23 21:26:25 +00001817 return false;
1818
1819 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1820 return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
1821
1822 // FLAT_SCR is just an SGPR pair.
1823 if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
1824 return true;
1825
1826 // EXEC register uses the constant bus.
1827 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
1828 return true;
1829
1830 // SGPRs use the constant bus
Matt Arsenault8226fc42016-03-02 23:00:21 +00001831 return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
1832 (!MO.isImplicit() &&
1833 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
1834 AMDGPU::SGPR_64RegClass.contains(MO.getReg()))));
Tom Stellard73ae1cb2014-09-23 21:26:25 +00001835}
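// Summarizing the checks above: non-inline immediates and other non-register
// operands (e.g. FrameIndex) occupy the bus; virtual registers do iff they
// are in an SGPR class; VCC, M0, and explicit FLAT_SCR or EXEC uses count as
// well, while VGPR uses never do.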
1836
Matt Arsenaulte223ceb2015-10-21 21:15:01 +00001837static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
1838 for (const MachineOperand &MO : MI.implicit_operands()) {
1839 // We only care about reads.
1840 if (MO.isDef())
1841 continue;
1842
1843 switch (MO.getReg()) {
1844 case AMDGPU::VCC:
1845 case AMDGPU::M0:
1846 case AMDGPU::FLAT_SCR:
1847 return MO.getReg();
1848
1849 default:
1850 break;
1851 }
1852 }
1853
1854 return AMDGPU::NoRegister;
1855}
1856
Matt Arsenault529cf252016-06-23 01:26:16 +00001857static bool shouldReadExec(const MachineInstr &MI) {
1858 if (SIInstrInfo::isVALU(MI)) {
1859 switch (MI.getOpcode()) {
1860 case AMDGPU::V_READLANE_B32:
1861 case AMDGPU::V_READLANE_B32_si:
1862 case AMDGPU::V_READLANE_B32_vi:
1863 case AMDGPU::V_WRITELANE_B32:
1864 case AMDGPU::V_WRITELANE_B32_si:
1865 case AMDGPU::V_WRITELANE_B32_vi:
1866 return false;
1867 }
1868
1869 return true;
1870 }
1871
1872 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
1873 SIInstrInfo::isSALU(MI) ||
1874 SIInstrInfo::isSMRD(MI))
1875 return false;
1876
1877 return true;
1878}
1879
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00001880static bool isSubRegOf(const SIRegisterInfo &TRI,
1881 const MachineOperand &SuperVec,
1882 const MachineOperand &SubReg) {
1883 if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
1884 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
1885
1886 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
1887 SubReg.getReg() == SuperVec.getReg();
1888}
1889
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001890bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
Tom Stellard93fabce2013-10-10 17:11:55 +00001891 StringRef &ErrInfo) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001892 uint16_t Opcode = MI.getOpcode();
1893 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Tom Stellard93fabce2013-10-10 17:11:55 +00001894 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
1895 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
1896 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
1897
Tom Stellardca700e42014-03-17 17:03:49 +00001898 // Make sure the number of operands is correct.
1899 const MCInstrDesc &Desc = get(Opcode);
1900 if (!Desc.isVariadic() &&
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001901 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
1902 ErrInfo = "Instruction has wrong number of operands.";
1903 return false;
Tom Stellardca700e42014-03-17 17:03:49 +00001904 }
1905
Matt Arsenault3d463192016-11-01 22:55:07 +00001906 if (MI.isInlineAsm()) {
1907 // Verify register classes for inlineasm constraints.
1908 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
1909 I != E; ++I) {
1910 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
1911 if (!RC)
1912 continue;
1913
1914 const MachineOperand &Op = MI.getOperand(I);
1915 if (!Op.isReg())
1916 continue;
1917
1918 unsigned Reg = Op.getReg();
1919 if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
1920 ErrInfo = "inlineasm operand has incorrect register class.";
1921 return false;
1922 }
1923 }
1924
1925 return true;
1926 }
1927
Changpeng Fangc9963932015-12-18 20:04:28 +00001928 // Make sure the register classes are correct.
Tom Stellardb4a313a2014-08-01 00:32:39 +00001929 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001930 if (MI.getOperand(i).isFPImm()) {
Tom Stellardfb77f002015-01-13 22:59:41 +00001931 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
1932 "all fp values to integers.";
1933 return false;
1934 }
1935
Marek Olsak8eeebcc2015-02-18 22:12:41 +00001936 int RegClass = Desc.OpInfo[i].RegClass;
1937
Tom Stellardca700e42014-03-17 17:03:49 +00001938 switch (Desc.OpInfo[i].OperandType) {
Tom Stellard1106b1c2015-01-20 17:49:41 +00001939 case MCOI::OPERAND_REGISTER:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001940 if (MI.getOperand(i).isImm()) {
Tom Stellard1106b1c2015-01-20 17:49:41 +00001941 ErrInfo = "Illegal immediate value for operand.";
1942 return false;
1943 }
1944 break;
Matt Arsenault4bd72362016-12-10 00:39:12 +00001945 case AMDGPU::OPERAND_REG_IMM_INT32:
1946 case AMDGPU::OPERAND_REG_IMM_FP32:
Tom Stellard1106b1c2015-01-20 17:49:41 +00001947 break;
Matt Arsenault4bd72362016-12-10 00:39:12 +00001948 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1949 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1950 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1951 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1952 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1953 case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
1954 const MachineOperand &MO = MI.getOperand(i);
1955 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
Marek Olsak8eeebcc2015-02-18 22:12:41 +00001956 ErrInfo = "Illegal immediate value for operand.";
1957 return false;
Tom Stellarda305f932014-07-02 20:53:44 +00001958 }
Tom Stellardca700e42014-03-17 17:03:49 +00001959 break;
Matt Arsenault4bd72362016-12-10 00:39:12 +00001960 }
Tom Stellardca700e42014-03-17 17:03:49 +00001961 case MCOI::OPERAND_IMMEDIATE:
Matt Arsenaultffc82752016-07-05 17:09:01 +00001962 case AMDGPU::OPERAND_KIMM32:
Tom Stellardb02094e2014-07-21 15:45:01 +00001963 // Check if this operand is an immediate.
1964 // FrameIndex operands will be replaced by immediates, so they are
1965 // allowed.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001966 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
Tom Stellardca700e42014-03-17 17:03:49 +00001967 ErrInfo = "Expected immediate, but got non-immediate";
1968 return false;
1969 }
Justin Bognerb03fd122016-08-17 05:10:15 +00001970 LLVM_FALLTHROUGH;
Tom Stellardca700e42014-03-17 17:03:49 +00001971 default:
1972 continue;
1973 }
1974
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001975 if (!MI.getOperand(i).isReg())
Tom Stellardca700e42014-03-17 17:03:49 +00001976 continue;
1977
Tom Stellardca700e42014-03-17 17:03:49 +00001978 if (RegClass != -1) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001979 unsigned Reg = MI.getOperand(i).getReg();
Matt Arsenault1322b6f2016-07-09 01:13:56 +00001980 if (Reg == AMDGPU::NoRegister ||
1981 TargetRegisterInfo::isVirtualRegister(Reg))
Tom Stellardca700e42014-03-17 17:03:49 +00001982 continue;
1983
1984 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
1985 if (!RC->contains(Reg)) {
1986 ErrInfo = "Operand has incorrect register class.";
1987 return false;
1988 }
1989 }
1990 }
1991
Tom Stellard93fabce2013-10-10 17:11:55 +00001992 // Verify VOP*
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001993 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) {
Matt Arsenaulte368cb32014-12-11 23:37:32 +00001994 // Only look at the true operands. Only a real operand can use the constant
1995 // bus, and we don't want to check pseudo-operands like the source modifier
1996 // flags.
1997 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
1998
Tom Stellard93fabce2013-10-10 17:11:55 +00001999 unsigned ConstantBusCount = 0;
Matt Arsenaultffc82752016-07-05 17:09:01 +00002000
2001 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
2002 ++ConstantBusCount;
2003
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002004 unsigned SGPRUsed = findImplicitSGPRRead(MI);
Matt Arsenaulte223ceb2015-10-21 21:15:01 +00002005 if (SGPRUsed != AMDGPU::NoRegister)
2006 ++ConstantBusCount;
2007
Matt Arsenaulte368cb32014-12-11 23:37:32 +00002008 for (int OpIdx : OpIndices) {
2009 if (OpIdx == -1)
2010 break;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002011 const MachineOperand &MO = MI.getOperand(OpIdx);
Matt Arsenault4bd72362016-12-10 00:39:12 +00002012 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
Tom Stellard73ae1cb2014-09-23 21:26:25 +00002013 if (MO.isReg()) {
2014 if (MO.getReg() != SGPRUsed)
Tom Stellard93fabce2013-10-10 17:11:55 +00002015 ++ConstantBusCount;
Tom Stellard73ae1cb2014-09-23 21:26:25 +00002016 SGPRUsed = MO.getReg();
2017 } else {
2018 ++ConstantBusCount;
Tom Stellard93fabce2013-10-10 17:11:55 +00002019 }
2020 }
Tom Stellard93fabce2013-10-10 17:11:55 +00002021 }
2022 if (ConstantBusCount > 1) {
2023 ErrInfo = "VOP* instruction uses the constant bus more than once";
2024 return false;
2025 }
2026 }
2027
Matt Arsenaultbecb1402014-06-23 18:28:31 +00002028 // Verify misc. restrictions on specific instructions.
2029 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
2030 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002031 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2032 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
2033 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
Matt Arsenaultbecb1402014-06-23 18:28:31 +00002034 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
2035 if (!compareMachineOp(Src0, Src1) &&
2036 !compareMachineOp(Src0, Src2)) {
2037 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
2038 return false;
2039 }
2040 }
2041 }
2042
Matt Arsenault7ccf6cd2016-09-16 21:41:16 +00002043 if (isSOPK(MI)) {
2044 int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
2045 if (sopkIsZext(MI)) {
2046 if (!isUInt<16>(Imm)) {
2047 ErrInfo = "invalid immediate for SOPK instruction";
2048 return false;
2049 }
2050 } else {
2051 if (!isInt<16>(Imm)) {
2052 ErrInfo = "invalid immediate for SOPK instruction";
2053 return false;
2054 }
2055 }
2056 }
2057
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002058 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
2059 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
2060 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2061 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
2062 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2063 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
2064
2065 const unsigned StaticNumOps = Desc.getNumOperands() +
2066 Desc.getNumImplicitUses();
2067 const unsigned NumImplicitOps = IsDst ? 2 : 1;
2068
Nicolai Haehnle368972c2016-11-02 17:03:11 +00002069 // Allow additional implicit operands. This allows a fixup done by the post
2070 // RA scheduler where the main implicit operand is killed and implicit-defs
2071 // are added for sub-registers that remain live after this instruction.
2072 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002073 ErrInfo = "missing implicit register operands";
2074 return false;
2075 }
2076
2077 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2078 if (IsDst) {
2079 if (!Dst->isUse()) {
2080 ErrInfo = "v_movreld_b32 vdst should be a use operand";
2081 return false;
2082 }
2083
2084 unsigned UseOpIdx;
2085 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
2086 UseOpIdx != StaticNumOps + 1) {
2087 ErrInfo = "movrel implicit operands should be tied";
2088 return false;
2089 }
2090 }
2091
2092 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2093 const MachineOperand &ImpUse
2094 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
2095 if (!ImpUse.isReg() || !ImpUse.isUse() ||
2096 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
2097 ErrInfo = "src0 should be subreg of implicit vector use";
2098 return false;
2099 }
2100 }
2101
Matt Arsenaultd092a062015-10-02 18:58:37 +00002102 // Make sure we aren't losing exec uses in the td files. This mostly requires
2103 // being careful when using let Uses to try to add other use registers.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002104 if (shouldReadExec(MI)) {
2105 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
Matt Arsenaultd092a062015-10-02 18:58:37 +00002106 ErrInfo = "VALU instruction does not implicitly read exec mask";
2107 return false;
2108 }
2109 }
2110
Matt Arsenault7b647552016-10-28 21:55:15 +00002111 if (isSMRD(MI)) {
2112 if (MI.mayStore()) {
2113 // The register offset form of scalar stores may only use m0 as the
2114 // soffset register.
2115 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
2116 if (Soff && Soff->getReg() != AMDGPU::M0) {
2117 ErrInfo = "scalar stores must use m0 as offset register";
2118 return false;
2119 }
2120 }
2121 }
2122
Tom Stellard93fabce2013-10-10 17:11:55 +00002123 return true;
2124}
2125
Matt Arsenaultf14032a2013-11-15 22:02:28 +00002126unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
Tom Stellard82166022013-11-13 23:36:37 +00002127 switch (MI.getOpcode()) {
2128 default: return AMDGPU::INSTRUCTION_LIST_END;
2129 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
2130 case AMDGPU::COPY: return AMDGPU::COPY;
2131 case AMDGPU::PHI: return AMDGPU::PHI;
Tom Stellard204e61b2014-04-07 19:45:45 +00002132 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
Tom Stellarde0387202014-03-21 15:51:54 +00002133 case AMDGPU::S_MOV_B32:
2134 return MI.getOperand(1).isReg() ?
Tom Stellard8c12fd92014-03-24 16:12:34 +00002135 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
Tom Stellard80942a12014-09-05 14:07:59 +00002136 case AMDGPU::S_ADD_I32:
2137 case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
Matt Arsenault43b8e4e2013-11-18 20:09:29 +00002138 case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
Tom Stellard80942a12014-09-05 14:07:59 +00002139 case AMDGPU::S_SUB_I32:
2140 case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
Matt Arsenault43b8e4e2013-11-18 20:09:29 +00002141 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
Matt Arsenault869cd072014-09-03 23:24:35 +00002142 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
Matt Arsenault124384f2016-09-09 23:32:53 +00002143 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
2144 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
2145 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
2146 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
2147 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
2148 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
2149 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
Tom Stellard82166022013-11-13 23:36:37 +00002150 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
2151 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
2152 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
2153 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
2154 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
2155 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
Matt Arsenault27cc9582014-04-18 01:53:18 +00002156 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
2157 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
Matt Arsenault78b86702014-04-18 05:19:26 +00002158 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
2159 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
Marek Olsak63a7b082015-03-24 13:40:21 +00002160 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
Matt Arsenault43160e72014-06-18 17:13:57 +00002161 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
Matt Arsenault2c335622014-04-09 07:16:16 +00002162 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
Matt Arsenault689f3252014-06-09 16:36:31 +00002163 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
Matt Arsenault0cb92e12014-04-11 19:25:18 +00002164 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
2165 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
2166 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
2167 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
2168 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
2169 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
Tom Stellardbc4497b2016-02-12 23:45:29 +00002170 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
2171 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
2172 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
2173 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
2174 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
2175 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
Matt Arsenault7b1dc2c2016-09-17 02:02:19 +00002176 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
2177 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
Marek Olsakc5368502015-01-15 18:43:01 +00002178 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
Matt Arsenault295b86e2014-06-17 17:36:27 +00002179 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
Matt Arsenault85796012014-06-17 17:36:24 +00002180 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
Marek Olsakd2af89d2015-03-04 17:33:45 +00002181 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
Tom Stellardbc4497b2016-02-12 23:45:29 +00002182 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
2183 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
Tom Stellard82166022013-11-13 23:36:37 +00002184 }
2185}
2186
2187bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
2188 return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
2189}
2190
2191const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
2192 unsigned OpNo) const {
2193 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2194 const MCInstrDesc &Desc = get(MI.getOpcode());
2195 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
Matt Arsenault102a7042014-12-11 23:37:34 +00002196 Desc.OpInfo[OpNo].RegClass == -1) {
2197 unsigned Reg = MI.getOperand(OpNo).getReg();
2198
2199 if (TargetRegisterInfo::isVirtualRegister(Reg))
2200 return MRI.getRegClass(Reg);
Matt Arsenault11a4d672015-02-13 19:05:03 +00002201 return RI.getPhysRegClass(Reg);
Matt Arsenault102a7042014-12-11 23:37:34 +00002202 }
Tom Stellard82166022013-11-13 23:36:37 +00002203
2204 unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2205 return RI.getRegClass(RCID);
2206}
2207
2208bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
2209 switch (MI.getOpcode()) {
2210 case AMDGPU::COPY:
2211 case AMDGPU::REG_SEQUENCE:
Tom Stellard4f3b04d2014-04-17 21:00:07 +00002212 case AMDGPU::PHI:
Tom Stellarda5687382014-05-15 14:41:55 +00002213 case AMDGPU::INSERT_SUBREG:
Tom Stellard82166022013-11-13 23:36:37 +00002214 return RI.hasVGPRs(getOpRegClass(MI, 0));
2215 default:
2216 return RI.hasVGPRs(getOpRegClass(MI, OpNo));
2217 }
2218}
2219
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002220void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
Tom Stellard82166022013-11-13 23:36:37 +00002221 MachineBasicBlock::iterator I = MI;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002222 MachineBasicBlock *MBB = MI.getParent();
2223 MachineOperand &MO = MI.getOperand(OpIdx);
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002224 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002225 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
Tom Stellard82166022013-11-13 23:36:37 +00002226 const TargetRegisterClass *RC = RI.getRegClass(RCID);
2227 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002228 if (MO.isReg())
Tom Stellard82166022013-11-13 23:36:37 +00002229 Opcode = AMDGPU::COPY;
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002230 else if (RI.isSGPRClass(RC))
Matt Arsenault671a0052013-11-14 10:08:50 +00002231 Opcode = AMDGPU::S_MOV_B32;
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002232
Matt Arsenault3a4d86a2013-11-18 20:09:55 +00002233 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002234 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
Tom Stellard0c93c9e2014-09-05 14:08:01 +00002235 VRC = &AMDGPU::VReg_64RegClass;
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002236 else
Tom Stellard45c0b3a2015-01-07 20:59:25 +00002237 VRC = &AMDGPU::VGPR_32RegClass;
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002238
Matt Arsenault3a4d86a2013-11-18 20:09:55 +00002239 unsigned Reg = MRI.createVirtualRegister(VRC);
Matt Arsenault3f3a2752014-10-13 15:47:59 +00002240 DebugLoc DL = MBB->findDebugLoc(I);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002241 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO);
Tom Stellard82166022013-11-13 23:36:37 +00002242 MO.ChangeToRegister(Reg, false);
2243}
2244
Tom Stellard15834092014-03-21 15:51:57 +00002245unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
2246 MachineRegisterInfo &MRI,
2247 MachineOperand &SuperReg,
2248 const TargetRegisterClass *SuperRC,
2249 unsigned SubIdx,
2250 const TargetRegisterClass *SubRC)
2251 const {
Matt Arsenaultc8e2ce42015-09-24 07:16:37 +00002252 MachineBasicBlock *MBB = MI->getParent();
2253 DebugLoc DL = MI->getDebugLoc();
Tom Stellard15834092014-03-21 15:51:57 +00002254 unsigned SubReg = MRI.createVirtualRegister(SubRC);
2255
Matt Arsenaultc8e2ce42015-09-24 07:16:37 +00002256 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
2257 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2258 .addReg(SuperReg.getReg(), 0, SubIdx);
2259 return SubReg;
2260 }
2261
Tom Stellard15834092014-03-21 15:51:57 +00002262 // Just in case the super register is itself a sub-register, copy it to a new
Matt Arsenault08d84942014-06-03 23:06:13 +00002263 // value so we don't need to worry about merging its subreg index with the
2264 // SubIdx passed to this function. The register coalescer should be able to
Tom Stellard15834092014-03-21 15:51:57 +00002265 // eliminate this extra copy.
Matt Arsenaultc8e2ce42015-09-24 07:16:37 +00002266 unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
Tom Stellard15834092014-03-21 15:51:57 +00002267
Matt Arsenault7480a0e2014-11-17 21:11:37 +00002268 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
2269 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
2270
2271 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2272 .addReg(NewSuperReg, 0, SubIdx);
2273
Tom Stellard15834092014-03-21 15:51:57 +00002274 return SubReg;
2275}
2276
Matt Arsenault248b7b62014-03-24 20:08:09 +00002277MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
2278 MachineBasicBlock::iterator MII,
2279 MachineRegisterInfo &MRI,
2280 MachineOperand &Op,
2281 const TargetRegisterClass *SuperRC,
2282 unsigned SubIdx,
2283 const TargetRegisterClass *SubRC) const {
2284 if (Op.isImm()) {
Matt Arsenault248b7b62014-03-24 20:08:09 +00002285 if (SubIdx == AMDGPU::sub0)
Matt Arsenaultd745c282016-09-08 17:44:36 +00002286 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
Matt Arsenault248b7b62014-03-24 20:08:09 +00002287 if (SubIdx == AMDGPU::sub1)
Matt Arsenaultd745c282016-09-08 17:44:36 +00002288 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
Matt Arsenault248b7b62014-03-24 20:08:09 +00002289
2290 llvm_unreachable("Unhandled register index for immediate");
2291 }
2292
2293 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
2294 SubIdx, SubRC);
2295 return MachineOperand::CreateReg(SubReg, false);
2296}
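// e.g. for the 64-bit immediate 0x0000000100000002, sub0 extracts the low
// half as imm(2) and sub1 the high half as imm(1); register inputs instead go
// through buildExtractSubReg and come back as a plain register operand.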
2297
Marek Olsakbe047802014-12-07 12:19:03 +00002298// Change the order of operands from (0, 1, 2) to (0, 2, 1)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002299void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
2300 assert(Inst.getNumExplicitOperands() == 3);
2301 MachineOperand Op1 = Inst.getOperand(1);
2302 Inst.RemoveOperand(1);
2303 Inst.addOperand(Op1);
Marek Olsakbe047802014-12-07 12:19:03 +00002304}
2305
Matt Arsenault856d1922015-12-01 19:57:17 +00002306bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
2307 const MCOperandInfo &OpInfo,
2308 const MachineOperand &MO) const {
2309 if (!MO.isReg())
2310 return false;
2311
2312 unsigned Reg = MO.getReg();
2313 const TargetRegisterClass *RC =
2314 TargetRegisterInfo::isVirtualRegister(Reg) ?
2315 MRI.getRegClass(Reg) :
2316 RI.getPhysRegClass(Reg);
2317
Nicolai Haehnle82fc9622016-01-07 17:10:29 +00002318 const SIRegisterInfo *TRI =
2319 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
2320 RC = TRI->getSubRegClass(RC, MO.getSubReg());
2321
Matt Arsenault856d1922015-12-01 19:57:17 +00002322 // In order to be legal, the common sub-class must be equal to the
2323 // class of the current operand. For example:
2324 //
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002325 // v_mov_b32 s0 ; Operand defined as vsrc_b32
2326 // ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL
Matt Arsenault856d1922015-12-01 19:57:17 +00002327 //
2328 // s_sendmsg 0, s0 ; Operand defined as m0reg
2329 // ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
2330
2331 return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
2332}
2333
2334bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
2335 const MCOperandInfo &OpInfo,
2336 const MachineOperand &MO) const {
2337 if (MO.isReg())
2338 return isLegalRegOperand(MRI, OpInfo, MO);
2339
2340 // Handle non-register types that are treated like immediates.
2341 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
2342 return true;
2343}
2344
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002345bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
Tom Stellard0e975cf2014-08-01 00:32:35 +00002346 const MachineOperand *MO) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002347 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2348 const MCInstrDesc &InstDesc = MI.getDesc();
Tom Stellard0e975cf2014-08-01 00:32:35 +00002349 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
2350 const TargetRegisterClass *DefinedRC =
2351 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
2352 if (!MO)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002353 MO = &MI.getOperand(OpIdx);
Tom Stellard0e975cf2014-08-01 00:32:35 +00002354
Matt Arsenault4bd72362016-12-10 00:39:12 +00002355 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
Matt Arsenaultfcb345f2016-02-11 06:15:39 +00002356
2357 RegSubRegPair SGPRUsed;
2358 if (MO->isReg())
2359 SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg());
2360
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002361 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
Tom Stellard73ae1cb2014-09-23 21:26:25 +00002362 if (i == OpIdx)
2363 continue;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002364 const MachineOperand &Op = MI.getOperand(i);
Matt Arsenaultffc82752016-07-05 17:09:01 +00002365 if (Op.isReg()) {
2366 if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
Matt Arsenault4bd72362016-12-10 00:39:12 +00002367 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
Matt Arsenaultffc82752016-07-05 17:09:01 +00002368 return false;
2369 }
2370 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
Tom Stellard73ae1cb2014-09-23 21:26:25 +00002371 return false;
2372 }
2373 }
2374 }
2375
Tom Stellard0e975cf2014-08-01 00:32:35 +00002376 if (MO->isReg()) {
2377 assert(DefinedRC);
Matt Arsenault856d1922015-12-01 19:57:17 +00002378 return isLegalRegOperand(MRI, OpInfo, *MO);
Tom Stellard0e975cf2014-08-01 00:32:35 +00002379 }
2380
Tom Stellard0e975cf2014-08-01 00:32:35 +00002381 // Handle non-register types that are treated like immediates.
Tom Stellardfb77f002015-01-13 22:59:41 +00002382 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
Tom Stellard0e975cf2014-08-01 00:32:35 +00002383
Matt Arsenault4364fef2014-09-23 18:30:57 +00002384 if (!DefinedRC) {
2385 // This operand expects an immediate.
Tom Stellard0e975cf2014-08-01 00:32:35 +00002386 return true;
Matt Arsenault4364fef2014-09-23 18:30:57 +00002387 }
Tom Stellard0e975cf2014-08-01 00:32:35 +00002388
Tom Stellard73ae1cb2014-09-23 21:26:25 +00002389 return isImmOperandLegal(MI, OpIdx, *MO);
Tom Stellard0e975cf2014-08-01 00:32:35 +00002390}
2391
Matt Arsenault856d1922015-12-01 19:57:17 +00002392void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002393 MachineInstr &MI) const {
2394 unsigned Opc = MI.getOpcode();
Matt Arsenault856d1922015-12-01 19:57:17 +00002395 const MCInstrDesc &InstrDesc = get(Opc);
2396
2397 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002398 MachineOperand &Src1 = MI.getOperand(Src1Idx);
Matt Arsenault856d1922015-12-01 19:57:17 +00002399
2400 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
2401 // we need to only have one constant bus use.
2402 //
2403 // Note we do not need to worry about literal constants here. They are
2404 // disabled for the operand type for instructions because they will always
2405 // violate the one constant bus use rule.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002406 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
Matt Arsenault856d1922015-12-01 19:57:17 +00002407 if (HasImplicitSGPR) {
2408 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002409 MachineOperand &Src0 = MI.getOperand(Src0Idx);
Matt Arsenault856d1922015-12-01 19:57:17 +00002410
2411 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
2412 legalizeOpWithMove(MI, Src0Idx);
2413 }
2414
2415 // The src0 operand of VOP2 instructions supports all operand types, so we don't
2416 // need to check its legality. If src1 is already legal, we don't need to do anything.
2417 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
2418 return;
2419
2420 // We do not use commuteInstruction here because it is too aggressive and will
2421 // commute if it is possible. We only want to commute here if it improves
2422 // legality. This can be called a fairly large number of times so don't waste
2423 // compile time pointlessly swapping and checking legality again.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002424 if (HasImplicitSGPR || !MI.isCommutable()) {
Matt Arsenault856d1922015-12-01 19:57:17 +00002425 legalizeOpWithMove(MI, Src1Idx);
2426 return;
2427 }
2428
2429 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002430 MachineOperand &Src0 = MI.getOperand(Src0Idx);
Matt Arsenault856d1922015-12-01 19:57:17 +00002431
2432 // If src0 can be used as src1, commuting will make the operands legal.
2433 // Otherwise we have to give up and insert a move.
2434 //
2435 // TODO: Other immediate-like operand kinds could be commuted if there was a
2436 // MachineOperand::ChangeTo* for them.
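  // e.g. (sketch, hypothetical registers) V_ADD_I32 %d, %vcc, %v1, %s0 is
  // illegal because src1 cannot take an SGPR, but commuting it to
  // V_ADD_I32 %d, %vcc, %s0, %v1 is legal since src0 accepts SGPRs.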
2437 if ((!Src1.isImm() && !Src1.isReg()) ||
2438 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
2439 legalizeOpWithMove(MI, Src1Idx);
2440 return;
2441 }
2442
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002443 int CommutedOpc = commuteOpcode(MI);
Matt Arsenault856d1922015-12-01 19:57:17 +00002444 if (CommutedOpc == -1) {
2445 legalizeOpWithMove(MI, Src1Idx);
2446 return;
2447 }
2448
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002449 MI.setDesc(get(CommutedOpc));
Matt Arsenault856d1922015-12-01 19:57:17 +00002450
2451 unsigned Src0Reg = Src0.getReg();
2452 unsigned Src0SubReg = Src0.getSubReg();
2453 bool Src0Kill = Src0.isKill();
2454
2455 if (Src1.isImm())
2456 Src0.ChangeToImmediate(Src1.getImm());
2457 else if (Src1.isReg()) {
2458 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
2459 Src0.setSubReg(Src1.getSubReg());
2460 } else
2461 llvm_unreachable("Should only have register or immediate operands");
2462
2463 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
2464 Src1.setSubReg(Src0SubReg);
2465}
2466
Matt Arsenault6005fcb2015-10-21 21:51:02 +00002467// Legalize VOP3 operands. Because all operand types are supported for any
2468// operand, and since literal constants are not allowed and should never be
2469// seen, we only need to worry about inserting copies if we use multiple SGPR
2470// operands.
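// For example (sketch):
//   V_FMA_F32 v0, s0, v1, s0   ; ok: s0 is the one allowed SGPR operand
//   V_FMA_F32 v0, s0, s1, v1   ; s1 is a second SGPR and must be moved to a
//                              ; VGPR with legalizeOpWithMove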
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002471void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
2472 MachineInstr &MI) const {
2473 unsigned Opc = MI.getOpcode();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00002474
2475 int VOP3Idx[3] = {
2476 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
2477 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
2478 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
2479 };
2480
2481 // Find the one SGPR operand we are allowed to use.
2482 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
2483
2484 for (unsigned i = 0; i < 3; ++i) {
2485 int Idx = VOP3Idx[i];
2486 if (Idx == -1)
2487 break;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002488 MachineOperand &MO = MI.getOperand(Idx);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00002489
2490 // We should never see a VOP3 instruction with an illegal immediate operand.
2491 if (!MO.isReg())
2492 continue;
2493
2494 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
2495 continue; // VGPRs are legal
2496
2497 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
2498 SGPRReg = MO.getReg();
2499 // We can use one SGPR in each VOP3 instruction.
2500 continue;
2501 }
2502
2503 // If we make it this far, then the operand is not legal and we must
2504 // legalize it.
2505 legalizeOpWithMove(MI, Idx);
2506 }
2507}
2508
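// Read a VGPR-held value into a new SGPR-class register by emitting one
// V_READFIRSTLANE_B32 per 32-bit subregister of \p SrcReg and recombining the
// results with a REG_SEQUENCE. Roughly, for a VReg_64 source (sketch):
//   %lo = V_READFIRSTLANE_B32 %src:sub0
//   %hi = V_READFIRSTLANE_B32 %src:sub1
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1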
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002509unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
2510 MachineRegisterInfo &MRI) const {
Tom Stellard1397d492016-02-11 21:45:07 +00002511 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
2512 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
2513 unsigned DstReg = MRI.createVirtualRegister(SRC);
2514 unsigned SubRegs = VRC->getSize() / 4;
2515
2516 SmallVector<unsigned, 8> SRegs;
2517 for (unsigned i = 0; i < SubRegs; ++i) {
2518 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002519 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
Tom Stellard1397d492016-02-11 21:45:07 +00002520 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002521 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
Tom Stellard1397d492016-02-11 21:45:07 +00002522 SRegs.push_back(SGPR);
2523 }
2524
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002525 MachineInstrBuilder MIB =
2526 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2527 get(AMDGPU::REG_SEQUENCE), DstReg);
Tom Stellard1397d492016-02-11 21:45:07 +00002528 for (unsigned i = 0; i < SubRegs; ++i) {
2529 MIB.addReg(SRegs[i]);
2530 MIB.addImm(RI.getSubRegFromChannel(i));
2531 }
2532 return DstReg;
2533}
2534
Tom Stellard467b5b92016-02-20 00:37:25 +00002535void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002536 MachineInstr &MI) const {
Tom Stellard467b5b92016-02-20 00:37:25 +00002537
2538 // If the pointer is stored in VGPRs, then we need to move it to
2539 // SGPRs using v_readfirstlane. This is safe because we only select
2540 // loads with uniform pointers to SMRD instructions, so we know the
2541 // pointer value is uniform.
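  // In effect (sketch): an s_load_dword whose 64-bit base ended up in a
  // VReg_64 has its sbase rebuilt from two v_readfirstlane_b32 results; see
  // readlaneVGPRToSGPR above.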
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002542 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
Tom Stellard467b5b92016-02-20 00:37:25 +00002543 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
2544 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
2545 SBase->setReg(SGPR);
2546 }
2547}
2548
Tom Stellard0d162b12016-11-16 18:42:17 +00002549void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
2550 MachineBasicBlock::iterator I,
2551 const TargetRegisterClass *DstRC,
2552 MachineOperand &Op,
2553 MachineRegisterInfo &MRI,
2554 const DebugLoc &DL) const {
2555
2556 unsigned OpReg = Op.getReg();
2557 unsigned OpSubReg = Op.getSubReg();
2558
2559 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
2560 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
2561
2562 // Check if operand is already the correct register class.
2563 if (DstRC == OpRC)
2564 return;
2565
2566 unsigned DstReg = MRI.createVirtualRegister(DstRC);
2567 MachineInstr *Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg)
2568 .addOperand(Op);
2569
2570 Op.setReg(DstReg);
2571 Op.setSubReg(0);
2572
2573 MachineInstr *Def = MRI.getVRegDef(OpReg);
2574 if (!Def)
2575 return;
2576
2577 // Try to eliminate the copy if it is copying an immediate value.
2578 if (Def->isMoveImmediate())
2579 FoldImmediate(*Copy, *Def, OpReg, &MRI);
2580}
2581
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002582void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
Nicolai Haehnlece2b5892016-11-18 11:55:52 +00002583 MachineFunction &MF = *MI.getParent()->getParent();
2584 MachineRegisterInfo &MRI = MF.getRegInfo();
Tom Stellard82166022013-11-13 23:36:37 +00002585
2586 // Legalize VOP2
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002587 if (isVOP2(MI) || isVOPC(MI)) {
Matt Arsenault856d1922015-12-01 19:57:17 +00002588 legalizeOperandsVOP2(MRI, MI);
Tom Stellard0e975cf2014-08-01 00:32:35 +00002589 return;
Tom Stellard82166022013-11-13 23:36:37 +00002590 }
2591
2592 // Legalize VOP3
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002593 if (isVOP3(MI)) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00002594 legalizeOperandsVOP3(MRI, MI);
Matt Arsenaulte068f9a2015-09-24 07:51:28 +00002595 return;
Tom Stellard82166022013-11-13 23:36:37 +00002596 }
2597
Tom Stellard467b5b92016-02-20 00:37:25 +00002598 // Legalize SMRD
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002599 if (isSMRD(MI)) {
Tom Stellard467b5b92016-02-20 00:37:25 +00002600 legalizeOperandsSMRD(MRI, MI);
2601 return;
2602 }
2603
Tom Stellard4f3b04d2014-04-17 21:00:07 +00002604 // Legalize REG_SEQUENCE and PHI
Tom Stellard82166022013-11-13 23:36:37 +00002605 // The register class of the operands must be the same type as the register
2606 // class of the output.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002607 if (MI.getOpcode() == AMDGPU::PHI) {
Craig Topper062a2ba2014-04-25 05:30:21 +00002608 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002609 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2610 if (!MI.getOperand(i).isReg() ||
2611 !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
Tom Stellard82166022013-11-13 23:36:37 +00002612 continue;
2613 const TargetRegisterClass *OpRC =
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002614 MRI.getRegClass(MI.getOperand(i).getReg());
Tom Stellard82166022013-11-13 23:36:37 +00002615 if (RI.hasVGPRs(OpRC)) {
2616 VRC = OpRC;
2617 } else {
2618 SRC = OpRC;
2619 }
2620 }
2621
2622 // If any of the operands are VGPR registers, then they all must be VGPRs;
2623 // otherwise we will create illegal VGPR->SGPR copies when legalizing
2624 // them.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002625 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
Tom Stellard82166022013-11-13 23:36:37 +00002626 if (!VRC) {
2627 assert(SRC);
2628 VRC = RI.getEquivalentVGPRClass(SRC);
2629 }
2630 RC = VRC;
2631 } else {
2632 RC = SRC;
2633 }
2634
2635 // Update all the operands so they have the same type.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002636 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2637 MachineOperand &Op = MI.getOperand(I);
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00002638 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
Tom Stellard82166022013-11-13 23:36:37 +00002639 continue;
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00002640
2641 // MI is a PHI instruction.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002642 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00002643 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
2644
Tom Stellard0d162b12016-11-16 18:42:17 +00002645 // Avoid creating no-op copies with the same src and dst reg class. These
2646 // confuse some of the machine passes.
2647 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00002648 }
2649 }
2650
2651 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
2652 // VGPR dest type and SGPR sources, insert copies so all operands are
2653 // VGPRs. This seems to help operand folding / the register coalescer.
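  // e.g. (sketch) a REG_SEQUENCE producing a VReg_64 from two SGPR_32 sources
  // gets a COPY into a fresh VGPR inserted for each source operand.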
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002654 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
2655 MachineBasicBlock *MBB = MI.getParent();
2656 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00002657 if (RI.hasVGPRs(DstRC)) {
2658 // Update all the operands so they are VGPR register classes. These may
2659 // not be the same register class because REG_SEQUENCE supports mixing
2660 // subregister index types e.g. sub0_sub1 + sub2 + sub3
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002661 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2662 MachineOperand &Op = MI.getOperand(I);
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00002663 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2664 continue;
2665
2666 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
2667 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
2668 if (VRC == OpRC)
2669 continue;
2670
Tom Stellard0d162b12016-11-16 18:42:17 +00002671 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00002672 Op.setIsKill();
Tom Stellard4f3b04d2014-04-17 21:00:07 +00002673 }
Tom Stellard82166022013-11-13 23:36:37 +00002674 }
Matt Arsenaulte068f9a2015-09-24 07:51:28 +00002675
2676 return;
Tom Stellard82166022013-11-13 23:36:37 +00002677 }
Tom Stellard15834092014-03-21 15:51:57 +00002678
Tom Stellarda5687382014-05-15 14:41:55 +00002679 // Legalize INSERT_SUBREG
2680 // src0 must have the same register class as dst
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002681 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
2682 unsigned Dst = MI.getOperand(0).getReg();
2683 unsigned Src0 = MI.getOperand(1).getReg();
Tom Stellarda5687382014-05-15 14:41:55 +00002684 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
2685 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
2686 if (DstRC != Src0RC) {
Tom Stellard0d162b12016-11-16 18:42:17 +00002687 MachineBasicBlock *MBB = MI.getParent();
2688 MachineOperand &Op = MI.getOperand(1);
2689 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
Tom Stellarda5687382014-05-15 14:41:55 +00002690 }
2691 return;
2692 }
2693
Nicolai Haehnlece2b5892016-11-18 11:55:52 +00002694 // Legalize MIMG and MUBUF/MTBUF for shaders.
2695 //
2696 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
2697 // scratch memory access. In both cases, the legalization never involves
2698 // conversion to the addr64 form.
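  // For these, legalization only needs to move srsrc (and ssamp for MIMG)
  // back into SGPRs via readlaneVGPRToSGPR; no vaddr rewriting is required.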
2699 if (isMIMG(MI) ||
2700 (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
2701 (isMUBUF(MI) || isMTBUF(MI)))) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002702 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
Tom Stellard1397d492016-02-11 21:45:07 +00002703 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
2704 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
2705 SRsrc->setReg(SGPR);
2706 }
2707
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002708 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
Tom Stellard1397d492016-02-11 21:45:07 +00002709 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
2710 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
2711 SSamp->setReg(SGPR);
2712 }
2713 return;
2714 }
2715
Nicolai Haehnlece2b5892016-11-18 11:55:52 +00002716 // Legalize MUBUF* instructions by converting to addr64 form.
Tom Stellard15834092014-03-21 15:51:57 +00002717 // FIXME: If we start using the non-addr64 instructions for compute, we
Nicolai Haehnlece2b5892016-11-18 11:55:52 +00002718 // may need to legalize them as above. This especially applies to the
2719 // buffer_load_format_* variants and variants with idxen (or bothen).
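  // Rough sketch of the conversion done below (registers are illustrative):
  //   buffer_load_dword v0, s[4:7], 0 offset:16        ; rsrc is not legal
  // becomes
  //   buffer_load_dword v0, v[2:3], s[8:11], 0 addr64 offset:16
  // where v[2:3] holds the pointer extracted from the old descriptor and
  // s[8:11] is a newly built descriptor whose base pointer is zero.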
Tom Stellard155bbb72014-08-11 22:18:17 +00002720 int SRsrcIdx =
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002721 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
Tom Stellard155bbb72014-08-11 22:18:17 +00002722 if (SRsrcIdx != -1) {
2723 // We have an MUBUF instruction
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002724 MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
2725 unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
Tom Stellard155bbb72014-08-11 22:18:17 +00002726 if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
2727 RI.getRegClass(SRsrcRC))) {
2728 // The operands are legal.
2729 // FIXME: We may need to legalize operands besides srsrc.
2730 return;
2731 }
Tom Stellard15834092014-03-21 15:51:57 +00002732
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002733 MachineBasicBlock &MBB = *MI.getParent();
Matt Arsenaultef67d762015-09-09 17:03:29 +00002734
Eric Christopher572e03a2015-06-19 01:53:21 +00002735 // Extract the ptr from the resource descriptor.
Matt Arsenaultef67d762015-09-09 17:03:29 +00002736 unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
2737 &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
Tom Stellard15834092014-03-21 15:51:57 +00002738
Tom Stellard155bbb72014-08-11 22:18:17 +00002739 // Create an empty resource descriptor
2740 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2741 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2742 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2743 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
Tom Stellard794c8c02014-12-02 17:05:41 +00002744 uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
Tom Stellard15834092014-03-21 15:51:57 +00002745
Tom Stellard155bbb72014-08-11 22:18:17 +00002746 // Zero64 = 0
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002747 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
2748 .addImm(0);
Tom Stellard15834092014-03-21 15:51:57 +00002749
Tom Stellard155bbb72014-08-11 22:18:17 +00002750 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002751 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
2752 .addImm(RsrcDataFormat & 0xFFFFFFFF);
Tom Stellard15834092014-03-21 15:51:57 +00002753
Tom Stellard155bbb72014-08-11 22:18:17 +00002754 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002755 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
2756 .addImm(RsrcDataFormat >> 32);
Tom Stellard15834092014-03-21 15:51:57 +00002757
Tom Stellard155bbb72014-08-11 22:18:17 +00002758 // NewSRsrc = {Zero64, SRsrcFormat}
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002759 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
2760 .addReg(Zero64)
2761 .addImm(AMDGPU::sub0_sub1)
2762 .addReg(SRsrcFormatLo)
2763 .addImm(AMDGPU::sub2)
2764 .addReg(SRsrcFormatHi)
2765 .addImm(AMDGPU::sub3);
Tom Stellard155bbb72014-08-11 22:18:17 +00002766
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002767 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
Tom Stellard155bbb72014-08-11 22:18:17 +00002768 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Tom Stellard155bbb72014-08-11 22:18:17 +00002769 if (VAddr) {
2770 // This is already an ADDR64 instruction so we need to add the pointer
2771 // extracted from the resource descriptor to the current value of VAddr.
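      // That is, NewVAddr = SRsrcPtr + VAddr, done as a 32-bit add of the low
      // halves followed by an add-with-carry of the high halves through VCC.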
Matt Arsenaultef67d762015-09-09 17:03:29 +00002772 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2773 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Tom Stellard155bbb72014-08-11 22:18:17 +00002774
Matt Arsenaultef67d762015-09-09 17:03:29 +00002775 // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002776 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault51d2d0f2015-09-01 02:02:21 +00002777 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
Matt Arsenaultef67d762015-09-09 17:03:29 +00002778 .addReg(SRsrcPtr, 0, AMDGPU::sub0)
Matt Arsenault51d2d0f2015-09-01 02:02:21 +00002779 .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
Tom Stellard15834092014-03-21 15:51:57 +00002780
Matt Arsenaultef67d762015-09-09 17:03:29 +00002781 // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
Matt Arsenault51d2d0f2015-09-01 02:02:21 +00002782 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
Matt Arsenaultef67d762015-09-09 17:03:29 +00002783 .addReg(SRsrcPtr, 0, AMDGPU::sub1)
Matt Arsenault51d2d0f2015-09-01 02:02:21 +00002784 .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
Tom Stellard15834092014-03-21 15:51:57 +00002785
Matt Arsenaultef67d762015-09-09 17:03:29 +00002786 // NewVaddr = {NewVaddrHi, NewVaddrLo}
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002787 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
2788 .addReg(NewVAddrLo)
2789 .addImm(AMDGPU::sub0)
2790 .addReg(NewVAddrHi)
2791 .addImm(AMDGPU::sub1);
Tom Stellard155bbb72014-08-11 22:18:17 +00002792 } else {
2793 // This instruction is the _OFFSET variant, so we need to convert it to
2794 // ADDR64.
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002795 assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration()
2796 < SISubtarget::VOLCANIC_ISLANDS &&
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002797 "FIXME: Need to emit flat atomics here");
2798
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002799 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
2800 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
2801 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
2802 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002803
2804 // Atomics with return have an additional tied operand and are
2805 // missing some of the special bits.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002806 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002807 MachineInstr *Addr64;
2808
2809 if (!VDataIn) {
2810 // Regular buffer load / store.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002811 MachineInstrBuilder MIB =
2812 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2813 .addOperand(*VData)
2814 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2815 // This will be replaced later
2816 // with the new value of vaddr.
2817 .addOperand(*SRsrc)
2818 .addOperand(*SOffset)
2819 .addOperand(*Offset);
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002820
2821 // Atomics do not have this operand.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002822 if (const MachineOperand *GLC =
2823 getNamedOperand(MI, AMDGPU::OpName::glc)) {
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002824 MIB.addImm(GLC->getImm());
2825 }
2826
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002827 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002828
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002829 if (const MachineOperand *TFE =
2830 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002831 MIB.addImm(TFE->getImm());
2832 }
2833
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002834 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002835 Addr64 = MIB;
2836 } else {
2837 // Atomics with return.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002838 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2839 .addOperand(*VData)
2840 .addOperand(*VDataIn)
2841 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2842 // This will be replaced later
2843 // with the new value of vaddr.
2844 .addOperand(*SRsrc)
2845 .addOperand(*SOffset)
2846 .addOperand(*Offset)
2847 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
2848 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
Matt Arsenaulta40450c2015-11-05 02:46:56 +00002849 }
Tom Stellard15834092014-03-21 15:51:57 +00002850
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002851 MI.removeFromParent();
Tom Stellard15834092014-03-21 15:51:57 +00002852
Matt Arsenaultef67d762015-09-09 17:03:29 +00002853 // NewVaddr = SRsrcPtr, i.e. {SRsrcPtr:sub1, SRsrcPtr:sub0}
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002854 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
2855 NewVAddr)
2856 .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2857 .addImm(AMDGPU::sub0)
2858 .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2859 .addImm(AMDGPU::sub1);
Matt Arsenaultef67d762015-09-09 17:03:29 +00002860
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002861 VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
2862 SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
Tom Stellard15834092014-03-21 15:51:57 +00002863 }
Tom Stellard155bbb72014-08-11 22:18:17 +00002864
Tom Stellard155bbb72014-08-11 22:18:17 +00002865 // Update the instruction to use NewVaddr
2866 VAddr->setReg(NewVAddr);
2867 // Update the instruction to use NewSRsrc
2868 SRsrc->setReg(NewSRsrc);
Tom Stellard15834092014-03-21 15:51:57 +00002869 }
Tom Stellard82166022013-11-13 23:36:37 +00002870}
2871
2872void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
2873 SmallVector<MachineInstr *, 128> Worklist;
2874 Worklist.push_back(&TopInst);
2875
2876 while (!Worklist.empty()) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002877 MachineInstr &Inst = *Worklist.pop_back_val();
2878 MachineBasicBlock *MBB = Inst.getParent();
Tom Stellarde0387202014-03-21 15:51:54 +00002879 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2880
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002881 unsigned Opcode = Inst.getOpcode();
2882 unsigned NewOpcode = getVALUOp(Inst);
Matt Arsenault27cc9582014-04-18 01:53:18 +00002883
Tom Stellarde0387202014-03-21 15:51:54 +00002884 // Handle some special cases
Matt Arsenault27cc9582014-04-18 01:53:18 +00002885 switch (Opcode) {
Tom Stellard0c354f22014-04-30 15:31:29 +00002886 default:
Tom Stellard0c354f22014-04-30 15:31:29 +00002887 break;
Matt Arsenaultf35182c2014-03-24 20:08:05 +00002888 case AMDGPU::S_AND_B64:
Matt Arsenaultf003c382015-08-26 20:47:50 +00002889 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002890 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00002891 continue;
2892
2893 case AMDGPU::S_OR_B64:
Matt Arsenaultf003c382015-08-26 20:47:50 +00002894 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002895 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00002896 continue;
2897
2898 case AMDGPU::S_XOR_B64:
Matt Arsenaultf003c382015-08-26 20:47:50 +00002899 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002900 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00002901 continue;
2902
2903 case AMDGPU::S_NOT_B64:
Matt Arsenaultf003c382015-08-26 20:47:50 +00002904 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002905 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00002906 continue;
2907
Matt Arsenault8333e432014-06-10 19:18:24 +00002908 case AMDGPU::S_BCNT1_I32_B64:
2909 splitScalar64BitBCNT(Worklist, Inst);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002910 Inst.eraseFromParent();
Matt Arsenault8333e432014-06-10 19:18:24 +00002911 continue;
2912
Matt Arsenault94812212014-11-14 18:18:16 +00002913 case AMDGPU::S_BFE_I64: {
2914 splitScalar64BitBFE(Worklist, Inst);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002915 Inst.eraseFromParent();
Matt Arsenault94812212014-11-14 18:18:16 +00002916 continue;
2917 }
2918
Marek Olsakbe047802014-12-07 12:19:03 +00002919 case AMDGPU::S_LSHL_B32:
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002920 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
Marek Olsakbe047802014-12-07 12:19:03 +00002921 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
2922 swapOperands(Inst);
2923 }
2924 break;
2925 case AMDGPU::S_ASHR_I32:
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002926 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
Marek Olsakbe047802014-12-07 12:19:03 +00002927 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
2928 swapOperands(Inst);
2929 }
2930 break;
2931 case AMDGPU::S_LSHR_B32:
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002932 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
Marek Olsakbe047802014-12-07 12:19:03 +00002933 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
2934 swapOperands(Inst);
2935 }
2936 break;
Marek Olsak707a6d02015-02-03 21:53:01 +00002937 case AMDGPU::S_LSHL_B64:
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002938 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
Marek Olsak707a6d02015-02-03 21:53:01 +00002939 NewOpcode = AMDGPU::V_LSHLREV_B64;
2940 swapOperands(Inst);
2941 }
2942 break;
2943 case AMDGPU::S_ASHR_I64:
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002944 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
Marek Olsak707a6d02015-02-03 21:53:01 +00002945 NewOpcode = AMDGPU::V_ASHRREV_I64;
2946 swapOperands(Inst);
2947 }
2948 break;
2949 case AMDGPU::S_LSHR_B64:
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002950 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
Marek Olsak707a6d02015-02-03 21:53:01 +00002951 NewOpcode = AMDGPU::V_LSHRREV_B64;
2952 swapOperands(Inst);
2953 }
2954 break;
Marek Olsakbe047802014-12-07 12:19:03 +00002955
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00002956 case AMDGPU::S_ABS_I32:
2957 lowerScalarAbs(Worklist, Inst);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002958 Inst.eraseFromParent();
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00002959 continue;
2960
Tom Stellardbc4497b2016-02-12 23:45:29 +00002961 case AMDGPU::S_CBRANCH_SCC0:
2962 case AMDGPU::S_CBRANCH_SCC1:
2963 // Clear unused bits of vcc
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002964 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
2965 AMDGPU::VCC)
2966 .addReg(AMDGPU::EXEC)
2967 .addReg(AMDGPU::VCC);
Tom Stellardbc4497b2016-02-12 23:45:29 +00002968 break;
2969
Matt Arsenaultf35182c2014-03-24 20:08:05 +00002970 case AMDGPU::S_BFE_U64:
Matt Arsenaultf35182c2014-03-24 20:08:05 +00002971 case AMDGPU::S_BFM_B64:
2972 llvm_unreachable("Moving this op to VALU not implemented");
Tom Stellarde0387202014-03-21 15:51:54 +00002973 }
2974
Tom Stellard15834092014-03-21 15:51:57 +00002975 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
2976 // We cannot move this instruction to the VALU, so we should try to
2977 // legalize its operands instead.
2978 legalizeOperands(Inst);
Tom Stellard82166022013-11-13 23:36:37 +00002979 continue;
Tom Stellard15834092014-03-21 15:51:57 +00002980 }
Tom Stellard82166022013-11-13 23:36:37 +00002981
Tom Stellard82166022013-11-13 23:36:37 +00002982 // Use the new VALU Opcode.
2983 const MCInstrDesc &NewDesc = get(NewOpcode);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002984 Inst.setDesc(NewDesc);
Tom Stellard82166022013-11-13 23:36:37 +00002985
Matt Arsenaultf0b1e3a2013-11-18 20:09:21 +00002986 // Remove any references to SCC. Vector instructions can't read from it, and
2987 // we're just about to add the implicit use / defs of VCC, and we don't want
2988 // both.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002989 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
2990 MachineOperand &Op = Inst.getOperand(i);
Tom Stellardbc4497b2016-02-12 23:45:29 +00002991 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002992 Inst.RemoveOperand(i);
Tom Stellardbc4497b2016-02-12 23:45:29 +00002993 addSCCDefUsersToVALUWorklist(Inst, Worklist);
2994 }
Matt Arsenaultf0b1e3a2013-11-18 20:09:21 +00002995 }
2996
Matt Arsenault27cc9582014-04-18 01:53:18 +00002997 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
2998 // We are converting these to a BFE, so we need to add the missing
2999 // operands for the size and offset.
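      // e.g. (sketch) s_sext_i32_i8 s0, s1 becomes v_bfe_i32 v0, v1, 0, 8
      // once moved to the VALU.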
3000 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003001 Inst.addOperand(MachineOperand::CreateImm(0));
3002 Inst.addOperand(MachineOperand::CreateImm(Size));
Matt Arsenault27cc9582014-04-18 01:53:18 +00003003
Matt Arsenaultb5b51102014-06-10 19:18:21 +00003004 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
3005 // The VALU version adds the second operand to the result, so insert an
3006 // extra 0 operand.
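      // e.g. (sketch) s_bcnt1_i32_b32 s0, s1 becomes v_bcnt_u32_b32 v0, v1, 0.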
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003007 Inst.addOperand(MachineOperand::CreateImm(0));
Tom Stellard82166022013-11-13 23:36:37 +00003008 }
3009
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003010 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
Tom Stellard82166022013-11-13 23:36:37 +00003011
Matt Arsenault78b86702014-04-18 05:19:26 +00003012 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003013 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
Matt Arsenault78b86702014-04-18 05:19:26 +00003014 // If we need to move this to VGPRs, we need to unpack the second operand
3015 // back into the 2 separate ones for bit offset and width.
3016 assert(OffsetWidthOp.isImm() &&
3017 "Scalar BFE is only implemented for constant width and offset");
3018 uint32_t Imm = OffsetWidthOp.getImm();
3019
3020 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3021 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003022 Inst.RemoveOperand(2); // Remove old immediate.
3023 Inst.addOperand(MachineOperand::CreateImm(Offset));
3024 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
Matt Arsenault78b86702014-04-18 05:19:26 +00003025 }
3026
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003027 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
Tom Stellardbc4497b2016-02-12 23:45:29 +00003028 unsigned NewDstReg = AMDGPU::NoRegister;
3029 if (HasDst) {
3030 // Update the destination register class.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003031 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
Tom Stellardbc4497b2016-02-12 23:45:29 +00003032 if (!NewDstRC)
3033 continue;
Tom Stellard82166022013-11-13 23:36:37 +00003034
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003035 unsigned DstReg = Inst.getOperand(0).getReg();
Tom Stellard0d162b12016-11-16 18:42:17 +00003036 if (Inst.isCopy() &&
3037 TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
3038 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
3039 // Instead of creating a copy where src and dst are the same register
3040 // class, we just replace all uses of dst with src. These kinds of
3041 // copies interfere with the heuristics MachineSink uses to decide
3042 // whether or not to split a critical edge, since the pass assumes
3043 // that copies will end up as machine instructions and not be
3044 // eliminated.
3045 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
3046 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
3047 MRI.clearKillFlags(Inst.getOperand(1).getReg());
3048 Inst.getOperand(0).setReg(DstReg);
3049 continue;
3050 }
3051
Tom Stellardbc4497b2016-02-12 23:45:29 +00003052 NewDstReg = MRI.createVirtualRegister(NewDstRC);
3053 MRI.replaceRegWith(DstReg, NewDstReg);
3054 }
Tom Stellard82166022013-11-13 23:36:37 +00003055
Tom Stellarde1a24452014-04-17 21:00:01 +00003056 // Legalize the operands
3057 legalizeOperands(Inst);
3058
Tom Stellardbc4497b2016-02-12 23:45:29 +00003059 if (HasDst)
3060 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
Tom Stellard82166022013-11-13 23:36:37 +00003061 }
3062}
3063
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00003064void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003065 MachineInstr &Inst) const {
3066 MachineBasicBlock &MBB = *Inst.getParent();
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00003067 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3068 MachineBasicBlock::iterator MII = Inst;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003069 DebugLoc DL = Inst.getDebugLoc();
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00003070
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003071 MachineOperand &Dest = Inst.getOperand(0);
3072 MachineOperand &Src = Inst.getOperand(1);
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00003073 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3074 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3075
3076 BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
3077 .addImm(0)
3078 .addReg(Src.getReg());
3079
3080 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
3081 .addReg(Src.getReg())
3082 .addReg(TmpReg);
3083
3084 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3085 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3086}
3087
Matt Arsenault689f3252014-06-09 16:36:31 +00003088void SIInstrInfo::splitScalar64BitUnaryOp(
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003089 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3090 unsigned Opcode) const {
3091 MachineBasicBlock &MBB = *Inst.getParent();
Matt Arsenault689f3252014-06-09 16:36:31 +00003092 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3093
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003094 MachineOperand &Dest = Inst.getOperand(0);
3095 MachineOperand &Src0 = Inst.getOperand(1);
3096 DebugLoc DL = Inst.getDebugLoc();
Matt Arsenault689f3252014-06-09 16:36:31 +00003097
3098 MachineBasicBlock::iterator MII = Inst;
3099
3100 const MCInstrDesc &InstDesc = get(Opcode);
3101 const TargetRegisterClass *Src0RC = Src0.isReg() ?
3102 MRI.getRegClass(Src0.getReg()) :
3103 &AMDGPU::SGPR_32RegClass;
3104
3105 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3106
3107 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3108 AMDGPU::sub0, Src0SubRC);
3109
3110 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
Matt Arsenaultf003c382015-08-26 20:47:50 +00003111 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3112 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
Matt Arsenault689f3252014-06-09 16:36:31 +00003113
Matt Arsenaultf003c382015-08-26 20:47:50 +00003114 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3115 BuildMI(MBB, MII, DL, InstDesc, DestSub0)
Matt Arsenault689f3252014-06-09 16:36:31 +00003116 .addOperand(SrcReg0Sub0);
3117
3118 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3119 AMDGPU::sub1, Src0SubRC);
3120
Matt Arsenaultf003c382015-08-26 20:47:50 +00003121 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3122 BuildMI(MBB, MII, DL, InstDesc, DestSub1)
Matt Arsenault689f3252014-06-09 16:36:31 +00003123 .addOperand(SrcReg0Sub1);
3124
Matt Arsenaultf003c382015-08-26 20:47:50 +00003125 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
Matt Arsenault689f3252014-06-09 16:36:31 +00003126 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3127 .addReg(DestSub0)
3128 .addImm(AMDGPU::sub0)
3129 .addReg(DestSub1)
3130 .addImm(AMDGPU::sub1);
3131
3132 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3133
Matt Arsenaultf003c382015-08-26 20:47:50 +00003134 // We don't need to legalizeOperands here because for a single operand, src0
3135 // will support any kind of input.
3136
3137 // Move all users of this moved value.
3138 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
Matt Arsenault689f3252014-06-09 16:36:31 +00003139}
3140
3141void SIInstrInfo::splitScalar64BitBinaryOp(
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003142 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3143 unsigned Opcode) const {
3144 MachineBasicBlock &MBB = *Inst.getParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003145 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3146
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003147 MachineOperand &Dest = Inst.getOperand(0);
3148 MachineOperand &Src0 = Inst.getOperand(1);
3149 MachineOperand &Src1 = Inst.getOperand(2);
3150 DebugLoc DL = Inst.getDebugLoc();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003151
3152 MachineBasicBlock::iterator MII = Inst;
3153
3154 const MCInstrDesc &InstDesc = get(Opcode);
Matt Arsenault684dc802014-03-24 20:08:13 +00003155 const TargetRegisterClass *Src0RC = Src0.isReg() ?
3156 MRI.getRegClass(Src0.getReg()) :
3157 &AMDGPU::SGPR_32RegClass;
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003158
Matt Arsenault684dc802014-03-24 20:08:13 +00003159 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3160 const TargetRegisterClass *Src1RC = Src1.isReg() ?
3161 MRI.getRegClass(Src1.getReg()) :
3162 &AMDGPU::SGPR_32RegClass;
3163
3164 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
3165
3166 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3167 AMDGPU::sub0, Src0SubRC);
3168 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3169 AMDGPU::sub0, Src1SubRC);
3170
3171 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
Matt Arsenaultf003c382015-08-26 20:47:50 +00003172 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3173 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
Matt Arsenault684dc802014-03-24 20:08:13 +00003174
Matt Arsenaultf003c382015-08-26 20:47:50 +00003175 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003176 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3177 .addOperand(SrcReg0Sub0)
3178 .addOperand(SrcReg1Sub0);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003179
Matt Arsenault684dc802014-03-24 20:08:13 +00003180 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3181 AMDGPU::sub1, Src0SubRC);
3182 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3183 AMDGPU::sub1, Src1SubRC);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003184
Matt Arsenaultf003c382015-08-26 20:47:50 +00003185 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003186 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3187 .addOperand(SrcReg0Sub1)
3188 .addOperand(SrcReg1Sub1);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003189
Matt Arsenaultf003c382015-08-26 20:47:50 +00003190 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003191 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3192 .addReg(DestSub0)
3193 .addImm(AMDGPU::sub0)
3194 .addReg(DestSub1)
3195 .addImm(AMDGPU::sub1);
3196
3197 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3198
3199 // Try to legalize the operands in case we need to swap the order to keep it
3200 // valid.
Matt Arsenaultf003c382015-08-26 20:47:50 +00003201 legalizeOperands(LoHalf);
3202 legalizeOperands(HiHalf);
3203
3204 // Move all users of this moved value.
3205 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
Matt Arsenaultf35182c2014-03-24 20:08:05 +00003206}
3207
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003208void SIInstrInfo::splitScalar64BitBCNT(
3209 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
3210 MachineBasicBlock &MBB = *Inst.getParent();
Matt Arsenault8333e432014-06-10 19:18:24 +00003211 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3212
3213 MachineBasicBlock::iterator MII = Inst;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003214 DebugLoc DL = Inst.getDebugLoc();
Matt Arsenault8333e432014-06-10 19:18:24 +00003215
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003216 MachineOperand &Dest = Inst.getOperand(0);
3217 MachineOperand &Src = Inst.getOperand(1);
Matt Arsenault8333e432014-06-10 19:18:24 +00003218
Marek Olsakc5368502015-01-15 18:43:01 +00003219 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
Matt Arsenault8333e432014-06-10 19:18:24 +00003220 const TargetRegisterClass *SrcRC = Src.isReg() ?
3221 MRI.getRegClass(Src.getReg()) :
3222 &AMDGPU::SGPR_32RegClass;
3223
3224 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3225 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3226
3227 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
3228
3229 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3230 AMDGPU::sub0, SrcSubRC);
3231 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3232 AMDGPU::sub1, SrcSubRC);
3233
Matt Arsenault5e7f95e2015-08-26 20:48:04 +00003234 BuildMI(MBB, MII, DL, InstDesc, MidReg)
Matt Arsenault8333e432014-06-10 19:18:24 +00003235 .addOperand(SrcRegSub0)
3236 .addImm(0);
3237
Matt Arsenault5e7f95e2015-08-26 20:48:04 +00003238 BuildMI(MBB, MII, DL, InstDesc, ResultReg)
Matt Arsenault8333e432014-06-10 19:18:24 +00003239 .addOperand(SrcRegSub1)
3240 .addReg(MidReg);
3241
3242 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3243
Matt Arsenault5e7f95e2015-08-26 20:48:04 +00003244 // We don't need to legalize operands here. src0 for either instruction can be
3245 // an SGPR, and the second input is unused or determined here.
3246 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
Matt Arsenault8333e432014-06-10 19:18:24 +00003247}
3248
Matt Arsenault94812212014-11-14 18:18:16 +00003249void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003250 MachineInstr &Inst) const {
3251 MachineBasicBlock &MBB = *Inst.getParent();
Matt Arsenault94812212014-11-14 18:18:16 +00003252 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3253 MachineBasicBlock::iterator MII = Inst;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003254 DebugLoc DL = Inst.getDebugLoc();
Matt Arsenault94812212014-11-14 18:18:16 +00003255
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003256 MachineOperand &Dest = Inst.getOperand(0);
3257 uint32_t Imm = Inst.getOperand(2).getImm();
Matt Arsenault94812212014-11-14 18:18:16 +00003258 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3259 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3260
Matt Arsenault6ad34262014-11-14 18:40:49 +00003261 (void) Offset;
3262
Matt Arsenault94812212014-11-14 18:18:16 +00003263 // Only sext_inreg cases handled.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003264 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
3265 Offset == 0 && "Not implemented");
Matt Arsenault94812212014-11-14 18:18:16 +00003266
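  // Sketch for BitWidth == 16 (a 64-bit sext_inreg from i16):
  //   %lo = v_bfe_i32 %src:sub0, 0, 16   ; sign extend within the low half
  //   %hi = v_ashrrev_i32 31, %lo        ; broadcast the sign bit
  //   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1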
3267 if (BitWidth < 32) {
3268 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3269 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3270 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3271
3272 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003273 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
3274 .addImm(0)
3275 .addImm(BitWidth);
Matt Arsenault94812212014-11-14 18:18:16 +00003276
3277 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
3278 .addImm(31)
3279 .addReg(MidRegLo);
3280
3281 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3282 .addReg(MidRegLo)
3283 .addImm(AMDGPU::sub0)
3284 .addReg(MidRegHi)
3285 .addImm(AMDGPU::sub1);
3286
3287 MRI.replaceRegWith(Dest.getReg(), ResultReg);
Matt Arsenault445833c2015-08-26 20:47:58 +00003288 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
Matt Arsenault94812212014-11-14 18:18:16 +00003289 return;
3290 }
3291
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003292 MachineOperand &Src = Inst.getOperand(1);
Matt Arsenault94812212014-11-14 18:18:16 +00003293 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3294 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3295
3296 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
3297 .addImm(31)
3298 .addReg(Src.getReg(), 0, AMDGPU::sub0);
3299
3300 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3301 .addReg(Src.getReg(), 0, AMDGPU::sub0)
3302 .addImm(AMDGPU::sub0)
3303 .addReg(TmpReg)
3304 .addImm(AMDGPU::sub1);
3305
3306 MRI.replaceRegWith(Dest.getReg(), ResultReg);
Matt Arsenault445833c2015-08-26 20:47:58 +00003307 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
Matt Arsenault94812212014-11-14 18:18:16 +00003308}
3309
Matt Arsenaultf003c382015-08-26 20:47:50 +00003310void SIInstrInfo::addUsersToMoveToVALUWorklist(
3311 unsigned DstReg,
3312 MachineRegisterInfo &MRI,
3313 SmallVectorImpl<MachineInstr *> &Worklist) const {
3314 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
3315 E = MRI.use_end(); I != E; ++I) {
3316 MachineInstr &UseMI = *I->getParent();
3317 if (!canReadVGPR(UseMI, I.getOperandNo())) {
3318 Worklist.push_back(&UseMI);
3319 }
3320 }
3321}
3322
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003323void SIInstrInfo::addSCCDefUsersToVALUWorklist(
3324 MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const {
Tom Stellardbc4497b2016-02-12 23:45:29 +00003325 // This assumes that all the users of SCC are in the same block
3326 // as the SCC def.
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +00003327 for (MachineInstr &MI :
3328 llvm::make_range(MachineBasicBlock::iterator(SCCDefInst),
3329 SCCDefInst.getParent()->end())) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00003330 // Exit if we find another SCC def.
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +00003331 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
Tom Stellardbc4497b2016-02-12 23:45:29 +00003332 return;
3333
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +00003334 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
3335 Worklist.push_back(&MI);
Tom Stellardbc4497b2016-02-12 23:45:29 +00003336 }
3337}
3338
Matt Arsenaultba6aae72015-09-28 20:54:57 +00003339const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
3340 const MachineInstr &Inst) const {
3341 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
3342
3343 switch (Inst.getOpcode()) {
3344 // For target instructions, getOpRegClass just returns the virtual register
3345 // class associated with the operand, so we need to find an equivalent VGPR
3346 // register class in order to move the instruction to the VALU.
3347 case AMDGPU::COPY:
3348 case AMDGPU::PHI:
3349 case AMDGPU::REG_SEQUENCE:
3350 case AMDGPU::INSERT_SUBREG:
3351 if (RI.hasVGPRs(NewDstRC))
3352 return nullptr;
3353
3354 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
3355 if (!NewDstRC)
3356 return nullptr;
3357 return NewDstRC;
3358 default:
3359 return NewDstRC;
3360 }
3361}
3362
Matt Arsenault6c067412015-11-03 22:30:15 +00003363// Find the one SGPR operand we are allowed to use.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003364unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003365 int OpIndices[3]) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003366 const MCInstrDesc &Desc = MI.getDesc();
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003367
3368 // Find the one SGPR operand we are allowed to use.
Matt Arsenaulte223ceb2015-10-21 21:15:01 +00003369 //
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003370 // First we need to consider the instruction's operand requirements before
3371 // legalizing. Some operands are required to be SGPRs, such as implicit uses
3372 // of VCC, but we are still bound by the constant bus requirement to only use
3373 // one.
3374 //
3375 // If the operand's class is an SGPR, we can never move it.
3376
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003377 unsigned SGPRReg = findImplicitSGPRRead(MI);
Matt Arsenaulte223ceb2015-10-21 21:15:01 +00003378 if (SGPRReg != AMDGPU::NoRegister)
3379 return SGPRReg;
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003380
3381 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003382 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003383
3384 for (unsigned i = 0; i < 3; ++i) {
3385 int Idx = OpIndices[i];
3386 if (Idx == -1)
3387 break;
3388
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003389 const MachineOperand &MO = MI.getOperand(Idx);
Matt Arsenault6c067412015-11-03 22:30:15 +00003390 if (!MO.isReg())
3391 continue;
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003392
Matt Arsenault6c067412015-11-03 22:30:15 +00003393 // Is this operand statically required to be an SGPR based on the operand
3394 // constraints?
3395 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
3396 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
3397 if (IsRequiredSGPR)
3398 return MO.getReg();
3399
3400 // If this could be a VGPR or an SGPR, check the dynamic register class.
3401 unsigned Reg = MO.getReg();
3402 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
3403 if (RI.isSGPRClass(RegRC))
3404 UsedSGPRs[i] = Reg;
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003405 }
3406
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003407 // We don't have a required SGPR operand, so we have a bit more freedom in
3408 // selecting operands to move.
3409
3410 // Try to select the most used SGPR. If an SGPR is equal to one of the
3411 // others, we choose that.
3412 //
3413 // e.g.
3414 // V_FMA_F32 v0, s0, s0, s0 -> No moves
3415 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
3416
Matt Arsenault6c067412015-11-03 22:30:15 +00003417 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
3418 // prefer those.
3419
Matt Arsenaultee522bf2014-09-26 17:55:06 +00003420 if (UsedSGPRs[0] != AMDGPU::NoRegister) {
3421 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
3422 SGPRReg = UsedSGPRs[0];
3423 }
3424
3425 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
3426 if (UsedSGPRs[1] == UsedSGPRs[2])
3427 SGPRReg = UsedSGPRs[1];
3428 }
3429
3430 return SGPRReg;
3431}
3432
Tom Stellard6407e1e2014-08-01 00:32:33 +00003433MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
Matt Arsenaultace5b762014-10-17 18:00:43 +00003434 unsigned OperandName) const {
Tom Stellard1aaad692014-07-21 16:55:33 +00003435 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
3436 if (Idx == -1)
3437 return nullptr;
3438
3439 return &MI.getOperand(Idx);
3440}
Tom Stellard794c8c02014-12-02 17:05:41 +00003441
3442uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
3443 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
Tom Stellard4694ed02015-06-26 21:58:42 +00003444 if (ST.isAmdHsaOS()) {
Tom Stellard794c8c02014-12-02 17:05:41 +00003445 RsrcDataFormat |= (1ULL << 56);
3446
Matt Arsenault43e92fe2016-06-24 06:30:11 +00003447 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
Michel Danzerbeb79ce2016-03-16 09:10:35 +00003448 // Set MTYPE = 2
3449 RsrcDataFormat |= (2ULL << 59);
Tom Stellard4694ed02015-06-26 21:58:42 +00003450 }
3451
Tom Stellard794c8c02014-12-02 17:05:41 +00003452 return RsrcDataFormat;
3453}
Marek Olsakd1a69a22015-09-29 23:37:32 +00003454
3455uint64_t SIInstrInfo::getScratchRsrcWords23() const {
3456 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
3457 AMDGPU::RSRC_TID_ENABLE |
3458 0xffffffff; // Size;
3459
Matt Arsenault24ee0782016-02-12 02:40:47 +00003460 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
3461
Marek Olsake93f6d62016-06-13 16:05:57 +00003462 Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) |
3463 // IndexStride = 64
3464 (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT);
Matt Arsenault24ee0782016-02-12 02:40:47 +00003465
Marek Olsakd1a69a22015-09-29 23:37:32 +00003466 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
3467 // Clear them unless we want a huge stride.
Matt Arsenault43e92fe2016-06-24 06:30:11 +00003468 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
Marek Olsakd1a69a22015-09-29 23:37:32 +00003469 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
3470
3471 return Rsrc23;
3472}
Nicolai Haehnle02c32912016-01-13 16:10:10 +00003473
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003474bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
3475 unsigned Opc = MI.getOpcode();
Nicolai Haehnle02c32912016-01-13 16:10:10 +00003476
3477 return isSMRD(Opc);
3478}
3479
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003480bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
3481 unsigned Opc = MI.getOpcode();
Nicolai Haehnle02c32912016-01-13 16:10:10 +00003482
3483 return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
3484}
Tom Stellard2ff72622016-01-28 16:04:37 +00003485
Matt Arsenault3354f422016-09-10 01:20:33 +00003486unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
3487 int &FrameIndex) const {
3488 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
3489 if (!Addr || !Addr->isFI())
3490 return AMDGPU::NoRegister;
3491
3492 assert(!MI.memoperands_empty() &&
3493 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
3494
3495 FrameIndex = Addr->getIndex();
3496 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
3497}
3498
3499unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
3500 int &FrameIndex) const {
3501 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
3502 assert(Addr && Addr->isFI());
3503 FrameIndex = Addr->getIndex();
3504 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
3505}
3506
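// The following implement the generic TargetInstrInfo spill-recognition
// hooks; returning AMDGPU::NoRegister (0) tells generic code that the
// instruction is not a simple stack-slot load or store.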
3507unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
3508 int &FrameIndex) const {
3510 if (!MI.mayLoad())
3511 return AMDGPU::NoRegister;
3512
3513 if (isMUBUF(MI) || isVGPRSpill(MI))
3514 return isStackAccess(MI, FrameIndex);
3515
3516 if (isSGPRSpill(MI))
3517 return isSGPRStackAccess(MI, FrameIndex);
3518
3519 return AMDGPU::NoRegister;
3520}
3521
3522unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
3523 int &FrameIndex) const {
3524 if (!MI.mayStore())
3525 return AMDGPU::NoRegister;
3526
3527 if (isMUBUF(MI) || isVGPRSpill(MI))
3528 return isStackAccess(MI, FrameIndex);
3529
3530 if (isSGPRSpill(MI))
3531 return isSGPRStackAccess(MI, FrameIndex);
3532
3533 return AMDGPU::NoRegister;
3534}
3535
Matt Arsenault02458c22016-06-06 20:10:33 +00003536unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
3537 unsigned Opc = MI.getOpcode();
3538 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
3539 unsigned DescSize = Desc.getSize();
3540
3541 // If we have a definitive size, we can use it. Otherwise we need to inspect
3542 // the operands to know the size.
Matt Arsenault2d8c2892016-11-01 20:42:24 +00003543 //
3544 // FIXME: Instructions that have a base 32-bit encoding report their size as
3545 // 4, even though they are really 8 bytes if they have a literal operand.
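  //
  // For example, s_mov_b32 with an inline constant (s_mov_b32 s0, 1) is 4
  // bytes, but with an arbitrary literal (s_mov_b32 s0, 0x12345678) the
  // 4-byte encoding is followed by the literal dword, for 8 bytes total.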
3546 if (DescSize != 0 && DescSize != 4)
Matt Arsenault02458c22016-06-06 20:10:33 +00003547 return DescSize;
3548
Stanislav Mekhanoshinea91cca2016-11-15 19:00:15 +00003549 if (Opc == AMDGPU::WAVE_BARRIER)
3550 return 0;
3551
Matt Arsenault02458c22016-06-06 20:10:33 +00003552 // 4-byte instructions may have a 32-bit literal encoded after them. Check
3553  // operands that could ever be literals.
3554 if (isVALU(MI) || isSALU(MI)) {
Matt Arsenault2d8c2892016-11-01 20:42:24 +00003555 if (isFixedSize(MI)) {
3556 assert(DescSize == 4);
3557 return DescSize;
3558 }
3559
Matt Arsenault02458c22016-06-06 20:10:33 +00003560 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
3561 if (Src0Idx == -1)
3562 return 4; // No operands.
3563
Matt Arsenault4bd72362016-12-10 00:39:12 +00003564 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
Matt Arsenault02458c22016-06-06 20:10:33 +00003565 return 8;
3566
3567 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
3568 if (Src1Idx == -1)
3569 return 4;
3570
Matt Arsenault4bd72362016-12-10 00:39:12 +00003571 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
Matt Arsenault02458c22016-06-06 20:10:33 +00003572 return 8;
3573
3574 return 4;
3575 }
3576
Matt Arsenault2d8c2892016-11-01 20:42:24 +00003577 if (DescSize == 4)
3578 return 4;
3579
Matt Arsenault02458c22016-06-06 20:10:33 +00003580 switch (Opc) {
Matt Arsenault1110f142016-10-26 14:53:54 +00003581 case AMDGPU::SI_MASK_BRANCH:
Matt Arsenault02458c22016-06-06 20:10:33 +00003582 case TargetOpcode::IMPLICIT_DEF:
3583 case TargetOpcode::KILL:
3584 case TargetOpcode::DBG_VALUE:
3585 case TargetOpcode::BUNDLE:
3586 case TargetOpcode::EH_LABEL:
3587 return 0;
3588 case TargetOpcode::INLINEASM: {
3589 const MachineFunction *MF = MI.getParent()->getParent();
3590 const char *AsmStr = MI.getOperand(0).getSymbolName();
3591 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
3592 }
3593 default:
3594 llvm_unreachable("unable to find instruction size");
3595 }
3596}
3597
Tom Stellard6695ba02016-10-28 23:53:48 +00003598bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
3599 if (!isFLAT(MI))
3600 return false;
3601
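  // With no memory operand information, conservatively assume this FLAT
  // instruction may access the flat address space.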
3602 if (MI.memoperands_empty())
3603 return true;
3604
3605 for (const MachineMemOperand *MMO : MI.memoperands()) {
3606 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
3607 return true;
3608 }
3609 return false;
3610}
3611
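// Human-readable names for target-index operands, allowing them to round
// trip through MIR serialization.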
Tom Stellard2ff72622016-01-28 16:04:37 +00003612ArrayRef<std::pair<int, const char *>>
3613SIInstrInfo::getSerializableTargetIndices() const {
3614 static const std::pair<int, const char *> TargetIndices[] = {
3615 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
3616 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
3617 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
3618 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
3619 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
3620 return makeArrayRef(TargetIndices);
3621}
Tom Stellardcb6ba622016-04-30 00:23:06 +00003622
3623/// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
3624/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
3625ScheduleHazardRecognizer *
3626SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
3627 const ScheduleDAG *DAG) const {
3628 return new GCNHazardRecognizer(DAG->MF);
3629}
3630
3631/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
3632/// pass.
3633ScheduleHazardRecognizer *
3634SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
3635 return new GCNHazardRecognizer(MF);
3636}