//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));
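// Illustrative use (hypothetical invocation): shrinking the branch range
// forces relaxation of short branches in small tests, e.g.
//   llc -march=amdgcn -amdgpu-s-branch-bits=4 -verify-machineinstrs < test.ll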

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

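// Count the operands of \p Node, ignoring any trailing glue operands.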
static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {
    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(1))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

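// The ST64 read2/write2 variants stride their two offsets by 64 elements
// instead of 1, so element offsets must be scaled up accordingly.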
static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
                                          MachineOperand *&BaseOp,
                                          int64_t &Offset,
                                          const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      // TODO: ds_consume/ds_append use M0 for the base address. Is it safe to
      // report that here?
      if (!BaseOp)
        return false;

      Offset = OffsetImm->getImm();
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

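    // Illustrative arithmetic: for a DS_READ2_B32 with offset0 = 2 and
    // offset1 = 3, the 64-bit destination holds two 32-bit elements, so
    // EltSize = 64 / 16 = 4 and the reported offset is 4 * 2 = 8 bytes.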
    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      Offset = EltSize * Offset0;
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset =
        getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg())
      return false;

    MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = AddrReg;
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseOp = SBaseReg;
    Offset = OffsetImm->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isFLAT(LdSt)) {
    MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (VAddr) {
      // Can't analyze 2 offsets.
      if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
        return false;

      BaseOp = VAddr;
    } else {
      // scratch instructions have either vaddr or saddr.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    }

    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  return false;
}

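// Conservatively test whether two memory instructions share a base pointer:
// either their base operands are identical, or their memory operands resolve
// to the same underlying IR object.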
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  const MachineOperand &BaseOp1,
                                  const MachineInstr &MI2,
                                  const MachineOperand &BaseOp2) {
  // Support only base operands with base registers.
  // Note: this could be extended to support FI operands.
  if (!BaseOp1.isReg() || !BaseOp2.isReg())
    return false;

  if (BaseOp1.isIdenticalTo(BaseOp2))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  const MachineFunction &MF = *MI1.getParent()->getParent();
  const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
  Base1 = GetUnderlyingObject(Base1, DL);
  Base2 = GetUnderlyingObject(Base2, DL);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
                                      MachineOperand &BaseOp2,
                                      unsigned NumLoads) const {
  MachineInstr &FirstLdSt = *BaseOp1.getParent();
  MachineInstr &SecondLdSt = *BaseOp2.getParent();

  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
    return false;

  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) ||
      (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) {
    const unsigned MaxGlobalLoadCluster = 6;
    if (NumLoads > MaxGlobalLoadCluster)
      return false;

    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    if (!FirstDst)
      FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
    if (!SecondDst)
      SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());

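  // Illustrative arithmetic: four b32 loads total 4 * (32 / 8) = 16 bytes and
  // still cluster; four b64 loads total 32 bytes and do not.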
  return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 16 store batches.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have 16 or fewer loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
                                        "illegal SGPR to VGPR copy",
                                        DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
        .addImm(-1)
        .addImm(0);
      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    if (RI.getRegSizeInBits(*RC) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

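  // For example, a 128-bit SGPR copy splits into two S_MOV_B64 sub-register
  // moves; the iteration direction (Forward) is chosen so an overlapping
  // source is not clobbered before it is read.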
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    bool UseKill = KillSrc && Idx == SubIndices.size() - 1;
    Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;

    // Index the split parts to get a real sub-register index; the raw loop
    // counter is not a valid sub-register index.
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

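// Lower a select on a branch condition in Cond into a V_CNDMASK_B32 whose
// mask operand is materialized into an SGPR pair from the predicate.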
void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, unsigned DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     unsigned TrueReg,
                                     unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

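// The spill pseudos below are keyed by the spill size in bytes; e.g. spilling
// an SReg_64 (8 bytes) selects SI_SPILL_S64_SAVE.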
static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    // Add the scratch resource registers as implicit uses because we may end
    // up needing them, and need to ensure that the reserved registers are
    // correctly handled.
    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);

    FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL);
    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead);
    }

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getFrameOffsetReg())        // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
    PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL);
    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead);
    }

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)        // vaddr
    .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
    .addReg(MFI->getFrameOffsetReg()) // scratch_offset
    .addImm(0)                        // offset
    .addMemOperand(MMO);
}

/// \param Offset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    const DebugLoc &DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {
      unsigned TIDIGXReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
          MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      getAddNoCarry(Entry, Insert, DL, TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

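  // TIDReg now holds the flat work-item id scaled by 4 (one dword slot per
  // lane); scaling the per-item frame offset by the work-group size keeps the
  // spill slots of different work-items from overlapping.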
  // Add FrameIndex to LDS offset
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  getAddNoCarry(MBB, MI, DL, TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
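  // S_NOP N waits for N + 1 states, so emit S_NOP 7 chunks plus a remainder;
  // e.g. Count = 10 becomes S_NOP 7 followed by S_NOP 1 (8 + 2 states).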
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

Stanislav Mekhanoshinf92ed692019-01-21 19:11:26 +00001164unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
Tom Stellardcb6ba622016-04-30 00:23:06 +00001165 switch (MI.getOpcode()) {
1166 default: return 1; // FIXME: Do wait states equal cycles?
1167
1168 case AMDGPU::S_NOP:
1169 return MI.getOperand(0).getImm() + 1;
1170 }
1171}
1172
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001173bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1174 MachineBasicBlock &MBB = *MI.getParent();
Tom Stellardeba61072014-05-02 15:41:42 +00001175 DebugLoc DL = MBB.findDebugLoc(MI);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001176 switch (MI.getOpcode()) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00001177 default: return TargetInstrInfo::expandPostRAPseudo(MI);
Eugene Zelenko59e12822017-08-08 00:47:13 +00001178 case AMDGPU::S_MOV_B64_term:
Matt Arsenaulte6740752016-09-29 01:44:16 +00001179 // This is only a terminator to get the correct spill code placement during
1180 // register allocation.
1181 MI.setDesc(get(AMDGPU::S_MOV_B64));
1182 break;
Eugene Zelenko59e12822017-08-08 00:47:13 +00001183
1184 case AMDGPU::S_XOR_B64_term:
Matt Arsenaulte6740752016-09-29 01:44:16 +00001185 // This is only a terminator to get the correct spill code placement during
1186 // register allocation.
1187 MI.setDesc(get(AMDGPU::S_XOR_B64));
1188 break;
Eugene Zelenko59e12822017-08-08 00:47:13 +00001189
1190 case AMDGPU::S_ANDN2_B64_term:
Matt Arsenaulte6740752016-09-29 01:44:16 +00001191 // This is only a terminator to get the correct spill code placement during
1192 // register allocation.
1193 MI.setDesc(get(AMDGPU::S_ANDN2_B64));
1194 break;
Eugene Zelenko59e12822017-08-08 00:47:13 +00001195
Tom Stellard4842c052015-01-07 20:27:25 +00001196 case AMDGPU::V_MOV_B64_PSEUDO: {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001197 unsigned Dst = MI.getOperand(0).getReg();
Tom Stellard4842c052015-01-07 20:27:25 +00001198 unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1199 unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1200
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001201 const MachineOperand &SrcOp = MI.getOperand(1);
Tom Stellard4842c052015-01-07 20:27:25 +00001202 // FIXME: Will this work for 64-bit floating point immediates?
1203 assert(!SrcOp.isFPImm());
1204 if (SrcOp.isImm()) {
1205 APInt Imm(64, SrcOp.getImm());
1206 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
Matt Arsenault80bc3552016-06-13 15:53:52 +00001207 .addImm(Imm.getLoBits(32).getZExtValue())
1208 .addReg(Dst, RegState::Implicit | RegState::Define);
Tom Stellard4842c052015-01-07 20:27:25 +00001209 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
Matt Arsenault80bc3552016-06-13 15:53:52 +00001210 .addImm(Imm.getHiBits(32).getZExtValue())
1211 .addReg(Dst, RegState::Implicit | RegState::Define);
Tom Stellard4842c052015-01-07 20:27:25 +00001212 } else {
1213 assert(SrcOp.isReg());
1214 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
Matt Arsenault80bc3552016-06-13 15:53:52 +00001215 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
1216 .addReg(Dst, RegState::Implicit | RegState::Define);
Tom Stellard4842c052015-01-07 20:27:25 +00001217 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
Matt Arsenault80bc3552016-06-13 15:53:52 +00001218 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
1219 .addReg(Dst, RegState::Implicit | RegState::Define);
Tom Stellard4842c052015-01-07 20:27:25 +00001220 }
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001221 MI.eraseFromParent();
Tom Stellard4842c052015-01-07 20:27:25 +00001222 break;
1223 }
Connor Abbott66b9bd62017-08-04 18:36:54 +00001224 case AMDGPU::V_SET_INACTIVE_B32: {
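    // The expansion below writes the source into the currently inactive
    // lanes by executing the move with EXEC temporarily inverted.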
1225 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
1226 .addReg(AMDGPU::EXEC);
1227 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
1228 .add(MI.getOperand(2));
1229 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
1230 .addReg(AMDGPU::EXEC);
1231 MI.eraseFromParent();
1232 break;
1233 }
1234 case AMDGPU::V_SET_INACTIVE_B64: {
1235 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
1236 .addReg(AMDGPU::EXEC);
1237 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
1238 MI.getOperand(0).getReg())
1239 .add(MI.getOperand(2));
1240 expandPostRAPseudo(*Copy);
1241 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
1242 .addReg(AMDGPU::EXEC);
1243 MI.eraseFromParent();
1244 break;
1245 }
Nicolai Haehnlea7852092016-10-24 14:56:02 +00001246 case AMDGPU::V_MOVRELD_B32_V1:
1247 case AMDGPU::V_MOVRELD_B32_V2:
1248 case AMDGPU::V_MOVRELD_B32_V4:
1249 case AMDGPU::V_MOVRELD_B32_V8:
1250 case AMDGPU::V_MOVRELD_B32_V16: {
1251 const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
1252 unsigned VecReg = MI.getOperand(0).getReg();
1253 bool IsUndef = MI.getOperand(1).isUndef();
1254 unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
1255 assert(VecReg == MI.getOperand(1).getReg());
1256
1257 MachineInstr *MovRel =
1258 BuildMI(MBB, MI, DL, MovRelDesc)
1259 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
Diana Picus116bbab2017-01-13 09:58:52 +00001260 .add(MI.getOperand(2))
Nicolai Haehnlea7852092016-10-24 14:56:02 +00001261 .addReg(VecReg, RegState::ImplicitDefine)
Diana Picus116bbab2017-01-13 09:58:52 +00001262 .addReg(VecReg,
1263 RegState::Implicit | (IsUndef ? RegState::Undef : 0));
Nicolai Haehnlea7852092016-10-24 14:56:02 +00001264
1265 const int ImpDefIdx =
1266 MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
1267 const int ImpUseIdx = ImpDefIdx + 1;
1268 MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
1269
1270 MI.eraseFromParent();
1271 break;
1272 }
Tom Stellardbf3e6e52016-06-14 20:29:59 +00001273 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
Tom Stellardc93fc112015-12-10 02:13:01 +00001274 MachineFunction &MF = *MBB.getParent();
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001275 unsigned Reg = MI.getOperand(0).getReg();
Matt Arsenault11587d92016-08-10 19:11:45 +00001276 unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
1277 unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
Tom Stellardc93fc112015-12-10 02:13:01 +00001278
1279 // Create a bundle so these instructions won't be re-ordered by the
1280 // post-RA scheduler.
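    // Schematically, the finalized bundle is (a sketch; register names are
    // placeholders):
    //   s_getpc_b64  s[R:R+1]
    //   s_add_u32    sR,   sR,   <low 32 bits of the offset>
    //   s_addc_u32   sR+1, sR+1, <high 32 bits of the offset, or 0>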
1281 MIBundleBuilder Bundler(MBB, MI);
1282 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
1283
1284 // Add 32-bit offset from this instruction to the start of the
1285 // constant data.
1286 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001287 .addReg(RegLo)
Diana Picus116bbab2017-01-13 09:58:52 +00001288 .add(MI.getOperand(1)));
Tom Stellardc93fc112015-12-10 02:13:01 +00001289
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00001290 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
1291 .addReg(RegHi);
1292 if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
1293 MIB.addImm(0);
1294 else
Diana Picus116bbab2017-01-13 09:58:52 +00001295 MIB.add(MI.getOperand(2));
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00001296
1297 Bundler.append(MIB);
Eugene Zelenko59e12822017-08-08 00:47:13 +00001298 finalizeBundle(MBB, Bundler.begin());
Tom Stellardc93fc112015-12-10 02:13:01 +00001299
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001300 MI.eraseFromParent();
Tom Stellardc93fc112015-12-10 02:13:01 +00001301 break;
1302 }
Connor Abbott92638ab2017-08-04 18:36:52 +00001303 case AMDGPU::EXIT_WWM: {
1304 // This only gets its own opcode so that SIFixWWMLiveness can tell when WWM
1305 // is exited.
1306 MI.setDesc(get(AMDGPU::S_MOV_B64));
1307 break;
1308 }
Stanislav Mekhanoshin739174c2018-05-31 20:13:51 +00001309 case TargetOpcode::BUNDLE: {
1310 if (!MI.mayLoad())
1311 return false;
1312
1313 // If it is a load, it must be a memory clause.
1314 for (MachineBasicBlock::instr_iterator I = MI.getIterator();
1315 I->isBundledWithSucc(); ++I) {
1316 I->unbundleFromSucc();
1317 for (MachineOperand &MO : I->operands())
1318 if (MO.isReg())
1319 MO.setIsInternalRead(false);
1320 }
1321
1322 MI.eraseFromParent();
1323 break;
1324 }
Tom Stellardeba61072014-05-02 15:41:42 +00001325 }
1326 return true;
1327}
1328
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001329bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
1330 MachineOperand &Src0,
1331 unsigned Src0OpName,
1332 MachineOperand &Src1,
1333 unsigned Src1OpName) const {
1334 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
1335 if (!Src0Mods)
1336 return false;
1337
1338 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
1339 assert(Src1Mods &&
1340 "All commutable instructions have both src0 and src1 modifiers");
1341
1342 int Src0ModsVal = Src0Mods->getImm();
1343 int Src1ModsVal = Src1Mods->getImm();
1344
1345 Src1Mods->setImm(Src0ModsVal);
1346 Src0Mods->setImm(Src1ModsVal);
1347 return true;
1348}
1349
1350static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
1351 MachineOperand &RegOp,
Matt Arsenault25dba302016-09-13 19:03:12 +00001352 MachineOperand &NonRegOp) {
1353 unsigned Reg = RegOp.getReg();
1354 unsigned SubReg = RegOp.getSubReg();
1355 bool IsKill = RegOp.isKill();
1356 bool IsDead = RegOp.isDead();
1357 bool IsUndef = RegOp.isUndef();
1358 bool IsDebug = RegOp.isDebug();
1359
1360 if (NonRegOp.isImm())
1361 RegOp.ChangeToImmediate(NonRegOp.getImm());
1362 else if (NonRegOp.isFI())
1363 RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
1364 else
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001365 return nullptr;
1366
Matt Arsenault25dba302016-09-13 19:03:12 +00001367 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
1368 NonRegOp.setSubReg(SubReg);
1369
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001370 return &MI;
1371}
1372
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001373MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001374 unsigned Src0Idx,
1375 unsigned Src1Idx) const {
1376 assert(!NewMI && "this should never be used");
1377
1378 unsigned Opc = MI.getOpcode();
1379 int CommutedOpcode = commuteOpcode(Opc);
Marek Olsakcfbdba22015-06-26 20:29:10 +00001380 if (CommutedOpcode == -1)
1381 return nullptr;
1382
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001383 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
1384 static_cast<int>(Src0Idx) &&
1385 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
1386 static_cast<int>(Src1Idx) &&
1387 "inconsistency with findCommutedOpIndices");
1388
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001389 MachineOperand &Src0 = MI.getOperand(Src0Idx);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001390 MachineOperand &Src1 = MI.getOperand(Src1Idx);
Matt Arsenaultaa5ccfb2014-10-17 18:00:37 +00001391
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001392 MachineInstr *CommutedMI = nullptr;
1393 if (Src0.isReg() && Src1.isReg()) {
1394 if (isOperandLegal(MI, Src1Idx, &Src0)) {
1395 // Be sure to copy the source modifiers to the right place.
1396 CommutedMI
1397 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
Matt Arsenaultd282ada2014-10-17 18:00:48 +00001398 }
1399
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001400 } else if (Src0.isReg() && !Src1.isReg()) {
1401 // src0 should always be able to support any operand type, so no need to
1402 // check operand legality.
1403 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
1404 } else if (!Src0.isReg() && Src1.isReg()) {
1405 if (isOperandLegal(MI, Src1Idx, &Src0))
1406 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
Tom Stellard82166022013-11-13 23:36:37 +00001407 } else {
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001408 // FIXME: Found two non-register operands to commute. This does happen.
1409 return nullptr;
Tom Stellard82166022013-11-13 23:36:37 +00001410 }
Christian Konig3c145802013-03-27 09:12:59 +00001411
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001412 if (CommutedMI) {
1413 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
1414 Src1, AMDGPU::OpName::src1_modifiers);
1415
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001416 CommutedMI->setDesc(get(CommutedOpcode));
Matt Arsenaultbbb47da2016-09-08 17:19:29 +00001417 }
Christian Konig3c145802013-03-27 09:12:59 +00001418
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001419 return CommutedMI;
Christian Konig76edd4f2013-02-26 17:52:29 +00001420}
1421
Matt Arsenault92befe72014-09-26 17:54:54 +00001422// This needs to be implemented because the source modifiers may be inserted
1423// between the true commutable operands, and the base
1424// TargetInstrInfo::commuteInstruction uses it.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001425bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
Andrew Kaylor16c4da02015-09-28 20:33:22 +00001426 unsigned &SrcOpIdx1) const {
Alexander Timofeevdb7ee762018-09-11 11:56:50 +00001427 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
1428}
1429
1430bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
1431 unsigned &SrcOpIdx1) const {
1432 if (!Desc.isCommutable())
Matt Arsenault92befe72014-09-26 17:54:54 +00001433 return false;
1434
Alexander Timofeevdb7ee762018-09-11 11:56:50 +00001435 unsigned Opc = Desc.getOpcode();
Matt Arsenault92befe72014-09-26 17:54:54 +00001436 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1437 if (Src0Idx == -1)
1438 return false;
1439
Matt Arsenault92befe72014-09-26 17:54:54 +00001440 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1441 if (Src1Idx == -1)
1442 return false;
1443
Andrew Kaylor16c4da02015-09-28 20:33:22 +00001444 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
Matt Arsenault92befe72014-09-26 17:54:54 +00001445}
1446
Matt Arsenault6bc43d82016-10-06 16:20:41 +00001447bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1448 int64_t BrOffset) const {
1449 // BranchRelaxation should never have to check s_setpc_b64 because its dest
1450 // block is unanalyzable.
1451 assert(BranchOp != AMDGPU::S_SETPC_B64);
1452
1453 // Convert to dwords.
1454 BrOffset /= 4;
1455
1456 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
1457 // from the next instruction.
1458 BrOffset -= 1;
1459
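  // e.g. a branch 64 bytes forward gives BrOffset = 64 / 4 - 1 = 15, which
  // trivially fits in the signed 16-bit SIMM16 field.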
1460 return isIntN(BranchOffsetBits, BrOffset);
1461}
1462
1463MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
1464 const MachineInstr &MI) const {
1465 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
1466 // This would be a difficult analysis to perform, but the branch is always
1467 // legal (its target is a 64-bit register), so there's no need to analyze it.
1468 return nullptr;
1469 }
1470
1471 return MI.getOperand(0).getMBB();
1472}
1473
1474unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1475 MachineBasicBlock &DestBB,
1476 const DebugLoc &DL,
1477 int64_t BrOffset,
1478 RegScavenger *RS) const {
1479 assert(RS && "RegScavenger required for long branching");
1480 assert(MBB.empty() &&
1481 "new block should be inserted for expanding unconditional branch");
1482 assert(MBB.pred_size() == 1);
1483
1484 MachineFunction *MF = MBB.getParent();
1485 MachineRegisterInfo &MRI = MF->getRegInfo();
1486
1487 // FIXME: Virtual register workaround for RegScavenger not working with empty
1488 // blocks.
1489 unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1490
1491 auto I = MBB.end();
1492
1493 // We need to compute the offset relative to the instruction immediately after
1494 // s_getpc_b64. Insert pc arithmetic code before last terminator.
1495 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
1496
1497 // TODO: Handle > 32-bit block address.
1498 if (BrOffset >= 0) {
1499 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
1500 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1501 .addReg(PCReg, 0, AMDGPU::sub0)
1502 .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
1503 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
1504 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1505 .addReg(PCReg, 0, AMDGPU::sub1)
1506 .addImm(0);
1507 } else {
1508 // Backwards branch.
1509 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
1510 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1511 .addReg(PCReg, 0, AMDGPU::sub0)
1512 .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
1513 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
1514 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1515 .addReg(PCReg, 0, AMDGPU::sub1)
1516 .addImm(0);
1517 }
1518
1519 // Insert the indirect branch after the other terminator.
1520 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
1521 .addReg(PCReg);
1522
1523 // FIXME: If spilling is necessary, this will fail because this scavenger has
1524 // no emergency stack slots. It is non-trivial to spill in this situation,
1525 // because the restore code needs to be specially placed after the
1526 // jump. BranchRelaxation then needs to be made aware of the newly inserted
1527 // block.
1528 //
1529 // If a spill is needed for the pc register pair, we need to insert a spill
1530 // restore block right before the destination block, and insert a short branch
1531 // into the old destination block's fallthrough predecessor.
1532 // e.g.:
1533 //
1534 // s_cbranch_scc0 skip_long_branch:
1535 //
1536 // long_branch_bb:
1537 // spill s[8:9]
1538 // s_getpc_b64 s[8:9]
1539 // s_add_u32 s8, s8, restore_bb
1540 // s_addc_u32 s9, s9, 0
1541 // s_setpc_b64 s[8:9]
1542 //
1543 // skip_long_branch:
1544 // foo;
1545 //
1546 // .....
1547 //
1548 // dest_bb_fallthrough_predecessor:
1549 // bar;
1550 // s_branch dest_bb
1551 //
1552 // restore_bb:
1553 // restore s[8:9]
1554 // fallthrough dest_bb
1555 //
1556 // dest_bb:
1557 // buzz;
1558
1559 RS->enterBasicBlockEnd(MBB);
Matt Arsenaultb0b741e2018-10-30 01:33:14 +00001560 unsigned Scav = RS->scavengeRegisterBackwards(
1561 AMDGPU::SReg_64RegClass,
1562 MachineBasicBlock::iterator(GetPC), false, 0);
Matt Arsenault6bc43d82016-10-06 16:20:41 +00001563 MRI.replaceRegWith(PCReg, Scav);
1564 MRI.clearVirtRegs();
1565 RS->setRegUsed(Scav);
1566
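  // A plausible accounting of the size returned below (an assumption based on
  // the sequence built above): s_getpc_b64 (4) + s_add_u32 with a 32-bit
  // literal (8) + s_addc_u32 (4) + s_setpc_b64 (4).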
1567 return 4 + 8 + 4 + 4;
1568}
1569
Matt Arsenault6d093802016-05-21 00:29:27 +00001570unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
1571 switch (Cond) {
1572 case SIInstrInfo::SCC_TRUE:
1573 return AMDGPU::S_CBRANCH_SCC1;
1574 case SIInstrInfo::SCC_FALSE:
1575 return AMDGPU::S_CBRANCH_SCC0;
Matt Arsenault49459052016-05-21 00:29:40 +00001576 case SIInstrInfo::VCCNZ:
1577 return AMDGPU::S_CBRANCH_VCCNZ;
1578 case SIInstrInfo::VCCZ:
1579 return AMDGPU::S_CBRANCH_VCCZ;
1580 case SIInstrInfo::EXECNZ:
1581 return AMDGPU::S_CBRANCH_EXECNZ;
1582 case SIInstrInfo::EXECZ:
1583 return AMDGPU::S_CBRANCH_EXECZ;
Matt Arsenault6d093802016-05-21 00:29:27 +00001584 default:
1585 llvm_unreachable("invalid branch predicate");
1586 }
1587}
1588
1589SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
1590 switch (Opcode) {
1591 case AMDGPU::S_CBRANCH_SCC0:
1592 return SCC_FALSE;
1593 case AMDGPU::S_CBRANCH_SCC1:
1594 return SCC_TRUE;
Matt Arsenault49459052016-05-21 00:29:40 +00001595 case AMDGPU::S_CBRANCH_VCCNZ:
1596 return VCCNZ;
1597 case AMDGPU::S_CBRANCH_VCCZ:
1598 return VCCZ;
1599 case AMDGPU::S_CBRANCH_EXECNZ:
1600 return EXECNZ;
1601 case AMDGPU::S_CBRANCH_EXECZ:
1602 return EXECZ;
Matt Arsenault6d093802016-05-21 00:29:27 +00001603 default:
1604 return INVALID_BR;
1605 }
1606}
1607
Matt Arsenault6bc43d82016-10-06 16:20:41 +00001608bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
1609 MachineBasicBlock::iterator I,
1610 MachineBasicBlock *&TBB,
1611 MachineBasicBlock *&FBB,
1612 SmallVectorImpl<MachineOperand> &Cond,
1613 bool AllowModify) const {
Matt Arsenault6d093802016-05-21 00:29:27 +00001614 if (I->getOpcode() == AMDGPU::S_BRANCH) {
1615 // Unconditional Branch
1616 TBB = I->getOperand(0).getMBB();
1617 return false;
1618 }
1619
Jan Sjodina06bfe02017-05-15 20:18:37 +00001620 MachineBasicBlock *CondBB = nullptr;
Matt Arsenault6d093802016-05-21 00:29:27 +00001621
Jan Sjodina06bfe02017-05-15 20:18:37 +00001622 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
1623 CondBB = I->getOperand(1).getMBB();
1624 Cond.push_back(I->getOperand(0));
1625 } else {
1626 BranchPredicate Pred = getBranchPredicate(I->getOpcode());
1627 if (Pred == INVALID_BR)
1628 return true;
Matt Arsenault6d093802016-05-21 00:29:27 +00001629
Jan Sjodina06bfe02017-05-15 20:18:37 +00001630 CondBB = I->getOperand(0).getMBB();
1631 Cond.push_back(MachineOperand::CreateImm(Pred));
1632 Cond.push_back(I->getOperand(1)); // Save the branch register.
1633 }
Matt Arsenault6d093802016-05-21 00:29:27 +00001634 ++I;
1635
1636 if (I == MBB.end()) {
1637 // Conditional branch followed by fall-through.
1638 TBB = CondBB;
1639 return false;
1640 }
1641
1642 if (I->getOpcode() == AMDGPU::S_BRANCH) {
1643 TBB = CondBB;
1644 FBB = I->getOperand(0).getMBB();
1645 return false;
1646 }
1647
1648 return true;
1649}
1650
Matt Arsenault6bc43d82016-10-06 16:20:41 +00001651bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
1652 MachineBasicBlock *&FBB,
1653 SmallVectorImpl<MachineOperand> &Cond,
1654 bool AllowModify) const {
1655 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
Matt Arsenaulteabb8dd2018-11-16 05:03:02 +00001656 auto E = MBB.end();
1657 if (I == E)
1658 return false;
1659
1660 // Skip over the instructions that are artificially terminators for special
1661 // exec management.
1662 while (I != E && !I->isBranch() && !I->isReturn() &&
1663 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) {
1664 switch (I->getOpcode()) {
1665 case AMDGPU::SI_MASK_BRANCH:
1666 case AMDGPU::S_MOV_B64_term:
1667 case AMDGPU::S_XOR_B64_term:
1668 case AMDGPU::S_ANDN2_B64_term:
1669 break;
1670 case AMDGPU::SI_IF:
1671 case AMDGPU::SI_ELSE:
1672 case AMDGPU::SI_KILL_I1_TERMINATOR:
1673 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
1674 // FIXME: It's messy that these need to be considered here at all.
1675 return true;
1676 default:
1677 llvm_unreachable("unexpected non-branch terminator inst");
1678 }
1679
1680 ++I;
1681 }
1682
1683 if (I == E)
Matt Arsenault6bc43d82016-10-06 16:20:41 +00001684 return false;
1685
1686 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
1687 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
1688
1689 ++I;
1690
1691 // TODO: Should be able to treat as fallthrough?
1692 if (I == MBB.end())
1693 return true;
1694
1695 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
1696 return true;
1697
1698 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();
1699
1700 // Specifically handle the case where the conditional branch is to the same
1701 // destination as the mask branch. e.g.
1702 //
1703 // si_mask_branch BB8
1704 // s_cbranch_execz BB8
1705 // s_cbranch BB9
1706 //
1707 // This is required to understand divergent loops which may need the branches
1708 // to be relaxed.
1709 if (TBB != MaskBrDest || Cond.empty())
1710 return true;
1711
1712 auto Pred = Cond[0].getImm();
1713 return (Pred != EXECZ && Pred != EXECNZ);
1714}
1715
Matt Arsenault1b9fc8e2016-09-14 20:43:16 +00001716unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001717 int *BytesRemoved) const {
Matt Arsenault6d093802016-05-21 00:29:27 +00001718 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1719
1720 unsigned Count = 0;
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001721 unsigned RemovedSize = 0;
Matt Arsenault6d093802016-05-21 00:29:27 +00001722 while (I != MBB.end()) {
1723 MachineBasicBlock::iterator Next = std::next(I);
Matt Arsenault6bc43d82016-10-06 16:20:41 +00001724 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
1725 I = Next;
1726 continue;
1727 }
1728
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001729 RemovedSize += getInstSizeInBytes(*I);
Matt Arsenault6d093802016-05-21 00:29:27 +00001730 I->eraseFromParent();
1731 ++Count;
1732 I = Next;
1733 }
1734
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001735 if (BytesRemoved)
1736 *BytesRemoved = RemovedSize;
1737
Matt Arsenault6d093802016-05-21 00:29:27 +00001738 return Count;
1739}
1740
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +00001741// Copy the flags onto the implicit condition register operand.
1742static void preserveCondRegFlags(MachineOperand &CondReg,
1743 const MachineOperand &OrigCond) {
1744 CondReg.setIsUndef(OrigCond.isUndef());
1745 CondReg.setIsKill(OrigCond.isKill());
1746}
1747
Matt Arsenaulte8e0f5c2016-09-14 17:24:15 +00001748unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
Matt Arsenault6d093802016-05-21 00:29:27 +00001749 MachineBasicBlock *TBB,
1750 MachineBasicBlock *FBB,
1751 ArrayRef<MachineOperand> Cond,
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001752 const DebugLoc &DL,
1753 int *BytesAdded) const {
Matt Arsenault6d093802016-05-21 00:29:27 +00001754 if (!FBB && Cond.empty()) {
1755 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1756 .addMBB(TBB);
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001757 if (BytesAdded)
1758 *BytesAdded = 4;
Matt Arsenault6d093802016-05-21 00:29:27 +00001759 return 1;
1760 }
1761
Jan Sjodina06bfe02017-05-15 20:18:37 +00001762 if (Cond.size() == 1 && Cond[0].isReg()) {
1763 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
1764 .add(Cond[0])
1765 .addMBB(TBB);
1766 return 1;
1767 }
1768
Matt Arsenault6d093802016-05-21 00:29:27 +00001769 assert(TBB && Cond[0].isImm());
1770
1771 unsigned Opcode
1772 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
1773
1774 if (!FBB) {
1776 MachineInstr *CondBr =
1777 BuildMI(&MBB, DL, get(Opcode))
Matt Arsenault6d093802016-05-21 00:29:27 +00001778 .addMBB(TBB);
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001779
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001780 // Copy the flags onto the implicit condition register operand.
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +00001781 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001782
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001783 if (BytesAdded)
1784 *BytesAdded = 4;
Matt Arsenault6d093802016-05-21 00:29:27 +00001785 return 1;
1786 }
1787
1788 assert(TBB && FBB);
1789
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001790 MachineInstr *CondBr =
1791 BuildMI(&MBB, DL, get(Opcode))
Matt Arsenault6d093802016-05-21 00:29:27 +00001792 .addMBB(TBB);
1793 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1794 .addMBB(FBB);
1795
Matt Arsenault52f14ec2016-11-07 19:09:27 +00001796 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
1799
Matt Arsenaulta2b036e2016-09-14 17:23:48 +00001800 if (BytesAdded)
1801 *BytesAdded = 8;
1802
Matt Arsenault6d093802016-05-21 00:29:27 +00001803 return 2;
1804}
1805
Matt Arsenault1b9fc8e2016-09-14 20:43:16 +00001806bool SIInstrInfo::reverseBranchCondition(
Matt Arsenault72fcd5f2016-05-21 00:29:34 +00001807 SmallVectorImpl<MachineOperand> &Cond) const {
Jan Sjodina06bfe02017-05-15 20:18:37 +00001808 if (Cond.size() != 2) {
1809 return true;
1810 }
1811
1812 if (Cond[0].isImm()) {
1813 Cond[0].setImm(-Cond[0].getImm());
1814 return false;
1815 }
1816
1817 return true;
Matt Arsenault72fcd5f2016-05-21 00:29:34 +00001818}
1819
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +00001820bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
1821 ArrayRef<MachineOperand> Cond,
1822 unsigned TrueReg, unsigned FalseReg,
1823 int &CondCycles,
1824 int &TrueCycles, int &FalseCycles) const {
1825 switch (Cond[0].getImm()) {
1826 case VCCNZ:
1827 case VCCZ: {
1828 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1829 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
1830 assert(MRI.getRegClass(FalseReg) == RC);
1831
1832 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
1833 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
1834
1835 // Limit to equal cost for branch vs. N v_cndmask_b32s.
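    // e.g. a 128-bit VGPR value takes NumInsts = 4 v_cndmask_b32s, which is
    // within the limit.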
1836 return !RI.isSGPRClass(RC) && NumInsts <= 6;
1837 }
1838 case SCC_TRUE:
1839 case SCC_FALSE: {
1840 // FIXME: We could insert for VGPRs if we could replace the original compare
1841 // with a vector one.
1842 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1843 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
1844 assert(MRI.getRegClass(FalseReg) == RC);
1845
1846 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
1847
1848 // Sizes that are a multiple of 64 bits (8 bytes) can use s_cselect_b64.
1849 if (NumInsts % 2 == 0)
1850 NumInsts /= 2;
1851
1852 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
1853 return RI.isSGPRClass(RC);
1854 }
1855 default:
1856 return false;
1857 }
1858}
1859
1860void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
1861 MachineBasicBlock::iterator I, const DebugLoc &DL,
1862 unsigned DstReg, ArrayRef<MachineOperand> Cond,
1863 unsigned TrueReg, unsigned FalseReg) const {
1864 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
1865 if (Pred == VCCZ || Pred == SCC_FALSE) {
1866 Pred = static_cast<BranchPredicate>(-Pred);
1867 std::swap(TrueReg, FalseReg);
1868 }
1869
1870 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1871 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00001872 unsigned DstSize = RI.getRegSizeInBits(*DstRC);
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +00001873
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00001874 if (DstSize == 32) {
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +00001875 unsigned SelOp = Pred == SCC_TRUE ?
1876 AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32;
1877
1878 // Instruction's operands are backwards from what is expected.
1879 MachineInstr *Select =
1880 BuildMI(MBB, I, DL, get(SelOp), DstReg)
1881 .addReg(FalseReg)
1882 .addReg(TrueReg);
1883
1884 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
1885 return;
1886 }
1887
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00001888 if (DstSize == 64 && Pred == SCC_TRUE) {
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +00001889 MachineInstr *Select =
1890 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
1891 .addReg(FalseReg)
1892 .addReg(TrueReg);
1893
1894 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
1895 return;
1896 }
1897
1898 static const int16_t Sub0_15[] = {
1899 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
1900 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
1901 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
1902 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
1903 };
1904
1905 static const int16_t Sub0_15_64[] = {
1906 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
1907 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
1908 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
1909 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
1910 };
1911
1912 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
1913 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
1914 const int16_t *SubIndices = Sub0_15;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00001915 int NElts = DstSize / 32;
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +00001916
1917 // 64-bit select is only available for SALU.
1918 if (Pred == SCC_TRUE) {
1919 SelOp = AMDGPU::S_CSELECT_B64;
1920 EltRC = &AMDGPU::SGPR_64RegClass;
1921 SubIndices = Sub0_15_64;
1922
1923 assert(NElts % 2 == 0);
1924 NElts /= 2;
1925 }
1926
1927 MachineInstrBuilder MIB = BuildMI(
1928 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
1929
1930 I = MIB->getIterator();
1931
1932 SmallVector<unsigned, 8> Regs;
1933 for (int Idx = 0; Idx != NElts; ++Idx) {
1934 unsigned DstElt = MRI.createVirtualRegister(EltRC);
1935 Regs.push_back(DstElt);
1936
1937 unsigned SubIdx = SubIndices[Idx];
1938
1939 MachineInstr *Select =
1940 BuildMI(MBB, I, DL, get(SelOp), DstElt)
1941 .addReg(FalseReg, 0, SubIdx)
1942 .addReg(TrueReg, 0, SubIdx);
1943 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
1944
1945 MIB.addReg(DstElt)
1946 .addImm(SubIdx);
1947 }
1948}
1949
Sam Kolton27e0f8b2017-03-31 11:42:43 +00001950bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const {
1951 switch (MI.getOpcode()) {
1952 case AMDGPU::V_MOV_B32_e32:
1953 case AMDGPU::V_MOV_B32_e64:
1954 case AMDGPU::V_MOV_B64_PSEUDO: {
1955 // If there are additional implicit register operands, this may be used for
1956 // register indexing so the source register operand isn't simply copied.
1957 unsigned NumOps = MI.getDesc().getNumOperands() +
1958 MI.getDesc().getNumImplicitUses();
1959
1960 return MI.getNumOperands() == NumOps;
1961 }
1962 case AMDGPU::S_MOV_B32:
1963 case AMDGPU::S_MOV_B64:
1964 case AMDGPU::COPY:
1965 return true;
1966 default:
1967 return false;
1968 }
1969}
1970
Jan Sjodin312ccf72017-09-14 20:53:51 +00001971unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
Marcello Maggioni5ca41282018-08-20 19:23:45 +00001972 unsigned Kind) const {
Jan Sjodin312ccf72017-09-14 20:53:51 +00001973 switch(Kind) {
1974 case PseudoSourceValue::Stack:
1975 case PseudoSourceValue::FixedStack:
Matt Arsenault0da63502018-08-31 05:49:54 +00001976 return AMDGPUAS::PRIVATE_ADDRESS;
Jan Sjodin312ccf72017-09-14 20:53:51 +00001977 case PseudoSourceValue::ConstantPool:
1978 case PseudoSourceValue::GOT:
1979 case PseudoSourceValue::JumpTable:
1980 case PseudoSourceValue::GlobalValueCallEntry:
1981 case PseudoSourceValue::ExternalSymbolCallEntry:
1982 case PseudoSourceValue::TargetCustom:
Matt Arsenault0da63502018-08-31 05:49:54 +00001983 return AMDGPUAS::CONSTANT_ADDRESS;
Jan Sjodin312ccf72017-09-14 20:53:51 +00001984 }
Matt Arsenault0da63502018-08-31 05:49:54 +00001985 return AMDGPUAS::FLAT_ADDRESS;
Jan Sjodin312ccf72017-09-14 20:53:51 +00001986}
1987
Matt Arsenault0325d3d2015-02-21 21:29:07 +00001988static void removeModOperands(MachineInstr &MI) {
1989 unsigned Opc = MI.getOpcode();
1990 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1991 AMDGPU::OpName::src0_modifiers);
1992 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1993 AMDGPU::OpName::src1_modifiers);
1994 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1995 AMDGPU::OpName::src2_modifiers);
1996
1997 MI.RemoveOperand(Src2ModIdx);
1998 MI.RemoveOperand(Src1ModIdx);
1999 MI.RemoveOperand(Src0ModIdx);
2000}
2001
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002002bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002003 unsigned Reg, MachineRegisterInfo *MRI) const {
2004 if (!MRI->hasOneNonDBGUse(Reg))
2005 return false;
2006
Nicolai Haehnle39980da2017-11-28 08:41:50 +00002007 switch (DefMI.getOpcode()) {
2008 default:
2009 return false;
2010 case AMDGPU::S_MOV_B64:
2011 // TODO: We could fold 64-bit immediates, but this gets complicated
2012 // when there are sub-registers.
2013 return false;
2014
2015 case AMDGPU::V_MOV_B32_e32:
2016 case AMDGPU::S_MOV_B32:
2017 break;
2018 }
2019
2020 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2021 assert(ImmOp);
2022 // FIXME: We could handle FrameIndex values here.
2023 if (!ImmOp->isImm())
2024 return false;
2025
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002026 unsigned Opc = UseMI.getOpcode();
Tom Stellard2add8a12016-09-06 20:00:26 +00002027 if (Opc == AMDGPU::COPY) {
2028 bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
Tom Stellard2add8a12016-09-06 20:00:26 +00002029 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
Tom Stellard2add8a12016-09-06 20:00:26 +00002030 UseMI.setDesc(get(NewOpc));
2031 UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
2032 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
2033 return true;
2034 }
2035
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002036 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
2037 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
Matt Arsenault2ed21932017-02-27 20:21:31 +00002038 // Don't fold if we are using source or output modifiers. The new VOP2
2039 // instructions don't have them.
2040 if (hasAnyModifiersSet(UseMI))
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002041 return false;
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002042
Matt Arsenault3d1c1de2016-04-14 21:58:24 +00002043 // If this is a free constant, there's no reason to do this.
2044 // TODO: We could fold this here instead of letting SIFoldOperands do it
2045 // later.
Matt Arsenault4bd72362016-12-10 00:39:12 +00002046 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
2047
2048 // Any src operand can be used for the legality check.
Nicolai Haehnle39980da2017-11-28 08:41:50 +00002049 if (isInlineConstant(UseMI, *Src0, *ImmOp))
Matt Arsenault3d1c1de2016-04-14 21:58:24 +00002050 return false;
2051
Matt Arsenault2ed21932017-02-27 20:21:31 +00002052 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002053 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
2054 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002055
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002056 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
Matt Arsenaultf0783302015-02-21 21:29:10 +00002057 // We should only expect these to be on src0 due to canonicalizations.
2058 if (Src0->isReg() && Src0->getReg() == Reg) {
Matt Arsenaulta266bd82016-03-02 04:05:14 +00002059 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
Matt Arsenaultf0783302015-02-21 21:29:10 +00002060 return false;
2061
Matt Arsenaulta266bd82016-03-02 04:05:14 +00002062 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
Matt Arsenaultf0783302015-02-21 21:29:10 +00002063 return false;
2064
Nikolay Haustov65607812016-03-11 09:27:25 +00002065 // We need to swap operands 0 and 1 since madmk constant is at operand 1.
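      // Sketch with placeholder names: folding "%k = V_MOV_B32 <imm>" used
      // as src0 of "V_MAC_F32 %dst, %k, %b, %c" produces
      // "V_MADMK_F32 %dst, %b, <imm>, %c".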
Matt Arsenaultf0783302015-02-21 21:29:10 +00002066
Nicolai Haehnle39980da2017-11-28 08:41:50 +00002067 const int64_t Imm = ImmOp->getImm();
Matt Arsenaultf0783302015-02-21 21:29:10 +00002068
2069 // FIXME: This would be a lot easier if we could return a new instruction
2070 // instead of having to modify in place.
2071
2072 // Remove these first since they are at the end.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002073 UseMI.RemoveOperand(
2074 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2075 UseMI.RemoveOperand(
2076 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
Matt Arsenaultf0783302015-02-21 21:29:10 +00002077
2078 unsigned Src1Reg = Src1->getReg();
2079 unsigned Src1SubReg = Src1->getSubReg();
Matt Arsenaultf0783302015-02-21 21:29:10 +00002080 Src0->setReg(Src1Reg);
2081 Src0->setSubReg(Src1SubReg);
Matt Arsenault5e100162015-04-24 01:57:58 +00002082 Src0->setIsKill(Src1->isKill());
2083
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002084 if (Opc == AMDGPU::V_MAC_F32_e64 ||
2085 Opc == AMDGPU::V_MAC_F16_e64)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002086 UseMI.untieRegOperand(
2087 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002088
Nikolay Haustov65607812016-03-11 09:27:25 +00002089 Src1->ChangeToImmediate(Imm);
Matt Arsenaultf0783302015-02-21 21:29:10 +00002090
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002091 removeModOperands(UseMI);
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002092 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));
Matt Arsenaultf0783302015-02-21 21:29:10 +00002093
2094 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2095 if (DeleteDef)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002096 DefMI.eraseFromParent();
Matt Arsenaultf0783302015-02-21 21:29:10 +00002097
2098 return true;
2099 }
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002100
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002101 // Added part is the constant: Use v_madak_{f16, f32}.
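    // Sketch with placeholder names: folding "%k = V_MOV_B32 <imm>" used as
    // src2 of "V_MAC_F32 %dst, %a, %b, %k" produces
    // "V_MADAK_F32 %dst, %a, %b, <imm>".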
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002102 if (Src2->isReg() && Src2->getReg() == Reg) {
2103 // Not allowed to use constant bus for another operand.
2104 // We can however allow an inline immediate as src0.
Alexander Timofeev20cbe6f2018-09-10 16:42:49 +00002105 bool Src0Inlined = false;
2106 if (Src0->isReg()) {
2107 // Try to inline the constant if possible. If the def is a move of an
2108 // immediate and this is its only use, we save a VGPR here.
2110 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
2111 if (Def && Def->isMoveImmediate() &&
2112 isInlineConstant(Def->getOperand(1)) &&
2113 MRI->hasOneUse(Src0->getReg())) {
2114 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2115 Src0Inlined = true;
2116 } else if ((RI.isPhysicalRegister(Src0->getReg()) &&
2117 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg()))) ||
2118 (RI.isVirtualRegister(Src0->getReg()) &&
2119 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
2120 return false;
2121 // VGPR is okay as Src0 - fallthrough
2122 }
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002123
Alexander Timofeev20cbe6f2018-09-10 16:42:49 +00002124 if (Src1->isReg() && !Src0Inlined) {
2125 // We still have one slot for an inlinable constant - try to fill it.
2126 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
2127 if (Def && Def->isMoveImmediate() &&
2128 isInlineConstant(Def->getOperand(1)) &&
2129 MRI->hasOneUse(Src1->getReg()) &&
2130 commuteInstruction(UseMI)) {
2131 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2132 } else if ((RI.isPhysicalRegister(Src1->getReg()) &&
2133 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
2134 (RI.isVirtualRegister(Src1->getReg()) &&
2135 RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
2136 return false;
2137 // VGPR is okay as Src1 - fallthrough
2138 }
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002139
Nicolai Haehnle39980da2017-11-28 08:41:50 +00002140 const int64_t Imm = ImmOp->getImm();
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002141
2142 // FIXME: This would be a lot easier if we could return a new instruction
2143 // instead of having to modify in place.
2144
2145 // Remove these first since they are at the end.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002146 UseMI.RemoveOperand(
2147 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2148 UseMI.RemoveOperand(
2149 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002150
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002151 if (Opc == AMDGPU::V_MAC_F32_e64 ||
2152 Opc == AMDGPU::V_MAC_F16_e64)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002153 UseMI.untieRegOperand(
2154 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002155
2156 // Changing Src2 to an immediate adds it back to the instruction.
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002157 Src2->ChangeToImmediate(Imm);
2158
2159 // These come before src2.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002160 removeModOperands(UseMI);
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002161 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002162
2163 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2164 if (DeleteDef)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002165 DefMI.eraseFromParent();
Matt Arsenault0325d3d2015-02-21 21:29:07 +00002166
2167 return true;
2168 }
2169 }
2170
2171 return false;
2172}
2173
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002174static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
2175 int WidthB, int OffsetB) {
2176 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2177 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2178 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
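  // e.g. width 4 at offset 0 vs. width 8 at offset 4: 0 + 4 <= 4, so they
  // are disjoint; width 4 at offsets 0 and 2: 0 + 4 > 2, so they overlap.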
2179 return LowOffset + LowWidth <= HighOffset;
2180}
2181
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002182bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
2183 MachineInstr &MIb) const {
Francis Visoiu Mistrihd7eebd62018-11-28 12:00:20 +00002184 MachineOperand *BaseOp0, *BaseOp1;
Chad Rosierc27a18f2016-03-09 16:00:35 +00002185 int64_t Offset0, Offset1;
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002186
Francis Visoiu Mistrihd7eebd62018-11-28 12:00:20 +00002187 if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) &&
2188 getMemOperandWithOffset(MIb, BaseOp1, Offset1, &RI)) {
2189 if (!BaseOp0->isIdenticalTo(*BaseOp1))
2190 return false;
Tom Stellardcb6ba622016-04-30 00:23:06 +00002191
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002192 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
Tom Stellardcb6ba622016-04-30 00:23:06 +00002193 // FIXME: Handle ds_read2 / ds_write2.
2194 return false;
2195 }
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002196 unsigned Width0 = (*MIa.memoperands_begin())->getSize();
2197 unsigned Width1 = (*MIb.memoperands_begin())->getSize();
Francis Visoiu Mistrihd7eebd62018-11-28 12:00:20 +00002198 if (offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002199 return true;
2200 }
2201 }
2202
2203 return false;
2204}
2205
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002206bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
2207 MachineInstr &MIb,
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002208 AliasAnalysis *AA) const {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002209 assert((MIa.mayLoad() || MIa.mayStore()) &&
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002210 "MIa must load from or modify a memory location");
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002211 assert((MIb.mayLoad() || MIb.mayStore()) &&
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002212 "MIb must load from or modify a memory location");
2213
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002214 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002215 return false;
2216
2217 // XXX - Can we relax this between address spaces?
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002218 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002219 return false;
2220
2221 // TODO: Should we check the address space from the MachineMemOperand? That
2222 // would allow us to distinguish objects we know don't alias based on the
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00002223 // underlying address space, even if it was lowered to a different one,
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002224 // e.g. private accesses lowered to use MUBUF instructions on a scratch
2225 // buffer.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002226 if (isDS(MIa)) {
2227 if (isDS(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002228 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2229
Matt Arsenault9608a2892017-07-29 01:26:21 +00002230 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002231 }
2232
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002233 if (isMUBUF(MIa) || isMTBUF(MIa)) {
2234 if (isMUBUF(MIb) || isMTBUF(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002235 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2236
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002237 return !isFLAT(MIb) && !isSMRD(MIb);
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002238 }
2239
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002240 if (isSMRD(MIa)) {
2241 if (isSMRD(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002242 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2243
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002244 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002245 }
2246
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002247 if (isFLAT(MIa)) {
2248 if (isFLAT(MIb))
Matt Arsenaultc09cc3c2014-11-19 00:01:31 +00002249 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2250
2251 return false;
2252 }
2253
2254 return false;
2255}
2256
Stanislav Mekhanoshin710da422017-09-11 17:13:57 +00002257static int64_t getFoldableImm(const MachineOperand *MO) {
2258  if (!MO->isReg())
2259    return 0; // 0 doubles as the "no foldable immediate" result.
2260 const MachineFunction *MF = MO->getParent()->getParent()->getParent();
2261 const MachineRegisterInfo &MRI = MF->getRegInfo();
2262 auto Def = MRI.getUniqueVRegDef(MO->getReg());
Matt Arsenaultc3172872017-09-14 20:54:29 +00002263 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
2264 Def->getOperand(1).isImm())
Stanislav Mekhanoshin710da422017-09-11 17:13:57 +00002265 return Def->getOperand(1).getImm();
2266  return 0; // No foldable immediate found.
2267}
2268
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002269MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002270 MachineInstr &MI,
2271 LiveVariables *LV) const {
Matt Arsenault0084adc2018-04-30 19:08:16 +00002272 unsigned Opc = MI.getOpcode();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002273 bool IsF16 = false;
Matt Arsenault0084adc2018-04-30 19:08:16 +00002274 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64;
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002275
Matt Arsenault0084adc2018-04-30 19:08:16 +00002276 switch (Opc) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002277 default:
2278 return nullptr;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002279 case AMDGPU::V_MAC_F16_e64:
2280 IsF16 = true;
Simon Pilgrim0f5b3502017-07-07 10:18:57 +00002281 LLVM_FALLTHROUGH;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002282 case AMDGPU::V_MAC_F32_e64:
Matt Arsenault0084adc2018-04-30 19:08:16 +00002283 case AMDGPU::V_FMAC_F32_e64:
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002284 break;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002285 case AMDGPU::V_MAC_F16_e32:
2286 IsF16 = true;
Simon Pilgrim0f5b3502017-07-07 10:18:57 +00002287 LLVM_FALLTHROUGH;
Matt Arsenault0084adc2018-04-30 19:08:16 +00002288 case AMDGPU::V_MAC_F32_e32:
2289 case AMDGPU::V_FMAC_F32_e32: {
Matt Arsenault4bd72362016-12-10 00:39:12 +00002290 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
2291 AMDGPU::OpName::src0);
2292 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
Matt Arsenaultfdcdd882017-09-21 00:45:59 +00002293 if (!Src0->isReg() && !Src0->isImm())
2294 return nullptr;
2295
Matt Arsenault4bd72362016-12-10 00:39:12 +00002296 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002297 return nullptr;
Matt Arsenaultfdcdd882017-09-21 00:45:59 +00002298
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002299 break;
2300 }
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002301 }
2302
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002303 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2304 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
Matt Arsenault3cb9ff82017-03-11 05:40:40 +00002305 const MachineOperand *Src0Mods =
2306 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002307 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
Matt Arsenault3cb9ff82017-03-11 05:40:40 +00002308 const MachineOperand *Src1Mods =
2309 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002310 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
Matt Arsenault3cb9ff82017-03-11 05:40:40 +00002311 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
2312 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002313
Matt Arsenault0084adc2018-04-30 19:08:16 +00002314 if (!IsFMA && !Src0Mods && !Src1Mods && !Clamp && !Omod &&
Matt Arsenaultc3172872017-09-14 20:54:29 +00002315 // If we have an SGPR input, we will violate the constant bus restriction.
Matt Arsenaultfdcdd882017-09-21 00:45:59 +00002316      (!Src0->isReg() ||
       !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
Stanislav Mekhanoshin710da422017-09-11 17:13:57 +00002317 if (auto Imm = getFoldableImm(Src2)) {
2318 return BuildMI(*MBB, MI, MI.getDebugLoc(),
2319 get(IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32))
2320 .add(*Dst)
2321 .add(*Src0)
2322 .add(*Src1)
2323 .addImm(Imm);
2324 }
2325 if (auto Imm = getFoldableImm(Src1)) {
2326 return BuildMI(*MBB, MI, MI.getDebugLoc(),
2327 get(IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32))
2328 .add(*Dst)
2329 .add(*Src0)
2330 .addImm(Imm)
2331 .add(*Src2);
2332 }
2333 if (auto Imm = getFoldableImm(Src0)) {
2334 if (isOperandLegal(MI, AMDGPU::getNamedOperandIdx(AMDGPU::V_MADMK_F32,
2335 AMDGPU::OpName::src0), Src1))
2336 return BuildMI(*MBB, MI, MI.getDebugLoc(),
2337 get(IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32))
2338 .add(*Dst)
2339 .add(*Src1)
2340 .addImm(Imm)
2341 .add(*Src2);
2342 }
2343 }
2344
Matt Arsenault0084adc2018-04-30 19:08:16 +00002345 assert((!IsFMA || !IsF16) && "fmac only expected with f32");
2346 unsigned NewOpc = IsFMA ? AMDGPU::V_FMA_F32 :
2347 (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32);
2348 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
Diana Picus116bbab2017-01-13 09:58:52 +00002349 .add(*Dst)
Matt Arsenault3cb9ff82017-03-11 05:40:40 +00002350 .addImm(Src0Mods ? Src0Mods->getImm() : 0)
Diana Picus116bbab2017-01-13 09:58:52 +00002351 .add(*Src0)
Matt Arsenault3cb9ff82017-03-11 05:40:40 +00002352 .addImm(Src1Mods ? Src1Mods->getImm() : 0)
Diana Picus116bbab2017-01-13 09:58:52 +00002353 .add(*Src1)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00002354 .addImm(0) // Src2 mods
Diana Picus116bbab2017-01-13 09:58:52 +00002355 .add(*Src2)
Matt Arsenault3cb9ff82017-03-11 05:40:40 +00002356 .addImm(Clamp ? Clamp->getImm() : 0)
2357 .addImm(Omod ? Omod->getImm() : 0);
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002358}
2359
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002360// It's not generally safe to move VALU instructions across these since it will
2361// start using the register as a base index rather than directly.
2362// XXX - Why isn't hasSideEffects sufficient for these?
2363static bool changesVGPRIndexingMode(const MachineInstr &MI) {
2364 switch (MI.getOpcode()) {
2365 case AMDGPU::S_SET_GPR_IDX_ON:
2366 case AMDGPU::S_SET_GPR_IDX_MODE:
2367 case AMDGPU::S_SET_GPR_IDX_OFF:
2368 return true;
2369 default:
2370 return false;
2371 }
2372}

bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                       const MachineBasicBlock *MBB,
                                       const MachineFunction &MF) const {
  // XXX - Do we want the SP check in the base implementation?

  // Target-independent instructions do not have an implicit-use of EXEC, even
  // when they operate on VGPRs. Treating EXEC modifications as scheduling
  // boundaries prevents incorrect movements of such instructions.
  return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
         MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
         MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
         MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
         changesVGPRIndexingMode(MI);
}

bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
  return Opcode == AMDGPU::DS_ORDERED_COUNT ||
         Opcode == AMDGPU::DS_GWS_INIT ||
         Opcode == AMDGPU::DS_GWS_SEMA_V ||
         Opcode == AMDGPU::DS_GWS_SEMA_BR ||
         Opcode == AMDGPU::DS_GWS_SEMA_P ||
         Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL ||
         Opcode == AMDGPU::DS_GWS_BARRIER;
}

bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  if (MI.mayStore() && isSMRD(MI))
    return true; // scalar store or atomic

  // These instructions cause shader I/O that may cause hardware lockups
  // when executed with an empty EXEC mask.
  //
  // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
  // EXEC = 0, but checking for that case here seems not worth it
  // given the typical code patterns.
  if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
      Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE ||
      Opcode == AMDGPU::DS_ORDERED_COUNT)
    return true;

  if (MI.isInlineAsm())
    return true; // conservative assumption

  // These are like SALU instructions in terms of effects, so it's questionable
  // whether we should return true for those.
  //
  // However, executing them with EXEC = 0 causes them to operate on undefined
  // data, which we avoid by returning true here.
  if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32)
    return true;

  return false;
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  switch (Imm.getBitWidth()) {
  case 32:
    return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
                                        ST.hasInv2PiInlineImm());
  case 64:
    return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
                                        ST.hasInv2PiInlineImm());
  case 16:
    return ST.has16BitInsts() &&
           AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
                                        ST.hasInv2PiInlineImm());
  default:
    llvm_unreachable("invalid bitwidth");
  }
}
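
// Rough intuition, assuming the usual GCN inline-constant rules: small
// integers in [-16, 64] and a few float values (+-0.5, +-1.0, +-2.0, +-4.0,
// plus 1/(2*pi) when ST.hasInv2PiInlineImm()) encode for free as inline
// constants; any other bit pattern, e.g. 0x40490fdb (~pi as f32), must be
// emitted as a 32-bit literal instead.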

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
                                   uint8_t OperandType) const {
  if (!MO.isImm() ||
      OperandType < AMDGPU::OPERAND_SRC_FIRST ||
      OperandType > AMDGPU::OPERAND_SRC_LAST)
    return false;

  // MachineOperand provides no way to tell the true operand size, since it
  // only records a 64-bit value. We need to know the size to determine if a
  // 32-bit floating point immediate bit pattern is legal for an integer
  // immediate. It would be for any 32-bit integer operand, but would not be
  // for a 64-bit one.

  int64_t Imm = MO.getImm();
  switch (OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32: {
    int32_t Trunc = static_cast<int32_t>(Imm);
    return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
  }
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return AMDGPU::isInlinableLiteral64(MO.getImm(),
                                        ST.hasInv2PiInlineImm());
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
    if (isInt<16>(Imm) || isUInt<16>(Imm)) {
      // A few special case instructions have 16-bit operands on subtargets
      // where 16-bit instructions are not legal.
      // TODO: Do the 32-bit immediates work? We shouldn't really need to
      // handle constants in these cases.
      int16_t Trunc = static_cast<int16_t>(Imm);
      return ST.has16BitInsts() &&
             AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
    }

    return false;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    if (isUInt<16>(Imm)) {
      int16_t Trunc = static_cast<int16_t>(Imm);
      return ST.has16BitInsts() &&
             AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
    }
    if (!(Imm & 0xffff)) {
      return ST.has16BitInsts() &&
             AMDGPU::isInlinableLiteral16(Imm >> 16, ST.hasInv2PiInlineImm());
    }
    uint32_t Trunc = static_cast<uint32_t>(Imm);
    return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
  }
  default:
    llvm_unreachable("invalid bitwidth");
  }
}
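
// A worked example for the packed v2f16 case above (illustrative): the value
// 0x3c003c00 replicates the f16 constant 1.0 in both halves and is accepted
// by isInlinableLiteralV216, while 0x3c004000 (2.0 in the low half, 1.0 in
// the high half) mixes two different halves and is rejected.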

bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
                                        const MCOperandInfo &OpInfo) const {
  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    return false;
  case MachineOperand::MO_Immediate:
    return !isInlineConstant(MO, OpInfo);
  case MachineOperand::MO_FrameIndex:
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_ExternalSymbol:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_MCSymbol:
    return true;
  default:
    llvm_unreachable("unexpected operand type");
  }
}

static bool compareMachineOp(const MachineOperand &Op0,
                             const MachineOperand &Op1) {
  if (Op0.getType() != Op1.getType())
    return false;

  switch (Op0.getType()) {
  case MachineOperand::MO_Register:
    return Op0.getReg() == Op1.getReg();
  case MachineOperand::MO_Immediate:
    return Op0.getImm() == Op1.getImm();
  default:
    llvm_unreachable("Didn't expect to be comparing these operand types");
  }
}

bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
                                    const MachineOperand &MO) const {
  const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo];

  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());

  if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
    return true;

  if (OpInfo.RegClass < 0)
    return false;

  if (MO.isImm() && isInlineConstant(MO, OpInfo))
    return RI.opCanUseInlineConstant(OpInfo.OperandType);

  return RI.opCanUseLiteralConstant(OpInfo.OperandType);
}

bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
  int Op32 = AMDGPU::getVOPe32(Opcode);
  if (Op32 == -1)
    return false;

  return pseudoToMCOpcode(Op32) != -1;
}
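
// For example (illustrative): V_ADD_F32_e64 maps to V_ADD_F32_e32 through
// AMDGPU::getVOPe32, so it has a 32-bit encoding, while a VOP3-only opcode
// such as V_FMA_F32 has no e32 form and getVOPe32 returns -1 for it.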

bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
  // The src0_modifiers operand is present on all instructions
  // that have modifiers.

  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
  return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         hasModifiersSet(MI, AMDGPU::OpName::omod);
}

bool SIInstrInfo::canShrink(const MachineInstr &MI,
                            const MachineRegisterInfo &MRI) const {
  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instruction with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc, and src0_modifiers and src1_modifiers are not set.
  // We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then do the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
    default: return false;

    case AMDGPU::V_ADDC_U32_e64:
    case AMDGPU::V_SUBB_U32_e64:
    case AMDGPU::V_SUBBREV_U32_e64: {
      const MachineOperand *Src1
        = getNamedOperand(MI, AMDGPU::OpName::src1);
      if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
        return false;
      // Additional verification is needed for sdst/src2.
      return true;
    }
    case AMDGPU::V_MAC_F32_e64:
    case AMDGPU::V_MAC_F16_e64:
    case AMDGPU::V_FMAC_F32_e64:
      if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
        return false;
      break;

    case AMDGPU::V_CNDMASK_B32_e64:
      break;
    }
  }

  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
               hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
    return false;

  // We don't need to check src0, all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Can it be shrunk to a valid 32-bit opcode?
  if (!hasVALU32BitEncoding(MI.getOpcode()))
    return false;

  // Check output modifiers.
  return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
         !hasModifiersSet(MI, AMDGPU::OpName::clamp);
}
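
// A shrinking example under the rules above (illustrative, assumed assembly):
//   v_add_f32_e64 v0, v1, v2     ; no modifiers, VGPR operands -> shrinkable
// becomes
//   v_add_f32_e32 v0, v1, v2
// whereas "v_add_f32_e64 v0, |v1|, v2" must stay in the e64 encoding because
// the e32 form has no src0_modifiers operand to hold the |.| modifier.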

// Set VCC operand with all flags from \p Orig, except for setting it as
// implicit.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {
  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
                                           unsigned Op32) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineInstrBuilder Inst32 =
    BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32));

  // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
  // For VOPC instructions, this is replaced by an implicit def of vcc.
  int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
  if (Op32DstIdx != -1) {
    // dst
    Inst32.add(MI.getOperand(0));
  } else {
    assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
           "Unexpected case");
  }

  Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));

  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1)
    Inst32.add(*Src1);

  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);

  if (Src2) {
    int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
    if (Op32Src2Idx != -1) {
      Inst32.add(*Src2);
    } else {
      // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
      // replaced with an implicit read of vcc. This was already added
      // during the initial BuildMI, so find it to preserve the flags.
      copyFlagsToImplicitVCC(*Inst32, *Src2);
    }
  }

  return Inst32;
}
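
// Typical use of the pair above (a sketch, not code from this file):
//   if (TII->canShrink(MI, MRI)) {
//     int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
//     MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
//     // ... copy any remaining implicit operands and erase MI ...
//   }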

bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
                                  const MachineOperand &MO,
                                  const MCOperandInfo &OpInfo) const {
  // Literal constants use the constant bus.
  //if (isLiteralConstantLike(MO, OpInfo))
  //  return true;
  if (MO.isImm())
    return !isInlineConstant(MO, OpInfo);

  if (!MO.isReg())
    return true; // Misc other operands like FrameIndex

  if (!MO.isUse())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));

  // FLAT_SCR is just an SGPR pair.
  if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
    return true;

  // EXEC register uses the constant bus.
  if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
    return true;

  // SGPRs use the constant bus.
  return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
          (!MO.isImplicit() &&
           (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
            AMDGPU::SGPR_64RegClass.contains(MO.getReg()))));
}
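
// Constant bus intuition (illustrative, assumed assembly): in
//   v_add_f32_e64 v0, s0, s1
// each distinct SGPR source is a separate constant bus read, so the
// instruction uses the bus twice and is rejected by the verifier below, while
//   v_add_f32_e64 v0, s0, s0
// reads only one unique SGPR and is legal.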

static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
  for (const MachineOperand &MO : MI.implicit_operands()) {
    // We only care about reads.
    if (MO.isDef())
      continue;

    switch (MO.getReg()) {
    case AMDGPU::VCC:
    case AMDGPU::M0:
    case AMDGPU::FLAT_SCR:
      return MO.getReg();

    default:
      break;
    }
  }

  return AMDGPU::NoRegister;
}

static bool shouldReadExec(const MachineInstr &MI) {
  if (SIInstrInfo::isVALU(MI)) {
    switch (MI.getOpcode()) {
    case AMDGPU::V_READLANE_B32:
    case AMDGPU::V_READLANE_B32_si:
    case AMDGPU::V_READLANE_B32_vi:
    case AMDGPU::V_WRITELANE_B32:
    case AMDGPU::V_WRITELANE_B32_si:
    case AMDGPU::V_WRITELANE_B32_vi:
      return false;
    }

    return true;
  }

  if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
      SIInstrInfo::isSALU(MI) ||
      SIInstrInfo::isSMRD(MI))
    return false;

  return true;
}

static bool isSubRegOf(const SIRegisterInfo &TRI,
                       const MachineOperand &SuperVec,
                       const MachineOperand &SubReg) {
  if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
    return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());

  return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
         SubReg.getReg() == SuperVec.getReg();
}

bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI.getOpcode();
  if (SIInstrInfo::isGenericOpcode(MI.getOpcode()))
    return true;

  const MachineFunction *MF = MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI.getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  if (MI.isInlineAsm()) {
    // Verify register classes for inlineasm constraints.
    for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
         I != E; ++I) {
      const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
      if (!RC)
        continue;

      const MachineOperand &Op = MI.getOperand(I);
      if (!Op.isReg())
        continue;

      unsigned Reg = Op.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
        ErrInfo = "inlineasm operand has incorrect register class.";
        return false;
      }
    }

    return true;
  }

  // Make sure the register classes are correct.
  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    if (MI.getOperand(i).isFPImm()) {
      ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
                "all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI.getOperand(i).isImm()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
      const MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    }
    case MCOI::OPERAND_IMMEDIATE:
    case AMDGPU::OPERAND_KIMM32:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      LLVM_FALLTHROUGH;
    default:
      continue;
    }

    if (!MI.getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      unsigned Reg = MI.getOperand(i).getReg();
      if (Reg == AMDGPU::NoRegister ||
          TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify SDWA
  if (isSDWA(MI)) {
    if (!ST.hasSDWA()) {
      ErrInfo = "SDWA is not supported on this target";
      return false;
    }

    int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);

    const int OpIndices[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        continue;
      const MachineOperand &MO = MI.getOperand(OpIdx);

      if (!ST.hasSDWAScalar()) {
        // Only VGPRs on VI
        if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
          ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
          return false;
        }
      } else {
        // No immediates on GFX9
        if (!MO.isReg()) {
          ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9";
          return false;
        }
      }
    }

    if (!ST.hasSDWAOmod()) {
      // No omod allowed on VI
      const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
      if (OMod != nullptr &&
          (!OMod->isImm() || OMod->getImm() != 0)) {
        ErrInfo = "OMod not allowed in SDWA instructions on VI";
        return false;
      }
    }

    uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
    if (isVOPC(BasicOpcode)) {
      if (!ST.hasSDWASdst() && DstIdx != -1) {
        // Only vcc allowed as dst on VI for VOPC
        const MachineOperand &Dst = MI.getOperand(DstIdx);
        if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
          ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
          return false;
        }
      } else if (!ST.hasSDWAOutModsVOPC()) {
        // No clamp allowed on GFX9 for VOPC
        const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
        if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
          ErrInfo = "Clamp not allowed in VOPC SDWA instructions on GFX9";
          return false;
        }

        // No omod allowed on GFX9 for VOPC
        const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
        if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
          ErrInfo = "OMod not allowed in VOPC SDWA instructions on GFX9";
          return false;
        }
      }
    }

    const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused && DstUnused->isImm() &&
        DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (!Dst.isReg() || !Dst.isTied()) {
        ErrInfo = "Dst register should have tied register";
        return false;
      }

      const MachineOperand &TiedMO =
        MI.getOperand(MI.findTiedOperandIdx(DstIdx));
      if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
        ErrInfo =
          "Dst register should be tied to implicit use of preserved register";
        return false;
      } else if (TargetRegisterInfo::isPhysicalRegister(TiedMO.getReg()) &&
                 Dst.getReg() != TiedMO.getReg()) {
        ErrInfo = "Dst register should use same physical register as preserved";
        return false;
      }
    }
  }

  // Verify MIMG
  if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
    // Ensure that the return type used is large enough for all the options
    // being used; TFE/LWE require an extra result register.
    const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
    if (DMask) {
      uint64_t DMaskImm = DMask->getImm();
      uint32_t RegCount =
        isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
      const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
      const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
      const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);

      // Adjust for packed 16-bit values.
      if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
        RegCount >>= 1;

      // Adjust if using LWE or TFE.
      if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
        RegCount += 1;

      const uint32_t DstIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (Dst.isReg()) {
        const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
        uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
        if (RegCount > DstSize) {
          ErrInfo = "MIMG instruction returns too many registers for dst "
                    "register class";
          return false;
        }
      }
    }
  }
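
  // A worked dmask example (illustrative): an image_load with dmask = 0b1011
  // returns countPopulation(0b1011) = 3 dwords; with tfe = 1 one more is
  // added, so vdata must be at least a 4 x 32-bit class such as VReg_128.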
3018
Tim Renouf2a99fa22018-02-28 19:10:32 +00003019 // Verify VOP*. Ignore multiple sgpr operands on writelane.
3020 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
3021 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
Matt Arsenaulte368cb32014-12-11 23:37:32 +00003022 // Only look at the true operands. Only a real operand can use the constant
3023 // bus, and we don't want to check pseudo-operands like the source modifier
3024 // flags.
3025 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
3026
Tom Stellard93fabce2013-10-10 17:11:55 +00003027 unsigned ConstantBusCount = 0;
Stanislav Mekhanoshina4bfb3c2018-04-24 18:17:55 +00003028 unsigned LiteralCount = 0;
Matt Arsenaultffc82752016-07-05 17:09:01 +00003029
3030 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
3031 ++ConstantBusCount;
3032
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003033 unsigned SGPRUsed = findImplicitSGPRRead(MI);
Matt Arsenaulte223ceb2015-10-21 21:15:01 +00003034 if (SGPRUsed != AMDGPU::NoRegister)
3035 ++ConstantBusCount;
3036
Matt Arsenaulte368cb32014-12-11 23:37:32 +00003037 for (int OpIdx : OpIndices) {
3038 if (OpIdx == -1)
3039 break;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003040 const MachineOperand &MO = MI.getOperand(OpIdx);
Matt Arsenault4bd72362016-12-10 00:39:12 +00003041 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
Tom Stellard73ae1cb2014-09-23 21:26:25 +00003042 if (MO.isReg()) {
3043 if (MO.getReg() != SGPRUsed)
Tom Stellard93fabce2013-10-10 17:11:55 +00003044 ++ConstantBusCount;
Tom Stellard73ae1cb2014-09-23 21:26:25 +00003045 SGPRUsed = MO.getReg();
3046 } else {
3047 ++ConstantBusCount;
Stanislav Mekhanoshina4bfb3c2018-04-24 18:17:55 +00003048 ++LiteralCount;
Tom Stellard93fabce2013-10-10 17:11:55 +00003049 }
3050 }
Tom Stellard93fabce2013-10-10 17:11:55 +00003051 }
3052 if (ConstantBusCount > 1) {
3053 ErrInfo = "VOP* instruction uses the constant bus more than once";
3054 return false;
3055 }
Stanislav Mekhanoshina4bfb3c2018-04-24 18:17:55 +00003056
3057 if (isVOP3(MI) && LiteralCount) {
3058 ErrInfo = "VOP3 instruction uses literal";
3059 return false;
3060 }
Tom Stellard93fabce2013-10-10 17:11:55 +00003061 }
3062
Matt Arsenaultbecb1402014-06-23 18:28:31 +00003063 // Verify misc. restrictions on specific instructions.
3064 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
3065 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003066 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
3067 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
3068 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
Matt Arsenaultbecb1402014-06-23 18:28:31 +00003069 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
3070 if (!compareMachineOp(Src0, Src1) &&
3071 !compareMachineOp(Src0, Src2)) {
3072 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
3073 return false;
3074 }
3075 }
3076 }
3077
Matt Arsenault7ccf6cd2016-09-16 21:41:16 +00003078 if (isSOPK(MI)) {
3079 int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
3080 if (sopkIsZext(MI)) {
3081 if (!isUInt<16>(Imm)) {
3082 ErrInfo = "invalid immediate for SOPK instruction";
3083 return false;
3084 }
3085 } else {
3086 if (!isInt<16>(Imm)) {
3087 ErrInfo = "invalid immediate for SOPK instruction";
3088 return false;
3089 }
3090 }
3091 }
3092
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003093 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
3094 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
3095 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
3096 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
3097 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
3098 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
3099
3100 const unsigned StaticNumOps = Desc.getNumOperands() +
3101 Desc.getNumImplicitUses();
3102 const unsigned NumImplicitOps = IsDst ? 2 : 1;
3103
Nicolai Haehnle368972c2016-11-02 17:03:11 +00003104 // Allow additional implicit operands. This allows a fixup done by the post
3105 // RA scheduler where the main implicit operand is killed and implicit-defs
3106 // are added for sub-registers that remain live after this instruction.
3107 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003108 ErrInfo = "missing implicit register operands";
3109 return false;
3110 }
3111
3112 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
3113 if (IsDst) {
3114 if (!Dst->isUse()) {
3115 ErrInfo = "v_movreld_b32 vdst should be a use operand";
3116 return false;
3117 }
3118
3119 unsigned UseOpIdx;
3120 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
3121 UseOpIdx != StaticNumOps + 1) {
3122 ErrInfo = "movrel implicit operands should be tied";
3123 return false;
3124 }
3125 }
3126
3127 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
3128 const MachineOperand &ImpUse
3129 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
3130 if (!ImpUse.isReg() || !ImpUse.isUse() ||
3131 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
3132 ErrInfo = "src0 should be subreg of implicit vector use";
3133 return false;
3134 }
3135 }
3136
Matt Arsenaultd092a062015-10-02 18:58:37 +00003137 // Make sure we aren't losing exec uses in the td files. This mostly requires
3138 // being careful when using let Uses to try to add other use registers.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003139 if (shouldReadExec(MI)) {
3140 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
Matt Arsenaultd092a062015-10-02 18:58:37 +00003141 ErrInfo = "VALU instruction does not implicitly read exec mask";
3142 return false;
3143 }
3144 }
3145
Matt Arsenault7b647552016-10-28 21:55:15 +00003146 if (isSMRD(MI)) {
3147 if (MI.mayStore()) {
3148 // The register offset form of scalar stores may only use m0 as the
3149 // soffset register.
3150 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
3151 if (Soff && Soff->getReg() != AMDGPU::M0) {
3152 ErrInfo = "scalar stores must use m0 as offset register";
3153 return false;
3154 }
3155 }
3156 }
3157
Tom Stellard5bfbae52018-07-11 20:59:01 +00003158 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) {
Matt Arsenault89ad17c2017-06-12 16:37:55 +00003159 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
3160 if (Offset->getImm() != 0) {
3161 ErrInfo = "subtarget does not support offsets in flat instructions";
3162 return false;
3163 }
3164 }
3165
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00003166 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
3167 if (DppCt) {
3168 using namespace AMDGPU::DPP;
3169
3170 unsigned DC = DppCt->getImm();
3171 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
3172 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
3173 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
3174 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
3175 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
3176 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST)) {
3177 ErrInfo = "Invalid dpp_ctrl value";
3178 return false;
3179 }
3180 }
3181
Tom Stellard93fabce2013-10-10 17:11:55 +00003182 return true;
3183}

unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::WQM: return AMDGPU::WQM;
  case AMDGPU::WWM: return AMDGPU::WWM;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32:
    return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32:
    return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
    return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;
  // FIXME: These are not consistently handled, and selected when the carry is
  // used.
  case AMDGPU::S_ADD_U32:
    return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_SUB_U32:
    return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
  case AMDGPU::S_XNOR_B32:
    return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
  case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
  case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
  case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
  case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
  case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
  case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
  case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
  case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
  }
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1) {
    unsigned Reg = MI.getOperand(OpNo).getReg();

    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return MRI.getRegClass(Reg);
    return RI.getPhysRegClass(Reg);
  }

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineBasicBlock *MBB = MI.getParent();
  MachineOperand &MO = MI.getOperand(OpIdx);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg())
    Opcode = AMDGPU::COPY;
  else if (RI.isSGPRClass(RC))
    Opcode = AMDGPU::S_MOV_B32;

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
  else
    VRC = &AMDGPU::VGPR_32RegClass;

  unsigned Reg = MRI.createVirtualRegister(VRC);
  DebugLoc DL = MBB->findDebugLoc(I);
  BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
  MO.ChangeToRegister(Reg, false);
}
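
// An operand fix-up example (illustrative MIR, not from this file): if a VALU
// instruction already consumes one SGPR and a second operand %1:sgpr_32 is
// illegal in its slot, legalizeOpWithMove inserts
//   %3:vgpr_32 = COPY %1
// and rewrites the offending operand to use %3.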

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
      .addReg(SuperReg.getReg(), 0, SubIdx);
    return SubReg;
  }

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
    .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
    .addReg(NewSuperReg, 0, SubIdx);

  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}
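
// Immediate splitting example (illustrative): for the 64-bit immediate
// 0x1122334455667788, sub0 yields 0x55667788 and sub1 yields 0x11223344,
// matching the little-endian sub-register layout of 64-bit register pairs.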

// Change the order of operands from (0, 1, 2) to (0, 2, 1)
void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
  assert(Inst.getNumExplicitOperands() == 3);
  MachineOperand Op1 = Inst.getOperand(1);
  Inst.RemoveOperand(1);
  Inst.addOperand(Op1);
}

bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
                                    const MCOperandInfo &OpInfo,
                                    const MachineOperand &MO) const {
  if (!MO.isReg())
    return false;

  unsigned Reg = MO.getReg();
  const TargetRegisterClass *RC =
    TargetRegisterInfo::isVirtualRegister(Reg) ?
    MRI.getRegClass(Reg) :
    RI.getPhysRegClass(Reg);

  const SIRegisterInfo *TRI =
    static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
  RC = TRI->getSubRegClass(RC, MO.getSubReg());

  // In order to be legal, the common sub-class must be equal to the
  // class of the current operand. For example:
  //
  // v_mov_b32 s0 ; Operand defined as vsrc_b32
  //              ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL
  //
  // s_sendmsg 0, s0 ; Operand defined as m0reg
  //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL

  return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
}

bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
                                     const MCOperandInfo &OpInfo,
                                     const MachineOperand &MO) const {
  if (MO.isReg())
    return isLegalRegOperand(MRI, OpInfo, MO);

  // Handle non-register types that are treated like immediates.
  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
  return true;
}

bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
                                 const MachineOperand *MO) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &InstDesc = MI.getDesc();
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  const TargetRegisterClass *DefinedRC =
    OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
  if (!MO)
    MO = &MI.getOperand(OpIdx);

  if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
    RegSubRegPair SGPRUsed;
    if (MO->isReg())
      SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg());

    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (i == OpIdx)
        continue;
      const MachineOperand &Op = MI.getOperand(i);
      if (Op.isReg()) {
        if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
            usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
          return false;
        }
      } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
        return false;
      }
    }
  }

  if (MO->isReg()) {
    assert(DefinedRC);
    return isLegalRegOperand(MRI, OpInfo, *MO);
  }

  // Handle non-register types that are treated like immediates.
  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());

  if (!DefinedRC) {
    // This operand expects an immediate.
    return true;
  }

  return isImmOperandLegal(MI, OpIdx, *MO);
}

void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &InstrDesc = get(Opc);

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32,
  // we need to only have one constant bus use.
  //
  // Note we do not need to worry about literal constants here. They are
  // disabled for the operand type for instructions because they will always
  // violate the one constant bus use rule.
  bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
  if (HasImplicitSGPR) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    MachineOperand &Src0 = MI.getOperand(Src0Idx);

    if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
      legalizeOpWithMove(MI, Src0Idx);
  }

  // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
  // both the value to write (src0) and lane select (src1). Fix up non-SGPR
  // src0/src1 with V_READFIRSTLANE.
  if (Opc == AMDGPU::V_WRITELANE_B32) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    MachineOperand &Src0 = MI.getOperand(Src0Idx);
    const DebugLoc &DL = MI.getDebugLoc();
    if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
      unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src0);
      Src0.ChangeToRegister(Reg, false);
    }
    if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
      unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src1);
      Src1.ChangeToRegister(Reg, false);
    }
    return;
  }

  // VOP2 src0 instructions support all operand types, so we don't need to check
  // their legality. If src1 is already legal, we don't need to do anything.
  if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
    return;

  // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
  // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
  // select is uniform.
  if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
      RI.isVGPR(MRI, Src1.getReg())) {
    unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
    const DebugLoc &DL = MI.getDebugLoc();
    BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
      .add(Src1);
    Src1.ChangeToRegister(Reg, false);
    return;
  }

  // We do not use commuteInstruction here because it is too aggressive and will
  // commute if it is possible. We only want to commute here if it improves
  // legality. This can be called a fairly large number of times so don't waste
  // compile time pointlessly swapping and checking legality again.
  if (HasImplicitSGPR || !MI.isCommutable()) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI.getOperand(Src0Idx);

  // If src0 can be used as src1, commuting will make the operands legal.
  // Otherwise we have to give up and insert a move.
  //
  // TODO: Other immediate-like operand kinds could be commuted if there was a
  // MachineOperand::ChangeTo* for them.
  if ((!Src1.isImm() && !Src1.isReg()) ||
      !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int CommutedOpc = commuteOpcode(MI);
  if (CommutedOpc == -1) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  MI.setDesc(get(CommutedOpc));

  unsigned Src0Reg = Src0.getReg();
  unsigned Src0SubReg = Src0.getSubReg();
  bool Src0Kill = Src0.isKill();

  if (Src1.isImm())
    Src0.ChangeToImmediate(Src1.getImm());
  else if (Src1.isReg()) {
    Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
    Src0.setSubReg(Src1.getSubReg());
  } else
    llvm_unreachable("Should only have register or immediate operands");

  Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
  Src1.setSubReg(Src0SubReg);
}
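
// A commute example for the path above (illustrative, assumed assembly):
//   v_add_f32_e32 v0, v1, s0     ; illegal, VOP2 src1 cannot be an SGPR
// commutes to
//   v_add_f32_e32 v0, s0, v1     ; legal, src0 may read the constant bus
// which avoids inserting an extra copy.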

// Legalize VOP3 operands. Because all operand types are supported for any
// operand, and since literal constants are not allowed and should never be
// seen, we only need to worry about inserting copies if we use multiple SGPR
// operands.
void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  int VOP3Idx[3] = {
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
  };

  // Find the one SGPR operand we are allowed to use.
  unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = VOP3Idx[i];
    if (Idx == -1)
      break;
    MachineOperand &MO = MI.getOperand(Idx);

    // We should never see a VOP3 instruction with an illegal immediate operand.
    if (!MO.isReg())
      continue;

    if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
      continue; // VGPRs are legal

    if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
      SGPRReg = MO.getReg();
      // We can use one SGPR in each VOP3 instruction.
      continue;
    }

    // If we make it this far, then the operand is not legal and we must
    // legalize it.
    legalizeOpWithMove(MI, Idx);
  }
}
3598
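// Copy the value in \p SrcReg from VGPRs into a fresh SGPR of the equivalent
// register class, using one V_READFIRSTLANE_B32 per 32-bit piece; multi-dword
// values are reassembled with a REG_SEQUENCE. Schematically, for a 64-bit
// source (registers illustrative):
//   s0     = V_READFIRSTLANE_B32 v[0:1]:sub0
//   s1     = V_READFIRSTLANE_B32 v[0:1]:sub1
//   s[0:1] = REG_SEQUENCE s0, sub0, s1, sub1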
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003599unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
3600 MachineRegisterInfo &MRI) const {
Tom Stellard1397d492016-02-11 21:45:07 +00003601 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
3602 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
3603 unsigned DstReg = MRI.createVirtualRegister(SRC);
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003604 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
Tom Stellard1397d492016-02-11 21:45:07 +00003605
Nicolai Haehnle7a879772018-04-20 07:14:25 +00003606 if (SubRegs == 1) {
3607 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
3608 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
3609 .addReg(SrcReg);
3610 return DstReg;
3611 }
3612
Tom Stellard1397d492016-02-11 21:45:07 +00003613 SmallVector<unsigned, 8> SRegs;
3614 for (unsigned i = 0; i < SubRegs; ++i) {
3615 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003616 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
Tom Stellard1397d492016-02-11 21:45:07 +00003617 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003618 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
Tom Stellard1397d492016-02-11 21:45:07 +00003619 SRegs.push_back(SGPR);
3620 }
3621
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003622 MachineInstrBuilder MIB =
3623 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
3624 get(AMDGPU::REG_SEQUENCE), DstReg);
Tom Stellard1397d492016-02-11 21:45:07 +00003625 for (unsigned i = 0; i < SubRegs; ++i) {
3626 MIB.addReg(SRegs[i]);
3627 MIB.addImm(RI.getSubRegFromChannel(i));
3628 }
3629 return DstReg;
3630}
3631
Tom Stellard467b5b92016-02-20 00:37:25 +00003632void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003633 MachineInstr &MI) const {
Tom Stellard467b5b92016-02-20 00:37:25 +00003634
3635  // If the pointer is stored in VGPRs, then we need to move it to
3636  // SGPRs using v_readfirstlane. This is safe because we only select
3637  // loads with uniform pointers to SMRD instructions, so we know the
3638 // pointer value is uniform.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003639 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
Tom Stellard467b5b92016-02-20 00:37:25 +00003640 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00003641 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
3642 SBase->setReg(SGPR);
3643 }
3644 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
3645 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
3646 unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
3647 SOff->setReg(SGPR);
Tom Stellard467b5b92016-02-20 00:37:25 +00003648 }
3649}
3650
Tom Stellard0d162b12016-11-16 18:42:17 +00003651void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
3652 MachineBasicBlock::iterator I,
3653 const TargetRegisterClass *DstRC,
3654 MachineOperand &Op,
3655 MachineRegisterInfo &MRI,
3656 const DebugLoc &DL) const {
Tom Stellard0d162b12016-11-16 18:42:17 +00003657 unsigned OpReg = Op.getReg();
3658 unsigned OpSubReg = Op.getSubReg();
3659
3660 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
3661 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
3662
3663 // Check if operand is already the correct register class.
3664 if (DstRC == OpRC)
3665 return;
3666
3667 unsigned DstReg = MRI.createVirtualRegister(DstRC);
Diana Picus116bbab2017-01-13 09:58:52 +00003668 MachineInstr *Copy =
3669 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
Tom Stellard0d162b12016-11-16 18:42:17 +00003670
3671 Op.setReg(DstReg);
3672 Op.setSubReg(0);
3673
3674 MachineInstr *Def = MRI.getVRegDef(OpReg);
3675 if (!Def)
3676 return;
3677
3678 // Try to eliminate the copy if it is copying an immediate value.
3679 if (Def->isMoveImmediate())
3680 FoldImmediate(*Copy, *Def, OpReg, &MRI);
3681}
3682
Scott Linder823549a2018-10-08 18:47:01 +00003683// Emit the actual waterfall loop, executing the wrapped instruction for each
3684// unique value of \p Rsrc across all lanes. In the best case we execute 1
3685// iteration, in the worst case we execute 64 (once per lane).
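// Schematically, the loop emitted below is (registers illustrative):
//   loop:
//     s[4:7] = V_READFIRSTLANE_B32 of each dword of VRsrc
//     cond   = (s[4:5] == VRsrc[0:1]) & (s[6:7] == VRsrc[2:3])
//     exec   = S_AND_SAVEEXEC_B64 cond      ; run only the matching lanes
//     ... the wrapped instruction ...
//     exec   = exec ^ saved_exec            ; retire the lanes just handled
//     S_CBRANCH_EXECNZ loop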
3686static void
3687emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
3688 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
3689 const DebugLoc &DL, MachineOperand &Rsrc) {
3690 MachineBasicBlock::iterator I = LoopBB.begin();
3691
3692 unsigned VRsrc = Rsrc.getReg();
3693 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
3694
3695 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3696 unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3697 unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3698 unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3699 unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3700 unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3701 unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3702 unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3703 unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
3704
3705 // Beginning of the loop, read the next Rsrc variant.
3706 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0)
3707 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0);
3708 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1)
3709 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1);
3710 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2)
3711 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2);
3712 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3)
3713 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3);
3714
3715 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc)
3716 .addReg(SRsrcSub0)
3717 .addImm(AMDGPU::sub0)
3718 .addReg(SRsrcSub1)
3719 .addImm(AMDGPU::sub1)
3720 .addReg(SRsrcSub2)
3721 .addImm(AMDGPU::sub2)
3722 .addReg(SRsrcSub3)
3723 .addImm(AMDGPU::sub3);
3724
3725 // Update Rsrc operand to use the SGPR Rsrc.
3726 Rsrc.setReg(SRsrc);
3727 Rsrc.setIsKill(true);
3728
3729 // Identify all lanes with identical Rsrc operands in their VGPRs.
3730 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0)
3731 .addReg(SRsrc, 0, AMDGPU::sub0_sub1)
3732 .addReg(VRsrc, 0, AMDGPU::sub0_sub1);
3733 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1)
3734 .addReg(SRsrc, 0, AMDGPU::sub2_sub3)
3735 .addReg(VRsrc, 0, AMDGPU::sub2_sub3);
3736 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond)
3737 .addReg(CondReg0)
3738 .addReg(CondReg1);
3739
3740 MRI.setSimpleHint(SaveExec, AndCond);
3741
3742 // Update EXEC to matching lanes, saving original to SaveExec.
3743 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec)
3744 .addReg(AndCond, RegState::Kill);
3745
3746 // The original instruction is here; we insert the terminators after it.
3747 I = LoopBB.end();
3748
3749 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
3750 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
3751 .addReg(AMDGPU::EXEC)
3752 .addReg(SaveExec);
3753 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
3754}
3755
3756// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
3757// with SGPRs by iterating over all unique values across all lanes.
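// The control flow produced is, schematically:
//   MBB -> LoopBB -> RemainderBB
// where LoopBB branches back to itself while any lane's Rsrc is still
// unhandled. EXEC is saved in MBB and restored at the top of RemainderBB.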
3758static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
3759 MachineOperand &Rsrc, MachineDominatorTree *MDT) {
3760 MachineBasicBlock &MBB = *MI.getParent();
3761 MachineFunction &MF = *MBB.getParent();
3762 MachineRegisterInfo &MRI = MF.getRegInfo();
3763 MachineBasicBlock::iterator I(&MI);
3764 const DebugLoc &DL = MI.getDebugLoc();
3765
3766 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3767
3768 // Save the EXEC mask
3769 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec)
3770 .addReg(AMDGPU::EXEC);
3771
3772 // Killed uses in the instruction we are waterfalling around will be
3773 // incorrect due to the added control-flow.
3774 for (auto &MO : MI.uses()) {
3775 if (MO.isReg() && MO.isUse()) {
3776 MRI.clearKillFlags(MO.getReg());
3777 }
3778 }
3779
3780 // To insert the loop we need to split the block. Move everything after this
3781 // point to a new block, and insert a new empty block between the two.
3782 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
3783 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
3784 MachineFunction::iterator MBBI(MBB);
3785 ++MBBI;
3786
3787 MF.insert(MBBI, LoopBB);
3788 MF.insert(MBBI, RemainderBB);
3789
3790 LoopBB->addSuccessor(LoopBB);
3791 LoopBB->addSuccessor(RemainderBB);
3792
3793 // Move MI to the LoopBB, and the remainder of the block to RemainderBB.
3794 MachineBasicBlock::iterator J = I++;
3795 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3796 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3797 LoopBB->splice(LoopBB->begin(), &MBB, J);
3798
3799 MBB.addSuccessor(LoopBB);
3800
3801 // Update dominators. We know that MBB immediately dominates LoopBB, that
3802 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately
3803 // dominates all of the successors transferred to it from MBB that MBB used
3804 // to dominate.
3805 if (MDT) {
3806 MDT->addNewBlock(LoopBB, &MBB);
3807 MDT->addNewBlock(RemainderBB, LoopBB);
3808 for (auto &Succ : RemainderBB->successors()) {
3809 if (MDT->dominates(&MBB, Succ)) {
3810 MDT->changeImmediateDominator(Succ, RemainderBB);
3811 }
3812 }
3813 }
3814
3815 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
3816
3817 // Restore the EXEC mask
3818 MachineBasicBlock::iterator First = RemainderBB->begin();
3819 BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
3820 .addReg(SaveExec);
3821}
3822
3823// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
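// The replacement descriptor built below is, word for word,
//   { 0, 0, RSRC_DATA_FORMAT[31:0], RSRC_DATA_FORMAT[63:32] }
// i.e. a null base pointer plus the target's default format bits, so callers
// must add the extracted pointer back into the address computation themselves.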
3824static std::tuple<unsigned, unsigned>
3825extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
3826 MachineBasicBlock &MBB = *MI.getParent();
3827 MachineFunction &MF = *MBB.getParent();
3828 MachineRegisterInfo &MRI = MF.getRegInfo();
3829
3830 // Extract the ptr from the resource descriptor.
3831 unsigned RsrcPtr =
3832 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
3833 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
3834
3835 // Create an empty resource descriptor
3836 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3837 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3838 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3839 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
3840 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
3841
3842 // Zero64 = 0
3843 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
3844 .addImm(0);
3845
3846 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
3847 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
3848 .addImm(RsrcDataFormat & 0xFFFFFFFF);
3849
3850 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
3851 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
3852 .addImm(RsrcDataFormat >> 32);
3853
3854 // NewSRsrc = {Zero64, SRsrcFormat}
3855 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
3856 .addReg(Zero64)
3857 .addImm(AMDGPU::sub0_sub1)
3858 .addReg(SRsrcFormatLo)
3859 .addImm(AMDGPU::sub2)
3860 .addReg(SRsrcFormatHi)
3861 .addImm(AMDGPU::sub3);
3862
3863 return std::make_tuple(RsrcPtr, NewSRsrc);
3864}
3865
3866void SIInstrInfo::legalizeOperands(MachineInstr &MI,
3867 MachineDominatorTree *MDT) const {
Nicolai Haehnlece2b5892016-11-18 11:55:52 +00003868 MachineFunction &MF = *MI.getParent()->getParent();
3869 MachineRegisterInfo &MRI = MF.getRegInfo();
Tom Stellard82166022013-11-13 23:36:37 +00003870
3871 // Legalize VOP2
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003872 if (isVOP2(MI) || isVOPC(MI)) {
Matt Arsenault856d1922015-12-01 19:57:17 +00003873 legalizeOperandsVOP2(MRI, MI);
Tom Stellard0e975cf2014-08-01 00:32:35 +00003874 return;
Tom Stellard82166022013-11-13 23:36:37 +00003875 }
3876
3877 // Legalize VOP3
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003878 if (isVOP3(MI)) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00003879 legalizeOperandsVOP3(MRI, MI);
Matt Arsenaulte068f9a2015-09-24 07:51:28 +00003880 return;
Tom Stellard82166022013-11-13 23:36:37 +00003881 }
3882
Tom Stellard467b5b92016-02-20 00:37:25 +00003883 // Legalize SMRD
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003884 if (isSMRD(MI)) {
Tom Stellard467b5b92016-02-20 00:37:25 +00003885 legalizeOperandsSMRD(MRI, MI);
3886 return;
3887 }
3888
Tom Stellard4f3b04d2014-04-17 21:00:07 +00003889 // Legalize REG_SEQUENCE and PHI
Tom Stellard82166022013-11-13 23:36:37 +00003890 // The register class of the operands must be the same as the register
3891 // class of the output.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003892 if (MI.getOpcode() == AMDGPU::PHI) {
Craig Topper062a2ba2014-04-25 05:30:21 +00003893 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003894 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3895 if (!MI.getOperand(i).isReg() ||
3896 !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
Tom Stellard82166022013-11-13 23:36:37 +00003897 continue;
3898 const TargetRegisterClass *OpRC =
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003899 MRI.getRegClass(MI.getOperand(i).getReg());
Tom Stellard82166022013-11-13 23:36:37 +00003900 if (RI.hasVGPRs(OpRC)) {
3901 VRC = OpRC;
3902 } else {
3903 SRC = OpRC;
3904 }
3905 }
3906
3907 // If any of the operands are VGPR registers, then they all must be VGPRs;
3908 // otherwise we will create illegal VGPR->SGPR copies when legalizing
3909 // them.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003910 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
Tom Stellard82166022013-11-13 23:36:37 +00003911 if (!VRC) {
3912 assert(SRC);
3913 VRC = RI.getEquivalentVGPRClass(SRC);
3914 }
3915 RC = VRC;
3916 } else {
3917 RC = SRC;
3918 }
3919
3920 // Update all the operands so they have the same type.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003921 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
3922 MachineOperand &Op = MI.getOperand(I);
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00003923 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
Tom Stellard82166022013-11-13 23:36:37 +00003924 continue;
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00003925
3926 // MI is a PHI instruction.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003927 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00003928 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
3929
Tom Stellard0d162b12016-11-16 18:42:17 +00003930 // Avoid creating no-op copies with the same src and dst reg class. These
3931 // confuse some of the machine passes.
3932 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00003933 }
3934 }
3935
3936 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
3937 // VGPR dest type and SGPR sources, insert copies so all operands are
3938 // VGPRs. This seems to help operand folding / the register coalescer.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003939 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
3940 MachineBasicBlock *MBB = MI.getParent();
3941 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00003942 if (RI.hasVGPRs(DstRC)) {
3943 // Update all the operands so they are VGPR register classes. These may
3944 // not be the same register class because REG_SEQUENCE supports mixing
3945 // subregister index types e.g. sub0_sub1 + sub2 + sub3
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003946 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
3947 MachineOperand &Op = MI.getOperand(I);
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00003948 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
3949 continue;
3950
3951 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
3952 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
3953 if (VRC == OpRC)
3954 continue;
3955
Tom Stellard0d162b12016-11-16 18:42:17 +00003956 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00003957 Op.setIsKill();
Tom Stellard4f3b04d2014-04-17 21:00:07 +00003958 }
Tom Stellard82166022013-11-13 23:36:37 +00003959 }
Matt Arsenaulte068f9a2015-09-24 07:51:28 +00003960
3961 return;
Tom Stellard82166022013-11-13 23:36:37 +00003962 }
Tom Stellard15834092014-03-21 15:51:57 +00003963
Tom Stellarda5687382014-05-15 14:41:55 +00003964 // Legalize INSERT_SUBREG
3965 // src0 must have the same register class as dst
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003966 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
3967 unsigned Dst = MI.getOperand(0).getReg();
3968 unsigned Src0 = MI.getOperand(1).getReg();
Tom Stellarda5687382014-05-15 14:41:55 +00003969 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
3970 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
3971 if (DstRC != Src0RC) {
Tom Stellard0d162b12016-11-16 18:42:17 +00003972 MachineBasicBlock *MBB = MI.getParent();
3973 MachineOperand &Op = MI.getOperand(1);
3974 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
Tom Stellarda5687382014-05-15 14:41:55 +00003975 }
3976 return;
3977 }
3978
Nicolai Haehnle7a879772018-04-20 07:14:25 +00003979 // Legalize SI_INIT_M0
3980 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
3981 MachineOperand &Src = MI.getOperand(0);
3982 if (Src.isReg() && RI.hasVGPRs(MRI.getRegClass(Src.getReg())))
3983 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
3984 return;
3985 }
3986
Nicolai Haehnlece2b5892016-11-18 11:55:52 +00003987 // Legalize MIMG and MUBUF/MTBUF for shaders.
3988 //
3989 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
3990 // scratch memory access. In both cases, the legalization never involves
3991 // conversion to the addr64 form.
3992 if (isMIMG(MI) ||
Matthias Braunf1caa282017-12-15 22:22:58 +00003993 (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
Nicolai Haehnlece2b5892016-11-18 11:55:52 +00003994 (isMUBUF(MI) || isMTBUF(MI)))) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00003995 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
Tom Stellard1397d492016-02-11 21:45:07 +00003996 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
3997 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
3998 SRsrc->setReg(SGPR);
3999 }
4000
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004001 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
Tom Stellard1397d492016-02-11 21:45:07 +00004002 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
4003 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
4004 SSamp->setReg(SGPR);
4005 }
4006 return;
4007 }
4008
Scott Linder823549a2018-10-08 18:47:01 +00004009 // Legalize MUBUF* instructions.
4010 int RsrcIdx =
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004011 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
Scott Linder823549a2018-10-08 18:47:01 +00004012 if (RsrcIdx != -1) {
Tom Stellard155bbb72014-08-11 22:18:17 +00004013 // We have an MUBUF instruction
Scott Linder823549a2018-10-08 18:47:01 +00004014 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
4015 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
4016 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
4017 RI.getRegClass(RsrcRC))) {
Tom Stellard155bbb72014-08-11 22:18:17 +00004018 // The operands are legal.
4019 // FIXME: We may need to legalize operands besides srsrc.
4020 return;
4021 }
Tom Stellard15834092014-03-21 15:51:57 +00004022
Scott Linder823549a2018-10-08 18:47:01 +00004023 // Legalize a VGPR Rsrc.
4024 //
4025 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
4026 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
4027 // a zero-value SRsrc.
4028 //
4029 // If the instruction is _OFFSET (both idxen and offen disabled), and we
4030 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
4031 // above.
4032 //
4033 // Otherwise we are on non-ADDR64 hardware, and/or we have
4034 // idxen/offen/bothen and we fall back to a waterfall loop.
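    // Schematically, the ADDR64 rewrite performed below is (illustrative):
    //   NewVAddr = VAddr + RsrcPtr          ; 64-bit add via ADD/ADDC pair
    //   NewSRsrc = { 0, RSRC_DATA_FORMAT }  ; pointerless descriptor
    // so the effective address computed by the MUBUF instruction is unchanged.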
4035
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004036 MachineBasicBlock &MBB = *MI.getParent();
Matt Arsenaultef67d762015-09-09 17:03:29 +00004037
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004038 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
Scott Linder823549a2018-10-08 18:47:01 +00004039 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
Tom Stellard155bbb72014-08-11 22:18:17 +00004040 // This is already an ADDR64 instruction so we need to add the pointer
4041 // extracted from the resource descriptor to the current value of VAddr.
Matt Arsenaultef67d762015-09-09 17:03:29 +00004042 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4043 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Scott Linder823549a2018-10-08 18:47:01 +00004044 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Tom Stellard155bbb72014-08-11 22:18:17 +00004045
Scott Linder823549a2018-10-08 18:47:01 +00004046 unsigned RsrcPtr, NewSRsrc;
4047 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
4048
4049 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004050 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault51d2d0f2015-09-01 02:02:21 +00004051 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
Scott Linder823549a2018-10-08 18:47:01 +00004052 .addReg(RsrcPtr, 0, AMDGPU::sub0)
4053 .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
Tom Stellard15834092014-03-21 15:51:57 +00004054
Scott Linder823549a2018-10-08 18:47:01 +00004055 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
Matt Arsenault51d2d0f2015-09-01 02:02:21 +00004056 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
Scott Linder823549a2018-10-08 18:47:01 +00004057 .addReg(RsrcPtr, 0, AMDGPU::sub1)
4058 .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
Tom Stellard15834092014-03-21 15:51:57 +00004059
Matt Arsenaultef67d762015-09-09 17:03:29 +00004060 // NewVaddr = {NewVaddrHi, NewVaddrLo}
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004061 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
4062 .addReg(NewVAddrLo)
4063 .addImm(AMDGPU::sub0)
4064 .addReg(NewVAddrHi)
4065 .addImm(AMDGPU::sub1);
Scott Linder823549a2018-10-08 18:47:01 +00004066
4067 VAddr->setReg(NewVAddr);
4068 Rsrc->setReg(NewSRsrc);
4069 } else if (!VAddr && ST.hasAddr64()) {
Tom Stellard155bbb72014-08-11 22:18:17 +00004070 // This instruction is the _OFFSET variant, so we need to convert it to
4071 // ADDR64.
Tom Stellard5bfbae52018-07-11 20:59:01 +00004072 assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
4073 < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004074 "FIXME: Need to emit flat atomics here");
4075
Scott Linder823549a2018-10-08 18:47:01 +00004076 unsigned RsrcPtr, NewSRsrc;
4077 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
4078
4079 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004080 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
4081 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
4082 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
4083 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004084
4085 // Atomics rith return have have an additional tied operand and are
4086 // missing some of the special bits.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004087 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004088 MachineInstr *Addr64;
4089
4090 if (!VDataIn) {
4091 // Regular buffer load / store.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004092 MachineInstrBuilder MIB =
4093 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
Diana Picus116bbab2017-01-13 09:58:52 +00004094 .add(*VData)
Scott Linder823549a2018-10-08 18:47:01 +00004095 .addReg(NewVAddr)
4096 .addReg(NewSRsrc)
Diana Picus116bbab2017-01-13 09:58:52 +00004097 .add(*SOffset)
4098 .add(*Offset);
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004099
4100 // Atomics do not have this operand.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004101 if (const MachineOperand *GLC =
4102 getNamedOperand(MI, AMDGPU::OpName::glc)) {
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004103 MIB.addImm(GLC->getImm());
4104 }
4105
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004106 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004107
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004108 if (const MachineOperand *TFE =
4109 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004110 MIB.addImm(TFE->getImm());
4111 }
4112
Chandler Carruthc73c0302018-08-16 21:30:05 +00004113 MIB.cloneMemRefs(MI);
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004114 Addr64 = MIB;
4115 } else {
4116 // Atomics with return.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004117 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
Diana Picus116bbab2017-01-13 09:58:52 +00004118 .add(*VData)
4119 .add(*VDataIn)
Scott Linder823549a2018-10-08 18:47:01 +00004120 .addReg(NewVAddr)
4121 .addReg(NewSRsrc)
Diana Picus116bbab2017-01-13 09:58:52 +00004122 .add(*SOffset)
4123 .add(*Offset)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004124 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
Chandler Carruthc73c0302018-08-16 21:30:05 +00004125 .cloneMemRefs(MI);
Matt Arsenaulta40450c2015-11-05 02:46:56 +00004126 }
Tom Stellard15834092014-03-21 15:51:57 +00004127
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004128 MI.removeFromParent();
Tom Stellard15834092014-03-21 15:51:57 +00004129
Matt Arsenaultef67d762015-09-09 17:03:29 +00004130 // NewVAddr = {RsrcPtr:sub0, RsrcPtr:sub1}
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004131 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
4132 NewVAddr)
Scott Linder823549a2018-10-08 18:47:01 +00004133 .addReg(RsrcPtr, 0, AMDGPU::sub0)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004134 .addImm(AMDGPU::sub0)
Scott Linder823549a2018-10-08 18:47:01 +00004135 .addReg(RsrcPtr, 0, AMDGPU::sub1)
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004136 .addImm(AMDGPU::sub1);
Scott Linder823549a2018-10-08 18:47:01 +00004137 } else {
4138 // This is another variant; legalize Rsrc with waterfall loop from VGPRs
4139 // to SGPRs.
4140 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
Tom Stellard15834092014-03-21 15:51:57 +00004141 }
4142 }
Tom Stellard82166022013-11-13 23:36:37 +00004143}
4144
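// Move a scalar instruction, and transitively every instruction that consumes
// its result, from the SALU to the VALU (e.g. S_AND_B32 becomes V_AND_B32_e64).
// This is a worklist algorithm: each rewritten instruction pushes its users,
// which are rewritten in turn until the worklist is empty.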
Scott Linder823549a2018-10-08 18:47:01 +00004145void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
4146 MachineDominatorTree *MDT) const {
Alfred Huang5b270722017-07-14 17:56:55 +00004147 SetVectorType Worklist;
4148 Worklist.insert(&TopInst);
Tom Stellard82166022013-11-13 23:36:37 +00004149
4150 while (!Worklist.empty()) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004151 MachineInstr &Inst = *Worklist.pop_back_val();
4152 MachineBasicBlock *MBB = Inst.getParent();
Tom Stellarde0387202014-03-21 15:51:54 +00004153 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
4154
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004155 unsigned Opcode = Inst.getOpcode();
4156 unsigned NewOpcode = getVALUOp(Inst);
Matt Arsenault27cc9582014-04-18 01:53:18 +00004157
Tom Stellarde0387202014-03-21 15:51:54 +00004158 // Handle some special cases
Matt Arsenault27cc9582014-04-18 01:53:18 +00004159 switch (Opcode) {
Tom Stellard0c354f22014-04-30 15:31:29 +00004160 default:
Tom Stellard0c354f22014-04-30 15:31:29 +00004161 break;
Matt Arsenault301162c2017-11-15 21:51:43 +00004162 case AMDGPU::S_ADD_U64_PSEUDO:
4163 case AMDGPU::S_SUB_U64_PSEUDO:
Scott Linder823549a2018-10-08 18:47:01 +00004164 splitScalar64BitAddSub(Worklist, Inst, MDT);
Matt Arsenault301162c2017-11-15 21:51:43 +00004165 Inst.eraseFromParent();
4166 continue;
Matt Arsenault84445dd2017-11-30 22:51:26 +00004167 case AMDGPU::S_ADD_I32:
4168 case AMDGPU::S_SUB_I32:
4169 // FIXME: The u32 versions currently selected use the carry.
Scott Linder823549a2018-10-08 18:47:01 +00004170 if (moveScalarAddSub(Worklist, Inst, MDT))
Matt Arsenault84445dd2017-11-30 22:51:26 +00004171 continue;
4172
4173 // Default handling
4174 break;
Matt Arsenaultf35182c2014-03-24 20:08:05 +00004175 case AMDGPU::S_AND_B64:
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004176 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004177 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00004178 continue;
4179
4180 case AMDGPU::S_OR_B64:
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004181 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004182 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00004183 continue;
4184
4185 case AMDGPU::S_XOR_B64:
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004186 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
4187 Inst.eraseFromParent();
4188 continue;
4189
4190 case AMDGPU::S_NAND_B64:
4191 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
4192 Inst.eraseFromParent();
4193 continue;
4194
4195 case AMDGPU::S_NOR_B64:
4196 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
4197 Inst.eraseFromParent();
4198 continue;
4199
4200 case AMDGPU::S_XNOR_B64:
Graham Sellersba559ac2018-12-01 12:27:53 +00004201 if (ST.hasDLInsts())
4202 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
4203 else
4204 splitScalar64BitXnor(Worklist, Inst, MDT);
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004205 Inst.eraseFromParent();
4206 continue;
4207
4208 case AMDGPU::S_ANDN2_B64:
4209 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
4210 Inst.eraseFromParent();
4211 continue;
4212
4213 case AMDGPU::S_ORN2_B64:
4214 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004215 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00004216 continue;
4217
4218 case AMDGPU::S_NOT_B64:
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004219 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004220 Inst.eraseFromParent();
Matt Arsenaultf35182c2014-03-24 20:08:05 +00004221 continue;
4222
Matt Arsenault8333e432014-06-10 19:18:24 +00004223 case AMDGPU::S_BCNT1_I32_B64:
4224 splitScalar64BitBCNT(Worklist, Inst);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004225 Inst.eraseFromParent();
Matt Arsenault8333e432014-06-10 19:18:24 +00004226 continue;
4227
Eugene Zelenko59e12822017-08-08 00:47:13 +00004228 case AMDGPU::S_BFE_I64:
Matt Arsenault94812212014-11-14 18:18:16 +00004229 splitScalar64BitBFE(Worklist, Inst);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004230 Inst.eraseFromParent();
Matt Arsenault94812212014-11-14 18:18:16 +00004231 continue;
Matt Arsenault94812212014-11-14 18:18:16 +00004232
Marek Olsakbe047802014-12-07 12:19:03 +00004233 case AMDGPU::S_LSHL_B32:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004234 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Marek Olsakbe047802014-12-07 12:19:03 +00004235 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
4236 swapOperands(Inst);
4237 }
4238 break;
4239 case AMDGPU::S_ASHR_I32:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004240 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Marek Olsakbe047802014-12-07 12:19:03 +00004241 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
4242 swapOperands(Inst);
4243 }
4244 break;
4245 case AMDGPU::S_LSHR_B32:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004246 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Marek Olsakbe047802014-12-07 12:19:03 +00004247 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
4248 swapOperands(Inst);
4249 }
4250 break;
Marek Olsak707a6d02015-02-03 21:53:01 +00004251 case AMDGPU::S_LSHL_B64:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004252 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Marek Olsak707a6d02015-02-03 21:53:01 +00004253 NewOpcode = AMDGPU::V_LSHLREV_B64;
4254 swapOperands(Inst);
4255 }
4256 break;
4257 case AMDGPU::S_ASHR_I64:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004258 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Marek Olsak707a6d02015-02-03 21:53:01 +00004259 NewOpcode = AMDGPU::V_ASHRREV_I64;
4260 swapOperands(Inst);
4261 }
4262 break;
4263 case AMDGPU::S_LSHR_B64:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004264 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Marek Olsak707a6d02015-02-03 21:53:01 +00004265 NewOpcode = AMDGPU::V_LSHRREV_B64;
4266 swapOperands(Inst);
4267 }
4268 break;
Marek Olsakbe047802014-12-07 12:19:03 +00004269
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00004270 case AMDGPU::S_ABS_I32:
4271 lowerScalarAbs(Worklist, Inst);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004272 Inst.eraseFromParent();
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00004273 continue;
4274
Tom Stellardbc4497b2016-02-12 23:45:29 +00004275 case AMDGPU::S_CBRANCH_SCC0:
4276 case AMDGPU::S_CBRANCH_SCC1:
4277 // Clear unused bits of vcc
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004278 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
4279 AMDGPU::VCC)
4280 .addReg(AMDGPU::EXEC)
4281 .addReg(AMDGPU::VCC);
Tom Stellardbc4497b2016-02-12 23:45:29 +00004282 break;
4283
Matt Arsenaultf35182c2014-03-24 20:08:05 +00004284 case AMDGPU::S_BFE_U64:
Matt Arsenaultf35182c2014-03-24 20:08:05 +00004285 case AMDGPU::S_BFM_B64:
4286 llvm_unreachable("Moving this op to VALU not implemented");
Matt Arsenaulteb522e62017-02-27 22:15:25 +00004287
4288 case AMDGPU::S_PACK_LL_B32_B16:
4289 case AMDGPU::S_PACK_LH_B32_B16:
Eugene Zelenko59e12822017-08-08 00:47:13 +00004290 case AMDGPU::S_PACK_HH_B32_B16:
Matt Arsenaulteb522e62017-02-27 22:15:25 +00004291 movePackToVALU(Worklist, MRI, Inst);
4292 Inst.eraseFromParent();
4293 continue;
Konstantin Zhuravlyovca8946a2017-09-18 21:22:45 +00004294
4295 case AMDGPU::S_XNOR_B32:
4296 lowerScalarXnor(Worklist, Inst);
4297 Inst.eraseFromParent();
4298 continue;
4299
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004300 case AMDGPU::S_NAND_B32:
4301 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
4302 Inst.eraseFromParent();
4303 continue;
4304
4305 case AMDGPU::S_NOR_B32:
4306 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
4307 Inst.eraseFromParent();
4308 continue;
4309
4310 case AMDGPU::S_ANDN2_B32:
4311 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
4312 Inst.eraseFromParent();
4313 continue;
4314
4315 case AMDGPU::S_ORN2_B32:
4316 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
Konstantin Zhuravlyovca8946a2017-09-18 21:22:45 +00004317 Inst.eraseFromParent();
4318 continue;
Matt Arsenaulteb522e62017-02-27 22:15:25 +00004319 }
Tom Stellarde0387202014-03-21 15:51:54 +00004320
Tom Stellard15834092014-03-21 15:51:57 +00004321 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
4322 // We cannot move this instruction to the VALU, so we should try to
4323 // legalize its operands instead.
Scott Linder823549a2018-10-08 18:47:01 +00004324 legalizeOperands(Inst, MDT);
Tom Stellard82166022013-11-13 23:36:37 +00004325 continue;
Tom Stellard15834092014-03-21 15:51:57 +00004326 }
Tom Stellard82166022013-11-13 23:36:37 +00004327
Tom Stellard82166022013-11-13 23:36:37 +00004328 // Use the new VALU Opcode.
4329 const MCInstrDesc &NewDesc = get(NewOpcode);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004330 Inst.setDesc(NewDesc);
Tom Stellard82166022013-11-13 23:36:37 +00004331
Matt Arsenaultf0b1e3a2013-11-18 20:09:21 +00004332 // Remove any references to SCC. Vector instructions can't read from it;
4333 // we're just about to add the implicit use / defs of VCC, and we don't want
4334 // both.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004335 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
4336 MachineOperand &Op = Inst.getOperand(i);
Tom Stellardbc4497b2016-02-12 23:45:29 +00004337 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
Michael Liao6883d7e2019-03-15 12:42:21 +00004338 // Only propagate through live-def of SCC.
4339 if (Op.isDef() && !Op.isDead())
4340 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004341 Inst.RemoveOperand(i);
Tom Stellardbc4497b2016-02-12 23:45:29 +00004342 }
Matt Arsenaultf0b1e3a2013-11-18 20:09:21 +00004343 }
4344
Matt Arsenault27cc9582014-04-18 01:53:18 +00004345 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
4346 // We are converting these to a BFE, so we need to add the missing
4347 // operands for the size and offset.
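      // V_BFE_I32 dst, src, offset, width performs a signed bitfield extract,
      // so offset 0 with width 8 or 16 sign-extends the low bits, matching
      // the S_SEXT_I32_I8/I16 semantics.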
4348 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004349 Inst.addOperand(MachineOperand::CreateImm(0));
4350 Inst.addOperand(MachineOperand::CreateImm(Size));
Matt Arsenault27cc9582014-04-18 01:53:18 +00004351
Matt Arsenaultb5b51102014-06-10 19:18:21 +00004352 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
4353 // The VALU version adds the second operand to the result, so insert an
4354 // extra 0 operand.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004355 Inst.addOperand(MachineOperand::CreateImm(0));
Tom Stellard82166022013-11-13 23:36:37 +00004356 }
4357
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004358 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
Tom Stellard82166022013-11-13 23:36:37 +00004359
Matt Arsenault78b86702014-04-18 05:19:26 +00004360 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004361 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
Matt Arsenault78b86702014-04-18 05:19:26 +00004362 // If we need to move this to VGPRs, we need to unpack the second operand
4363 // back into the 2 separate ones for bit offset and width.
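      // For example, Imm = 0x00100008 (bits [5:0] = 8, bits [22:16] = 16)
      // denotes offset 8 and width 16.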
4364 assert(OffsetWidthOp.isImm() &&
4365 "Scalar BFE is only implemented for constant width and offset");
4366 uint32_t Imm = OffsetWidthOp.getImm();
4367
4368 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
4369 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004370 Inst.RemoveOperand(2); // Remove old immediate.
4371 Inst.addOperand(MachineOperand::CreateImm(Offset));
4372 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
Matt Arsenault78b86702014-04-18 05:19:26 +00004373 }
4374
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004375 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
Tom Stellardbc4497b2016-02-12 23:45:29 +00004376 unsigned NewDstReg = AMDGPU::NoRegister;
4377 if (HasDst) {
Matt Arsenault21a43822017-04-06 21:09:53 +00004378 unsigned DstReg = Inst.getOperand(0).getReg();
4379 if (TargetRegisterInfo::isPhysicalRegister(DstReg))
4380 continue;
4381
Tom Stellardbc4497b2016-02-12 23:45:29 +00004382 // Update the destination register class.
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004383 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
Tom Stellardbc4497b2016-02-12 23:45:29 +00004384 if (!NewDstRC)
4385 continue;
Tom Stellard82166022013-11-13 23:36:37 +00004386
Tom Stellard0d162b12016-11-16 18:42:17 +00004387 if (Inst.isCopy() &&
4388 TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
4389 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
4390 // Instead of creating a copy where src and dst are the same register
4391 // class, we just replace all uses of dst with src. These kinds of
4392 // copies interfere with the heuristics MachineSink uses to decide
4393 // whether or not to split a critical edge, since the pass assumes
4394 // that copies will end up as machine instructions and not be
4395 // eliminated.
4396 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
4397 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
4398 MRI.clearKillFlags(Inst.getOperand(1).getReg());
4399 Inst.getOperand(0).setReg(DstReg);
Matt Arsenault69932e42018-03-19 14:07:15 +00004400
4401 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
4402 // these are deleted later, but at -O0 it would leave a suspicious
4403 // looking illegal copy of an undef register.
4404 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
4405 Inst.RemoveOperand(I);
4406 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
Tom Stellard0d162b12016-11-16 18:42:17 +00004407 continue;
4408 }
4409
Tom Stellardbc4497b2016-02-12 23:45:29 +00004410 NewDstReg = MRI.createVirtualRegister(NewDstRC);
4411 MRI.replaceRegWith(DstReg, NewDstReg);
4412 }
Tom Stellard82166022013-11-13 23:36:37 +00004413
Tom Stellarde1a24452014-04-17 21:00:01 +00004414 // Legalize the operands
Scott Linder823549a2018-10-08 18:47:01 +00004415 legalizeOperands(Inst, MDT);
Tom Stellarde1a24452014-04-17 21:00:01 +00004416
Tom Stellardbc4497b2016-02-12 23:45:29 +00004417 if (HasDst)
4418 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
Tom Stellard82166022013-11-13 23:36:37 +00004419 }
4420}
4421
Matt Arsenault84445dd2017-11-30 22:51:26 +00004422// Add/sub require special handling to deal with carry outs.
Scott Linder823549a2018-10-08 18:47:01 +00004423bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
4424 MachineDominatorTree *MDT) const {
Matt Arsenault84445dd2017-11-30 22:51:26 +00004425 if (ST.hasAddNoCarry()) {
4426 // Assume there is no user of scc since we don't select this in that case.
4427 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
4428 // is used.
4429
4430 MachineBasicBlock &MBB = *Inst.getParent();
4431 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4432
4433 unsigned OldDstReg = Inst.getOperand(0).getReg();
4434 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4435
4436 unsigned Opc = Inst.getOpcode();
4437 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
4438
4439 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
4440 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
4441
4442 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
4443 Inst.RemoveOperand(3);
4444
4445 Inst.setDesc(get(NewOpc));
4446 Inst.addImplicitDefUseOperands(*MBB.getParent());
4447 MRI.replaceRegWith(OldDstReg, ResultReg);
Scott Linder823549a2018-10-08 18:47:01 +00004448 legalizeOperands(Inst, MDT);
Matt Arsenault84445dd2017-11-30 22:51:26 +00004449
4450 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
4451 return true;
4452 }
4453
4454 return false;
4455}
4456
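// Lower S_ABS_I32 on the VALU as max(x, 0 - x), using a borrowless subtract
// when the target has one.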
Alfred Huang5b270722017-07-14 17:56:55 +00004457void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004458 MachineInstr &Inst) const {
4459 MachineBasicBlock &MBB = *Inst.getParent();
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00004460 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4461 MachineBasicBlock::iterator MII = Inst;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004462 DebugLoc DL = Inst.getDebugLoc();
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00004463
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004464 MachineOperand &Dest = Inst.getOperand(0);
4465 MachineOperand &Src = Inst.getOperand(1);
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00004466 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4467 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4468
Matt Arsenault84445dd2017-11-30 22:51:26 +00004469 unsigned SubOp = ST.hasAddNoCarry() ?
4470 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32;
4471
4472 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
Marek Olsak7ed6b2f2015-11-25 21:22:45 +00004473 .addImm(0)
4474 .addReg(Src.getReg());
4475
4476 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
4477 .addReg(Src.getReg())
4478 .addReg(TmpReg);
4479
4480 MRI.replaceRegWith(Dest.getReg(), ResultReg);
4481 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
4482}
4483
Konstantin Zhuravlyovca8946a2017-09-18 21:22:45 +00004484void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
4485 MachineInstr &Inst) const {
4486 MachineBasicBlock &MBB = *Inst.getParent();
4487 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4488 MachineBasicBlock::iterator MII = Inst;
4489 const DebugLoc &DL = Inst.getDebugLoc();
4490
4491 MachineOperand &Dest = Inst.getOperand(0);
4492 MachineOperand &Src0 = Inst.getOperand(1);
4493 MachineOperand &Src1 = Inst.getOperand(2);
4494
Matt Arsenault0084adc2018-04-30 19:08:16 +00004495 if (ST.hasDLInsts()) {
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004496 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4497 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
4498 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
4499
Matt Arsenault0084adc2018-04-30 19:08:16 +00004500 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
4501 .add(Src0)
4502 .add(Src1);
Konstantin Zhuravlyovca8946a2017-09-18 21:22:45 +00004503
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004504 MRI.replaceRegWith(Dest.getReg(), NewDest);
4505 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
4506 } else {
4507 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
4508 // invert either source and then perform the XOR. If either source is a
4509 // scalar register, then we can leave the inversion on the scalar unit to
4510 // achieve a better distribution of scalar and vector instructions.
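    // For example (registers illustrative), with an SGPR src0:
    //   t = S_NOT_B32 s0        ; stays on the scalar unit
    //   d = S_XOR_B32 t, v1     ; re-lowered to V_XOR_B32 by a later iteration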
4511 bool Src0IsSGPR = Src0.isReg() &&
4512 RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
4513 bool Src1IsSGPR = Src1.isReg() &&
4514 RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
4515 MachineInstr *Not = nullptr;
4516 MachineInstr *Xor = nullptr;
4517 unsigned Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4518 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4519
4520 // Build a pair of scalar instructions and add them to the work list.
4521 // The next iteration over the work list will lower these to the vector
4522 // unit as necessary.
4523 if (Src0IsSGPR) {
4524 Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp)
4525 .add(Src0);
4526 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
4527 .addReg(Temp)
4528 .add(Src1);
4529 } else if (Src1IsSGPR) {
4530 Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp)
4531 .add(Src1);
4532 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
4533 .add(Src0)
4534 .addReg(Temp);
4535 } else {
4536 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp)
4537 .add(Src0)
4538 .add(Src1);
4539 Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
4540 .addReg(Temp);
4541 Worklist.insert(Not);
4542 }
4543
4544 MRI.replaceRegWith(Dest.getReg(), NewDest);
4545
4546 Worklist.insert(Xor);
4547
4548 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
Matt Arsenault0084adc2018-04-30 19:08:16 +00004549 }
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004550}
4551
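// Lower a NAND/NOR-style scalar op as the underlying op followed by
// S_NOT_B32; both pieces go back on the worklist so they can be moved to the
// VALU if needed.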
4552void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist,
4553 MachineInstr &Inst,
4554 unsigned Opcode) const {
4555 MachineBasicBlock &MBB = *Inst.getParent();
4556 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4557 MachineBasicBlock::iterator MII = Inst;
4558 const DebugLoc &DL = Inst.getDebugLoc();
4559
4560 MachineOperand &Dest = Inst.getOperand(0);
4561 MachineOperand &Src0 = Inst.getOperand(1);
4562 MachineOperand &Src1 = Inst.getOperand(2);
4563
4564 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4565 unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4566
4567 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
4568 .add(Src0)
4569 .add(Src1);
4570
4571 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
4572 .addReg(Interm);
4573
4574 Worklist.insert(&Op);
4575 Worklist.insert(&Not);
4576
4577 MRI.replaceRegWith(Dest.getReg(), NewDest);
4578 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
4579}
4580
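// Lower ANDN2/ORN2-style scalar ops by inverting src1 first (x andn2 y ==
// x & ~y), again as two scalar instructions pushed back on the worklist.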
4581void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist,
4582 MachineInstr &Inst,
4583 unsigned Opcode) const {
4584 MachineBasicBlock &MBB = *Inst.getParent();
4585 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4586 MachineBasicBlock::iterator MII = Inst;
4587 const DebugLoc &DL = Inst.getDebugLoc();
4588
4589 MachineOperand &Dest = Inst.getOperand(0);
4590 MachineOperand &Src0 = Inst.getOperand(1);
4591 MachineOperand &Src1 = Inst.getOperand(2);
4592
4593 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4594 unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4595
4596 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
4597 .add(Src1);
4598
4599 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest)
4600 .add(Src0)
4601 .addReg(Interm);
4602
4603 Worklist.insert(&Not);
4604 Worklist.insert(&Op);
Konstantin Zhuravlyovca8946a2017-09-18 21:22:45 +00004605
Matt Arsenault0084adc2018-04-30 19:08:16 +00004606 MRI.replaceRegWith(Dest.getReg(), NewDest);
4607 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
Konstantin Zhuravlyovca8946a2017-09-18 21:22:45 +00004608}
4609
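// Split a 64-bit scalar unary op (e.g. S_NOT_B64) into two independent 32-bit
// halves, applied per subregister and recombined with a REG_SEQUENCE.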
Matt Arsenault689f3252014-06-09 16:36:31 +00004610void SIInstrInfo::splitScalar64BitUnaryOp(
Alfred Huang5b270722017-07-14 17:56:55 +00004611 SetVectorType &Worklist, MachineInstr &Inst,
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004612 unsigned Opcode) const {
4613 MachineBasicBlock &MBB = *Inst.getParent();
Matt Arsenault689f3252014-06-09 16:36:31 +00004614 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4615
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00004616 MachineOperand &Dest = Inst.getOperand(0);
4617 MachineOperand &Src0 = Inst.getOperand(1);
4618 DebugLoc DL = Inst.getDebugLoc();
Matt Arsenault689f3252014-06-09 16:36:31 +00004619
4620 MachineBasicBlock::iterator MII = Inst;
4621
4622 const MCInstrDesc &InstDesc = get(Opcode);
4623 const TargetRegisterClass *Src0RC = Src0.isReg() ?
4624 MRI.getRegClass(Src0.getReg()) :
4625 &AMDGPU::SGPR_32RegClass;
4626
4627 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
4628
4629 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
4630 AMDGPU::sub0, Src0SubRC);
4631
4632 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
Matt Arsenaultf003c382015-08-26 20:47:50 +00004633 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
4634 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
Matt Arsenault689f3252014-06-09 16:36:31 +00004635
Matt Arsenaultf003c382015-08-26 20:47:50 +00004636 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004637 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
Matt Arsenault689f3252014-06-09 16:36:31 +00004638
4639 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
4640 AMDGPU::sub1, Src0SubRC);
4641
Matt Arsenaultf003c382015-08-26 20:47:50 +00004642 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004643 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
Matt Arsenault689f3252014-06-09 16:36:31 +00004644
Matt Arsenaultf003c382015-08-26 20:47:50 +00004645 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
Matt Arsenault689f3252014-06-09 16:36:31 +00004646 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
4647 .addReg(DestSub0)
4648 .addImm(AMDGPU::sub0)
4649 .addReg(DestSub1)
4650 .addImm(AMDGPU::sub1);
4651
4652 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
4653
Graham Sellers04f7a4d2018-11-29 16:05:38 +00004654 Worklist.insert(&LoHalf);
4655 Worklist.insert(&HiHalf);
4656
Matt Arsenaultf003c382015-08-26 20:47:50 +00004657 // We don't need to legalizeOperands here because for a single operand, src0
4658 // will support any kind of input.
4659
4660 // Move all users of this moved value.
4661 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
Matt Arsenault689f3252014-06-09 16:36:31 +00004662}
4663
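// Split a 64-bit scalar add/sub into a 32-bit VALU carry chain. Schematically
// (registers illustrative):
//   lo, carry = V_ADD_I32_e64  src0.sub0, src1.sub0
//   hi        = V_ADDC_U32_e64 src0.sub1, src1.sub1, carry
//   dst       = REG_SEQUENCE lo, sub0, hi, sub1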
void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
                                         MachineInstr &Inst,
                                         MachineDominatorTree *MDT) const {
  bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);

  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  unsigned CarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
  unsigned DeadCarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();
  MachineBasicBlock::iterator MII = Inst;

  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
  MachineInstr *LoHalf =
    BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
    .addReg(CarryReg, RegState::Define)
    .add(SrcReg0Sub0)
    .add(SrcReg1Sub0);

  unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
  MachineInstr *HiHalf =
    BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
    .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
    .add(SrcReg0Sub1)
    .add(SrcReg1Sub1)
    .addReg(CarryReg, RegState::Kill);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*LoHalf, MDT);
  legalizeOperands(*HiHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

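// A sketch of the split performed below. Opcode is the 32-bit VALU
// equivalent supplied by the caller (hypothetically, V_AND_B32 when lowering
// a 64-bit scalar AND), and the halves are fully independent:
//   %lo  = <Opcode> %src0.sub0, %src1.sub0
//   %hi  = <Opcode> %src0.sub1, %src1.sub1
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1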
void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
                                           MachineInstr &Inst, unsigned Opcode,
                                           MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  DebugLoc DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
                              .add(SrcReg0Sub0)
                              .add(SrcReg1Sub0);

  unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
                              .add(SrcReg0Sub1)
                              .add(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

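// S_XNOR_B64 has no direct VALU counterpart, so when it must leave the SALU
// it is rewritten below as xnor(x, y) = xor(not(x), y). The S_NOT_B64 is
// applied to the operand known to be an SGPR so that it can stay scalar,
// and the resulting XOR is queued for further legalization.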
void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
                                       MachineInstr &Inst,
                                       MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());

  unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  MachineOperand *Op0;
  MachineOperand *Op1;

  if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
    Op0 = &Src0;
    Op1 = &Src1;
  } else {
    Op0 = &Src1;
    Op1 = &Src0;
  }

  BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
    .add(*Op0);

  unsigned NewDest = MRI.createVirtualRegister(DestRC);

  MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
                           .addReg(Interm)
                           .add(*Op1);

  MRI.replaceRegWith(Dest.getReg(), NewDest);

  Worklist.insert(&Xor);
}

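// A sketch of the expansion below: a 64-bit scalar popcount becomes two
// 32-bit counts, using the accumulating form of V_BCNT,
//   %mid    = V_BCNT_U32_B32 %src.sub0, 0
//   %result = V_BCNT_U32_B32 %src.sub1, %mid
// i.e. bcnt64(x) = bcnt32(x.lo) + bcnt32(x.hi).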
void SIInstrInfo::splitScalar64BitBCNT(
    SetVectorType &Worklist, MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);

  BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  // We don't need to legalize operands here. src0 for either instruction can
  // be an SGPR, and the second input is unused or determined here.
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

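// Expands S_BFE_I64 when used as a 64-bit sign-extend-in-register (offset 0,
// width <= 32). Roughly, for widths below 32:
//   %lo  = V_BFE_I32 %src.sub0, 0, width   ; sign-extend the field
//   %hi  = V_ASHRREV_I32 31, %lo           ; broadcast the sign bit
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1
// For a width of exactly 32 only the high half needs to be computed.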
void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
                                      MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  uint32_t Imm = Inst.getOperand(2).getImm();
  uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
  uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

  (void) Offset;

  // Only sext_inreg cases handled.
  assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
         Offset == 0 && "Not implemented");

  if (BitWidth < 32) {
    unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
      .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
      .addImm(0)
      .addImm(BitWidth);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
      .addImm(31)
      .addReg(MidRegLo);

    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
      .addReg(MidRegLo)
      .addImm(AMDGPU::sub0)
      .addReg(MidRegHi)
      .addImm(AMDGPU::sub1);

    MRI.replaceRegWith(Dest.getReg(), ResultReg);
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return;
  }

  MachineOperand &Src = Inst.getOperand(1);
  unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
    .addImm(31)
    .addReg(Src.getReg(), 0, AMDGPU::sub0);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
    .addReg(Src.getReg(), 0, AMDGPU::sub0)
    .addImm(AMDGPU::sub0)
    .addReg(TmpReg)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

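// After an instruction is moved to the VALU, any user whose operand can no
// longer be satisfied by the new VGPR result must itself be moved. For the
// copy-like pseudos (COPY, PHI, REG_SEQUENCE, ...) the check is made on the
// result's register class instead, since their use operands carry no
// register class constraint of their own.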
void SIInstrInfo::addUsersToMoveToVALUWorklist(
    unsigned DstReg,
    MachineRegisterInfo &MRI,
    SetVectorType &Worklist) const {
  for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
         E = MRI.use_end(); I != E;) {
    MachineInstr &UseMI = *I->getParent();

    unsigned OpNo = 0;

    switch (UseMI.getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::WQM:
    case AMDGPU::WWM:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::PHI:
    case AMDGPU::INSERT_SUBREG:
      break;
    default:
      OpNo = I.getOperandNo();
      break;
    }

    if (!RI.hasVGPRs(getOpRegClass(UseMI, OpNo))) {
      Worklist.insert(&UseMI);

      do {
        ++I;
      } while (I != E && I->getParent() == &UseMI);
    } else {
      ++I;
    }
  }
}

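// Lowers the scalar S_PACK_* pseudos to VALU sequences. As an illustrative
// sketch, S_PACK_LL_B32_B16 computes result = (src1 << 16) | (src0 & 0xffff)
// via:
//   %tmp    = V_AND_B32 0xffff, %src0
//   %result = V_LSHL_OR_B32 %src1, 16, %tmp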
void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &Inst) const {
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineBasicBlock *MBB = Inst.getParent();
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();

  switch (Inst.getOpcode()) {
  case AMDGPU::S_PACK_LL_B32_B16: {
    unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
    // 0.
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
      .addImm(0xffff);

    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
      .addReg(ImmReg, RegState::Kill)
      .add(Src0);

    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg)
      .add(Src1)
      .addImm(16)
      .addReg(TmpReg, RegState::Kill);
    break;
  }
  case AMDGPU::S_PACK_LH_B32_B16: {
    unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
      .addImm(0xffff);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg)
      .addReg(ImmReg, RegState::Kill)
      .add(Src0)
      .add(Src1);
    break;
  }
  case AMDGPU::S_PACK_HH_B32_B16: {
    unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
      .addImm(16)
      .add(Src0);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
      .addImm(0xffff0000);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg)
      .add(Src1)
      .addReg(ImmReg, RegState::Kill)
      .addReg(TmpReg, RegState::Kill);
    break;
  }
  default:
    llvm_unreachable("unhandled s_pack_* instruction");
  }

  MachineOperand &Dest = Inst.getOperand(0);
  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

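// When an SCC-defining instruction is moved to the VALU, its condition
// result no longer lives in SCC, so downstream SCC readers must be moved as
// well. The walk below scans forward from the def, queueing SCC readers
// until the next SCC def ends the live range.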
void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                               MachineInstr &SCCDefInst,
                                               SetVectorType &Worklist) const {
  // Ensure that def inst defines SCC, which is still live.
  assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
         !Op.isDead() && Op.getParent() == &SCCDefInst);
  // This assumes that all the users of SCC are in the same block
  // as the SCC def.
  for (MachineInstr &MI : // Skip the def inst itself.
       make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
                  SCCDefInst.getParent()->end())) {
    // Check if SCC is used first.
    if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1)
      Worklist.insert(&MI);
    // Exit if we find another SCC def.
    if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
      return;
  }
}

const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
    const MachineInstr &Inst) const {
  const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);

  switch (Inst.getOpcode()) {
  // For target instructions, getOpRegClass just returns the virtual register
  // class associated with the operand, so we need to find an equivalent VGPR
  // register class in order to move the instruction to the VALU.
  case AMDGPU::COPY:
  case AMDGPU::PHI:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:
  case AMDGPU::WQM:
  case AMDGPU::WWM:
    if (RI.hasVGPRs(NewDstRC))
      return nullptr;

    NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
    if (!NewDstRC)
      return nullptr;
    return NewDstRC;
  default:
    return NewDstRC;
  }
}

// Find the one SGPR operand we are allowed to use.
unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
                                   int OpIndices[3]) const {
  const MCInstrDesc &Desc = MI.getDesc();

  // Find the one SGPR operand we are allowed to use.
  //
  // First we need to consider the instruction's operand requirements before
  // legalizing. Some operands are required to be SGPRs, such as implicit uses
  // of VCC, but we are still bound by the constant bus requirement to only use
  // one.
  //
  // If the operand's class is an SGPR, we can never move it.

  unsigned SGPRReg = findImplicitSGPRRead(MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    unsigned Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR. If an SGPR is equal to one of the
  // others, we choose that.
  //
  // e.g.
  // V_FMA_F32 v0, s0, s0, s0 -> No moves
  // V_FMA_F32 v0, s0, s1, s0 -> Move s1

  // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
  // prefer those.

  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  }

  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
  }

  return SGPRReg;
}

MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
                                             unsigned OperandName) const {
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
  if (Idx == -1)
    return nullptr;

  return &MI.getOperand(Idx);
}

uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
  if (ST.isAmdHsaOS()) {
    // Set ATC = 1. GFX9 doesn't have this bit.
    if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (1ULL << 56);

    // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
    // Note that this disables TC L2 and therefore decreases performance.
    if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (2ULL << 59);
  }

  return RsrcDataFormat;
}

uint64_t SIInstrInfo::getScratchRsrcWords23() const {
  uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
                    AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size.

  // GFX9 doesn't have ELEMENT_SIZE.
  if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
    Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
  }

  // IndexStride = 64.
  Rsrc23 |= UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;

  // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
  // Clear them unless we want a huge stride.
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;

  return Rsrc23;
}

bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isSMRD(Opc);
}

bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
}

unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
                                    int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
  if (!Addr || !Addr->isFI())
    return AMDGPU::NoRegister;

  assert(!MI.memoperands_empty() &&
         (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);

  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
}

unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
                                        int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
  assert(Addr && Addr->isFI());
  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
}

unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (!MI.mayLoad())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (!MI.mayStore())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }

  return Size;
}

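// A worked example of the sizing logic below (illustrative, not exhaustive):
// a VALU or SALU instruction with a 4-byte base encoding grows to 8 bytes
// when one of its sources is a 32-bit literal, e.g. s_mov_b32 s0, s1 stays
// at 4 bytes while s_mov_b32 s0, 0x12345678 is 4 + 4.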
unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
  unsigned DescSize = Desc.getSize();

  // If we have a definitive size, we can use it. Otherwise we need to inspect
  // the operands to know the size.
  if (isFixedSize(MI))
    return DescSize;

  // 4-byte instructions may have a 32-bit literal encoded after them. Check
  // operands that could ever be literals.
  if (isVALU(MI) || isSALU(MI)) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    if (Src0Idx == -1)
      return DescSize; // No operands.

    if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
      return DescSize + 4;

    int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
    if (Src1Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
      return DescSize + 4;

    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (Src2Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
      return DescSize + 4;

    return DescSize;
  }

  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleSize(MI);
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  default:
    return DescSize;
  }
}

bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
  if (!isFLAT(MI))
    return false;

  if (MI.memoperands_empty())
    return true;

  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }
  return false;
}

bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
  return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
}

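// Rewrites a structurizer-produced non-uniform if-region into the exec-mask
// intrinsic form: the terminator of IfEntry becomes SI_IF, which saves the
// exec mask into a fresh register, and a matching SI_END_CF that restores it
// is placed at the head of IfEnd.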
void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
                                            MachineBasicBlock *IfEnd) const {
  MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
  assert(TI != IfEntry->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = IfEntry->getParent();
  MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    MachineInstr *SIIF =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
            .add(Branch->getOperand(0))
            .add(Branch->getOperand(1));
    MachineInstr *SIEND =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
            .addReg(DstReg);

    IfEntry->erase(TI);
    IfEntry->insert(IfEntry->end(), SIIF);
    IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
  }
}

void SIInstrInfo::convertNonUniformLoopRegion(
    MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
  MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
  // We expect 2 terminators, one conditional and one unconditional.
  assert(TI != LoopEnd->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = LoopEnd->getParent();
  MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    MachineInstrBuilder HeaderPHIBuilder =
        BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
    for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
                                          E = LoopEntry->pred_end();
         PI != E; ++PI) {
      if (*PI == LoopEnd) {
        HeaderPHIBuilder.addReg(BackEdgeReg);
      } else {
        MachineBasicBlock *PMBB = *PI;
        unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
        materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
                             ZeroReg, 0);
        HeaderPHIBuilder.addReg(ZeroReg);
      }
      HeaderPHIBuilder.addMBB(*PI);
    }
    MachineInstr *HeaderPhi = HeaderPHIBuilder;
    MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
                                      get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
                                  .addReg(DstReg)
                                  .add(Branch->getOperand(0));
    MachineInstr *SILOOP =
        BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
            .addReg(BackEdgeReg)
            .addMBB(LoopEntry);

    LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
    LoopEnd->erase(TI);
    LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
    LoopEnd->insert(LoopEnd->end(), SILOOP);
  }
}

ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}

/// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                const ScheduleDAG *DAG) const {
  return new GCNHazardRecognizer(DAG->MF);
}

/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
/// pass.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
  return new GCNHazardRecognizer(MF);
}

std::pair<unsigned, unsigned>
SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
}

ArrayRef<std::pair<unsigned, const char *>>
SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  static const std::pair<unsigned, const char *> TargetFlags[] = {
    { MO_GOTPCREL, "amdgpu-gotprel" },
    { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
    { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
    { MO_REL32_LO, "amdgpu-rel32-lo" },
    { MO_REL32_HI, "amdgpu-rel32-hi" }
  };

  return makeArrayRef(TargetFlags);
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
         MI.modifiesRegister(AMDGPU::EXEC, &RI);
}

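// Returns a builder for a no-carry 32-bit VALU add: V_ADD_U32_e64 on
// subtargets that have it, otherwise V_ADD_I32_e64 with a fresh carry
// register marked dead and hinted to VCC. The caller appends the source
// operands; a sketch of typical use:
//   getAddNoCarry(MBB, I, DL, DestReg).addReg(SrcA).addReg(SrcB);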
MachineInstrBuilder
SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const DebugLoc &DL,
                           unsigned DestReg) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  MRI.setRegAllocationHint(UnusedCarry, 0, AMDGPU::VCC);

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg)
           .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:
    return true;
  default:
    return false;
  }
}

const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
  }
}

bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
  if (!isSMRD(MI))
    return false;

  // Check that it is using a buffer resource.
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
  if (Idx == -1) // e.g. s_memtime
    return false;

  const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
  return RCID == AMDGPU::SReg_128RegClassID;
}

// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX80 = 4,
  GFX9 = 5
};

static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
  switch (ST.getGeneration()) {
  default:
    break;
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;
  }
  llvm_unreachable("Unknown subtarget generation!");
}

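// Resolves a pseudo opcode to the MC opcode for the subtarget's encoding
// family. The family starts from the generation (SI or VI) and is then
// refined for the renamed-on-GFX9, SDWA, and unpacked-D16 GFX80 cases before
// the table lookup. Returns the input opcode if it is already native, or -1
// if the instruction has no encoding on this generation.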
int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
      ST.getGeneration() >= AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA)
    Gen = ST.getGeneration() == AMDGPUSubtarget::GFX9 ? SIEncodingFamily::SDWA9
                                                      : SIEncodingFamily::SDWA;

  // Adjust the encoding family to GFX80 for D16 buffer instructions when the
  // subtarget has the UnpackedD16VMem feature.
  // TODO: remove this when we discard GFX80 encoding.
  if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
    Gen = SIEncodingFamily::GFX80;

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}

static TargetInstrInfo::RegSubRegPair
getRegOrUndef(const MachineOperand &RegOpnd) {
  assert(RegOpnd.isReg());
  return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
                             getRegSubRegPair(RegOpnd);
}

TargetInstrInfo::RegSubRegPair
llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
  assert(MI.isRegSequence());
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);
      return getRegOrUndef(RegOp);
    }
  return TargetInstrInfo::RegSubRegPair();
}

// Try to find the definition of reg:subreg in subreg-manipulation pseudos.
// Following a subreg of reg:subreg isn't supported.
static bool followSubRegDef(MachineInstr &MI,
                            TargetInstrInfo::RegSubRegPair &RSR) {
  if (!RSR.SubReg)
    return false;
  switch (MI.getOpcode()) {
  default: break;
  case AMDGPU::REG_SEQUENCE:
    RSR = getRegSequenceSubReg(MI, RSR.SubReg);
    return true;
  // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
      // Inserted the subreg we're looking for.
      RSR = getRegOrUndef(MI.getOperand(2));
    else { // The subreg is in the rest of the reg.
      auto R1 = getRegOrUndef(MI.getOperand(1));
      if (R1.SubReg) // Subreg of subreg isn't supported.
        return false;
      RSR.Reg = R1.Reg;
    }
    return true;
  }
  return false;
}

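// Walks the SSA def chain of P (a reg:subreg pair) through COPY, V_MOV_B32,
// and the subreg-forming pseudos handled above to find the instruction that
// actually produces the value. For example (a sketch), given
//   %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
// a query for %2:sub1 resolves to the defining instruction of %1.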
MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!TargetRegisterInfo::isVirtualRegister(P.Reg))
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() &&
          TargetRegisterInfo::isVirtualRegister(Op1.getReg())) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}

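// Checks that EXEC does not change between VReg's single SSA def and its
// uses, which must all be in the def's block. The scan below walks forward
// from the def and fails if any instruction before the last use writes EXEC.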
bool llvm::isEXECMaskConstantBetweenDefAndUses(unsigned VReg,
                                               MachineRegisterInfo &MRI) {
  assert(MRI.isSSA() && "Must be run on SSA");
  auto *TRI = MRI.getTargetRegisterInfo();

  auto *DefI = MRI.getVRegDef(VReg);
  auto *BB = DefI->getParent();

  DenseSet<MachineInstr*> Uses;
  for (auto &Use : MRI.use_nodbg_operands(VReg)) {
    auto *I = Use.getParent();
    if (I->getParent() != BB)
      return false;
    Uses.insert(I);
  }

  auto E = BB->end();
  for (auto I = std::next(DefI->getIterator()); I != E; ++I) {
    Uses.erase(&*I);
    // Don't check the last use.
    if (Uses.empty() || I->modifiesRegister(AMDGPU::EXEC, TRI))
      break;
  }
  return Uses.empty();
}