//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

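// Return true if \p Reg is, or is constrained to be, the scalar condition
// code (SCC). A virtual register qualifies if it is assigned the SCC register
// bank, or the SReg_32_XM0 class together with an s1 LLT.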
static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return Reg == AMDGPU::SCC;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    // FIXME: This is ambiguous for wave32. This could be SCC or VCC, but the
    // context of the register bank has been lost.
    if (RC->getID() != AMDGPU::SReg_32_XM0RegClassID)
      return false;
    const LLT Ty = MRI.getType(Reg);
    return Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::SCCRegBankID;
}

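// Return true if \p Reg holds a vector condition (VCC) value: the physical
// VCC register, a register of the wave mask (bool) class with an s1 type, or
// a virtual register on the VCC register bank.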
bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return Reg == TRI.getVCC();

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

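// Select a generic COPY. Copies whose destination is a VCC-bank register need
// special handling: a non-VCC source is materialized per-lane with
// V_CMP_NE_U32 against zero, since scalar and vector condition values have
// different semantics.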
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, MRI);
    }

    if (!isVCC(SrcReg, MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), MRI))
        return false;

      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
        .addImm(0)
        .addReg(SrcReg);

      if (!MRI.getRegClassOrNull(SrcReg))
        MRI.setRegClass(SrcReg, TRI.getConstrainedRegClassForOperand(Src, MRI));
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, MRI))
      return false;

    // Don't constrain the source register to a class so the def instruction
    // handles it (unless it's undef).
    //
    // FIXME: This is a hack. When selecting the def, we need to know
    // specifically that the result is VCCRegBank, and not just an SGPR with
    // size 1. An SReg_32 with size 1 is ambiguous with wave32.
    if (Src.isUndef()) {
      const TargetRegisterClass *SrcRC =
        TRI.getConstrainedRegClassForOperand(Src, MRI);
      if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI))
        return false;
    }

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

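// Select G_PHI by constraining the def to a register class derived from its
// LLT and register bank, then rewriting to a target PHI.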
bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI.getType(DefReg);

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI.getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    if (RB.getID() == AMDGPU::SCCRegBankID) {
      LLVM_DEBUG(dbgs() << "illegal scc phi\n");
      return false;
    }

    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
}

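// Return a machine operand for the low or high (sub0/sub1) 32-bit half of
// \p MO: register operands get a COPY of the corresponding subregister into
// a fresh register of \p SubRC, and 64-bit immediates are split in place.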
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {
  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = MRI.createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
      .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

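/// \returns the scalar (SALU) opcode corresponding to the generic bitwise
/// operation \p Opc at the given width.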
static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

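// Select G_AND/G_OR/G_XOR. Bools on the VCC bank use the wave mask width;
// ordinary SGPR results pick the 32- or 64-bit scalar opcode by size.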
bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &Dst = I.getOperand(0);
  MachineOperand &Src0 = I.getOperand(1);
  MachineOperand &Src1 = I.getOperand(2);
  Register DstReg = Dst.getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (DstRB->getID() == AMDGPU::VCCRegBankID) {
    const TargetRegisterClass *RC = TRI.getBoolRC();
    unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
                                           RC == &AMDGPU::SReg_64RegClass);
    I.setDesc(TII.get(InstOpc));

    // FIXME: Hack to avoid turning the register bank into a register class.
    // The selector for G_ICMP relies on seeing that the register bank for the
    // result is VCC. In wave32 if we constrain the registers to SReg_32 here,
    // it will be ambiguous whether it's a scalar or vector bool.
    if (Src0.isUndef() && !MRI.getRegClassOrNull(Src0.getReg()))
      MRI.setRegClass(Src0.getReg(), RC);
    if (Src1.isUndef() && !MRI.getRegClassOrNull(Src1.getReg()))
      MRI.setRegClass(Src1.getReg(), RC);

    return RBI.constrainGenericRegister(DstReg, *RC, MRI);
  }

  // TODO: Should this allow an SCC bank result, and produce a copy from SCC
  // for the result?
  if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
    unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
    I.setDesc(TII.get(InstOpc));

    const TargetRegisterClass *RC
      = TRI.getConstrainedRegClassForOperand(Dst, MRI);
    if (!RC)
      return false;
    return RBI.constrainGenericRegister(DstReg, *RC, MRI) &&
           RBI.constrainGenericRegister(Src0.getReg(), *RC, MRI) &&
           RBI.constrainGenericRegister(Src1.getReg(), *RC, MRI);
  }

  return false;
}

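// Select G_ADD/G_SUB. 32-bit cases map directly onto a scalar or vector
// add/sub; 64-bit adds are expanded into a low add producing carry plus a
// high add consuming it, with the halves recombined by a REG_SEQUENCE.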
bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;

    Register UnusedCarry = MRI.createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI.createVirtualRegister(&HalfRC);
  Register DstHi = MRI.createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI.createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI.createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  assert(I.getOperand(2).getImm() % 32 == 0);
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(2).getImm() / 32);
  const DebugLoc &DL = I.getDebugLoc();
  MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY),
                               I.getOperand(0).getReg())
                           .addReg(I.getOperand(1).getReg(), 0, SubReg);

  for (const MachineOperand &MO : Copy->operands()) {
    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

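// Select G_MERGE_VALUES / G_BUILD_VECTOR / G_CONCAT_VECTORS by gluing the
// 32-bit-or-wider sources into a REG_SEQUENCE over consecutive subregisters.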
bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, MRI);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

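// Select G_UNMERGE_VALUES as one subregister COPY per destination, the
// inverse of the REG_SEQUENCE built for G_MERGE_VALUES above.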
bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg0);
  LLT SrcTy = MRI.getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI))
    return false;

  const unsigned SrcFlags = getUndefRegState(Src.isUndef());

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, SrcFlags, SubRegs[I]);

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, MRI);
  if ((!RC && !MRI.getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(3).getImm() / 32);
  DebugLoc DL = I.getDebugLoc();
  MachineInstr *Ins = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG))
                          .addDef(I.getOperand(0).getReg())
                          .addReg(I.getOperand(1).getReg())
                          .addReg(I.getOperand(2).getReg())
                          .addImm(SubReg);

  for (const MachineOperand &MO : Ins->operands()) {
    if (!MO.isReg())
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(
    MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(I.getNumExplicitDefs()).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();
    MachineFunction *MF = BB->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg }) {
      if (!MRI.getRegClassOrNull(Reg))
        MRI.setRegClass(Reg, TRI.getWaveMaskRegClass());
    }

    return true;
  }
  default:
    return selectImpl(I, CoverageInfo);
  }
}

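/// \returns the VALU compare opcode (V_CMP_*_e64) for integer predicate \p P
/// at the given operand size, or -1 if the size is unsupported.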
static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

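// Select G_ICMP. An SCC result uses a scalar S_CMP followed by a copy out of
// SCC; a VCC result selects the corresponding V_CMP writing the wave mask.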
bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  unsigned CCReg = I.getOperand(0).getReg();
  if (isSCC(CCReg, MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
        .add(I.getOperand(2))
        .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
      constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
      RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
                               I.getOperand(0).getReg())
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
      .addImm(Tgt)
      .addReg(Reg0)
      .addReg(Reg1)
      .addReg(Reg2)
      .addReg(Reg3)
      .addImm(VM)
      .addImm(Compr)
      .addImm(Enabled);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_end_cf: {
    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(),
            TII.get(AMDGPU::SI_END_CF))
      .add(I.getOperand(1));

    Register Reg = I.getOperand(1).getReg();
    I.eraseFromParent();

    if (!MRI.getRegClassOrNull(Reg))
      MRI.setRegClass(Reg, TRI.getWaveMaskRegClass());
    return true;
  }
  default:
    return selectImpl(I, CoverageInfo);
  }
}

bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);
  assert(Size <= 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  unsigned CCReg = CCOp.getReg();
  if (isSCC(CCReg, MRI)) {
    unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
                                         AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
        .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class that we use
    // to represent it. So we need to manually set the register class here.
    if (!MRI.getRegClassOrNull(CCReg))
      MRI.setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
        .add(I.getOperand(2))
        .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  // Wide VGPR select should have been split in RegBankSelect.
  if (Size > 32)
    return false;

  MachineInstr *Select =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .add(I.getOperand(3))
        .addImm(0)
        .add(I.getOperand(2))
        .add(I.getOperand(1));

  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned PtrSize = RBI.getSizeInBits(I.getOperand(1).getReg(), MRI, TRI);
  if (PtrSize != 64) {
    LLVM_DEBUG(dbgs() << "Unhandled address space\n");
    return false;
  }

  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Remove this when integers > s32 are naturally selected.
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(1))
      .add(I.getOperand(0))
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0)  // slc
      .addImm(0); // dlc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

static int sizeToSubRegIndex(unsigned Size) {
  switch (Size) {
  case 32:
    return AMDGPU::sub0;
  case 64:
    return AMDGPU::sub0_sub1;
  case 96:
    return AMDGPU::sub0_sub1_sub2;
  case 128:
    return AMDGPU::sub0_sub1_sub2_sub3;
  case 256:
    return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
  default:
    if (Size < 32)
      return AMDGPU::sub0;
    if (Size > 256)
      return -1;
    return sizeToSubRegIndex(PowerOf2Ceil(Size));
  }
}

bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  if (!DstTy.isScalar())
    return false;

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, MRI, TRI);
  if (SrcRB != DstRB)
    return false;

  unsigned DstSize = DstTy.getSizeInBits();
  unsigned SrcSize = SrcTy.getSizeInBits();

  const TargetRegisterClass *SrcRC
    = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, MRI);
  const TargetRegisterClass *DstRC
    = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, MRI);

  if (SrcSize > 32) {
    int SubRegIdx = sizeToSubRegIndex(DstSize);
    if (SubRegIdx == -1)
      return false;

    // Deal with weird cases where the class only partially supports the subreg
    // index.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
    if (!SrcRC)
      return false;

    I.getOperand(1).setSubReg(SubRegIdx);
  }

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  I.setDesc(TII.get(TargetOpcode::COPY));
  return true;
}

/// \returns true if a bitmask for \p Size bits will be an inline immediate.
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes<unsigned>(Size);
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}

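// Select G_SEXT/G_ZEXT/G_ANYEXT. The lowering depends on the source bank:
// SCC and VCC bools become conditional selects, VGPR sources use BFE (or an
// inline AND mask when cheaper), and SGPR sources use S_SEXT/S_BFE/S_AND.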
bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
  bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  const LLT S1 = LLT::scalar(1);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned DstSize = DstTy.getSizeInBits();
  if (!DstTy.isScalar())
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, TRI);

  if (SrcBank->getID() == AMDGPU::SCCRegBankID) {
    if (SrcTy != S1 || DstSize > 64) // Invalid
      return false;

    unsigned Opcode =
        DstSize > 32 ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
    const TargetRegisterClass *DstRC =
        DstSize > 32 ? &AMDGPU::SReg_64RegClass : &AMDGPU::SReg_32RegClass;

    // FIXME: Create an extra copy to avoid incorrectly constraining the result
    // of the scc producer.
    unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), TmpReg)
      .addReg(SrcReg);
    BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(TmpReg);

    // The instruction operands are backwards from what you would expect.
    BuildMI(MBB, I, DL, TII.get(Opcode), DstReg)
      .addImm(0)
      .addImm(Signed ? -1 : 1);
    return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
  }

  if (SrcBank->getID() == AMDGPU::VCCRegBankID && DstSize <= 32) {
    if (SrcTy != S1) // Invalid
      return false;

    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)               // src0_modifiers
        .addImm(0)               // src0
        .addImm(0)               // src1_modifiers
        .addImm(Signed ? -1 : 1) // src1
        .addUse(SrcReg);
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

  if (I.getOpcode() == AMDGPU::G_ANYEXT)
    return selectCOPY(I);

  if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
    // 64-bit should have been split up in RegBankSelect

    // Try to use an and with a mask if it will save code size.
    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      MachineInstr *ExtI =
        BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
          .addImm(Mask)
          .addReg(SrcReg);
      return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
    }

    const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
        .addReg(SrcReg)
        .addImm(0)        // Offset
        .addImm(SrcSize); // Width
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

  if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
    if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, MRI))
      return false;

    if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
      const unsigned SextOpc = SrcSize == 8 ?
        AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
      BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
        .addReg(SrcReg);
      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
    }

    const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
    const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
    if (DstSize > 32 && SrcSize <= 32) {
      // We need a 64-bit register source, but the high bits don't matter.
      unsigned ExtReg
        = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned UndefReg
        = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
      BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
        .addReg(SrcReg)
        .addImm(AMDGPU::sub0)
        .addReg(UndefReg)
        .addImm(AMDGPU::sub1);

      BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
        .addReg(ExtReg)
        .addImm(SrcSize << 16);

      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, MRI);
    }

    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
        .addReg(SrcReg)
        .addImm(Mask);
    } else {
      BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
        .addReg(SrcReg)
        .addImm(SrcSize << 16);
    }

    return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
    .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
    .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
      .addReg(LoReg)
      .addImm(AMDGPU::sub0)
      .addReg(HiReg)
      .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts? Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  // TODO: Can/should we insert m0 initialization here for DS instructions and
  // call the normal selector?
  return false;
}

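// Select G_BRCOND. An SCC condition becomes S_CBRANCH_SCC1; a VCC condition
// becomes S_CBRANCH_VCCNZ, in both cases after copying the condition into the
// corresponding physical register.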
bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &CondOp = I.getOperand(0);
  Register CondReg = CondOp.getReg();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned BrOpcode;
  Register CondPhysReg;
  const TargetRegisterClass *ConstrainRC;

  // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
  // whether the branch is uniform when selecting the instruction. In
  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
  // RegBankSelect knows what it's doing if the branch condition is scc, even
  // though it currently does not.
  if (isSCC(CondReg, MRI)) {
    CondPhysReg = AMDGPU::SCC;
    BrOpcode = AMDGPU::S_CBRANCH_SCC1;
    ConstrainRC = &AMDGPU::SReg_32_XM0RegClass;
  } else if (isVCC(CondReg, MRI)) {
    // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
    // We sort of know, based on the register bank, that a VCC producer ands
    // inactive lanes with 0. What if there was a logical operation with vcc
    // producers in different blocks/with different exec masks?
    // FIXME: Should scc->vcc copies and with exec?
    CondPhysReg = TRI.getVCC();
    BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
    ConstrainRC = TRI.getBoolRC();
  } else
    return false;

  if (!MRI.getRegClassOrNull(CondReg))
    MRI.setRegClass(CondReg, ConstrainRC);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
    .addReg(CondReg);
  BuildMI(*BB, &I, DL, TII.get(BrOpcode))
    .addMBB(I.getOperand(1).getMBB());

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_FRAME_INDEX(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
  I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
  if (IsVGPR)
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  return RBI.constrainGenericRegister(
      DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, MRI);
}

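// Top-level entry point: dispatch on the generic opcode, trying the manual
// selectors above and falling back to the TableGen-erated selectImpl.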
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {
  if (I.isPHI())
    return selectPHI(I);

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    if (selectG_AND_OR_XOR(I))
      return true;
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
    if (selectG_ADD_SUB(I))
      return true;
    LLVM_FALLTHROUGH;
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectG_MERGE_VALUES(I);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectG_UNMERGE_VALUES(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_ICMP:
    if (selectG_ICMP(I))
      return true;
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_SELECT:
    return selectG_SELECT(I);
  case TargetOpcode::G_STORE:
    if (selectImpl(I, CoverageInfo))
      return true;
    return selectG_STORE(I);
  case TargetOpcode::G_TRUNC:
    return selectG_TRUNC(I);
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    if (selectG_SZA_EXT(I)) {
      I.eraseFromParent();
      return true;
    }

    return false;
  case TargetOpcode::G_BRCOND:
    return selectG_BRCOND(I);
  case TargetOpcode::G_FRAME_INDEX:
    return selectG_FRAME_INDEX(I);
  case TargetOpcode::G_FENCE:
    // FIXME: Tablegen importer doesn't handle the imm operands correctly, and
    // is checking for G_CONSTANT
    I.setDesc(TII.get(AMDGPU::ATOMIC_FENCE));
    return true;
  }
  return false;
}

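// Accept the operand as-is; this lets a single pattern match either an SGPR
// or VGPR source, with the register class constrained after selection.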
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

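// Strip a G_FNEG and/or G_FABS feeding Src, folding them into VOP3 source
// modifier bits. Returns the underlying source register and the accumulated
// SISrcMods mask.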
std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3ModsImpl(
  Register Src, const MachineRegisterInfo &MRI) const {
  unsigned Mods = 0;
  MachineInstr *MI = MRI.getVRegDef(Src);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::NEG;
    MI = MRI.getVRegDef(Src);
  }

  if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::ABS;
  }

  return std::make_pair(Src, Mods);
}

/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

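// Select a VOP3 src0 operand together with its folded source modifiers, plus
// default (zero) clamp and omod operands.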
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

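// Select an operand unchanged, adding default (zero) clamp and omod operands.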
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

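// Select a VOP3 source operand together with its folded source modifiers,
// without clamp/omod operands.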
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

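// Match an SMRD access as an SGPR base plus an immediate offset that can be
// encoded directly in the instruction (the _IMM forms).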
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
    Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];

  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

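// Like selectSmrdImm, but only accepts encoded offsets that fit in 32 bits,
// for the SMRD forms that take a 32-bit literal offset.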
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
    Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  if (!isUInt<32>(EncodedImm))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

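// Match an SMRD access as an SGPR base plus a 32-bit offset that has to be
// materialized into an SGPR (the _SGPR instruction forms).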
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
  unsigned PtrReg = GEPInfo.SgprParts[0];
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}

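// Select the address operands for a flat memory access, folding a constant
// G_GEP offset into the instruction's immediate offset field when the
// subtarget supports flat instruction offsets and the offset is legal for
// the address space.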
template <bool Signed>
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  InstructionSelector::ComplexRendererFns Default = {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
  }};

  if (!STI.hasFlatInstOffsets())
    return Default;

  const MachineInstr *OpDef = MRI.getVRegDef(Root.getReg());
  if (!OpDef || OpDef->getOpcode() != AMDGPU::G_GEP)
    return Default;

  Optional<int64_t> Offset =
    getConstantVRegVal(OpDef->getOperand(2).getReg(), MRI);
  if (!Offset.hasValue())
    return Default;

  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
  if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
    return Default;

  Register BasePtr = OpDef->getOperand(1).getReg();

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // slc
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
  return selectFlatOffsetImpl<false>(Root);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
  return selectFlatOffsetImpl<true>(Root);
}

// FIXME: Implement. Returning false is conservatively correct; it only
// disables the MUBUF offset fold on subtargets where private memory accesses
// are range checked.
static bool signBitIsZero(const MachineOperand &Op,
                          const MachineRegisterInfo &MRI) {
  return false;
}

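// Return whether the memory access is known to be relative to the stack
// pointer, i.e. it references a stack pseudo source value.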
static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

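// Select the rsrc, vaddr, soffset and offset operands for a MUBUF scratch
// access in the offen addressing mode. A pure constant address is split into
// a high part materialized into the vaddr register and a low part that fits
// the 12-bit immediate offset field; otherwise we try to fold a frame index
// and/or a legal constant offset out of the address computation.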
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  int64_t Offset = 0;
  if (mi_match(Root.getReg(), MRI, m_ICst(Offset))) {
    Register HighBits = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // TODO: Should this be inside the render function? The iterator seems to
    // move.
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            HighBits)
      .addImm(Offset & ~4095);

    return {{[=](MachineInstrBuilder &MIB) { // rsrc
               MIB.addReg(Info->getScratchRSrcReg());
             },
             [=](MachineInstrBuilder &MIB) { // vaddr
               MIB.addReg(HighBits);
             },
             [=](MachineInstrBuilder &MIB) { // soffset
               const MachineMemOperand *MMO = *MI->memoperands_begin();
               const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

               Register SOffsetReg = isStackPtrRelative(PtrInfo)
                                         ? Info->getStackPtrOffsetReg()
                                         : Info->getScratchWaveOffsetReg();
               MIB.addReg(SOffsetReg);
             },
             [=](MachineInstrBuilder &MIB) { // offset
               MIB.addImm(Offset & 4095);
             }}};
  }

  assert(Offset == 0);

  // Try to fold a frame index directly into the MUBUF vaddr field, and any
  // offsets.
  Optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI.getVRegDef(Root.getReg())) {
    if (isBaseWithConstantOffset(Root, MRI)) {
      const MachineOperand &LHS = RootDef->getOperand(1);
      const MachineOperand &RHS = RootDef->getOperand(2);
      const MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
      const MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
      if (LHSDef && RHSDef) {
        int64_t PossibleOffset =
          RHSDef->getOperand(1).getCImm()->getSExtValue();
        if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
            (!STI.privateMemoryResourceIsRangeChecked() ||
             signBitIsZero(LHS, MRI))) {
          if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
            FI = LHSDef->getOperand(1).getIndex();
          else
            VAddr = LHS.getReg();
          Offset = PossibleOffset;
        }
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  // If we don't know this private access is a local stack object, it needs to
  // be relative to the entry point's scratch wave offset register.
  // TODO: Should split large offsets that don't fit, like above.
  // TODO: Don't use scratch wave offset just because the offset didn't fit.
  Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
                                   : Info->getScratchWaveOffsetReg();

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI.hasValue())
               MIB.addFrameIndex(FI.getValue());
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             MIB.addReg(SOffset);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}

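// Select a MUBUF scratch access where the entire address is a legal
// immediate offset, so no vaddr register is required.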
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  int64_t Offset = 0;
  if (!mi_match(Root.getReg(), MRI, m_ICst(Offset)) ||
      !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
    return {};

  const MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  const MachineMemOperand *MMO = *MI->memoperands_begin();
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  Register SOffsetReg = isStackPtrRelative(PtrInfo)
                            ? Info->getStackPtrOffsetReg()
                            : Info->getScratchWaveOffsetReg();
  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(Info->getScratchRSrcReg());
      },                                                         // rsrc
      [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }      // offset
  }};
}