//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

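/// Return true if \p Reg holds a scalar condition (SCC) value, judging by its
/// register class or register bank.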
static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return Reg == AMDGPU::SCC;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    // FIXME: This is ambiguous for wave32. This could be SCC or VCC, but the
    // context of the register bank has been lost.
    if (RC->getID() != AMDGPU::SReg_32_XM0RegClassID)
      return false;
    const LLT Ty = MRI.getType(Reg);
    return Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::SCCRegBankID;
}

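/// Return true if \p Reg holds a vector condition (VCC) value, judging by its
/// register class or register bank.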
bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return Reg == TRI.getVCC();

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

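/// Select a generic COPY. Copies into a VCC destination may need a V_CMP to
/// materialize a scalar boolean into a wave mask.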
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, MRI);
    }

    if (!isVCC(SrcReg, MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), MRI))
        return false;

      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
        .addImm(0)
        .addReg(SrcReg);

      if (!MRI.getRegClassOrNull(SrcReg))
        MRI.setRegClass(SrcReg, TRI.getConstrainedRegClassForOperand(Src, MRI));
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, MRI))
      return false;

    // Don't constrain the source register to a class so the def instruction
    // handles it (unless it's undef).
    //
    // FIXME: This is a hack. When selecting the def, we need to know
    // specifically that the result is VCCRegBank, and not just an SGPR
    // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
    if (Src.isUndef()) {
      const TargetRegisterClass *SrcRC =
        TRI.getConstrainedRegClassForOperand(Src, MRI);
      if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI))
        return false;
    }

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

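/// Select G_PHI by rewriting it to a target PHI constrained to a register
/// class derived from the result's type and register bank.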
bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI.getType(DefReg);

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI.getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    if (RB.getID() == AMDGPU::SCCRegBankID) {
      LLVM_DEBUG(dbgs() << "illegal scc phi\n");
      return false;
    }

    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
}

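/// Return an operand for the 32-bit half \p SubIdx of the 64-bit operand
/// \p MO, copying register operands into a new virtual register of \p SubRC
/// and splitting immediates in place.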
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = MRI.createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
        .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

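/// Map G_AND/G_OR/G_XOR to the matching 32-bit or 64-bit scalar opcode.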
static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

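/// Select G_AND/G_OR/G_XOR for SGPR and VCC bank results; VCC results use the
/// wave mask register class to stay distinguishable from plain scalars.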
bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &Dst = I.getOperand(0);
  MachineOperand &Src0 = I.getOperand(1);
  MachineOperand &Src1 = I.getOperand(2);
  Register DstReg = Dst.getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (DstRB->getID() == AMDGPU::VCCRegBankID) {
    const TargetRegisterClass *RC = TRI.getBoolRC();
    unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
                                           RC == &AMDGPU::SReg_64RegClass);
    I.setDesc(TII.get(InstOpc));

    // FIXME: Hack to avoid turning the register bank into a register class.
    // The selector for G_ICMP relies on seeing that the register bank for the
    // result is VCC. In wave32 if we constrain the registers to SReg_32 here,
    // it will be ambiguous whether it's a scalar or vector bool.
    if (Src0.isUndef() && !MRI.getRegClassOrNull(Src0.getReg()))
      MRI.setRegClass(Src0.getReg(), RC);
    if (Src1.isUndef() && !MRI.getRegClassOrNull(Src1.getReg()))
      MRI.setRegClass(Src1.getReg(), RC);

    return RBI.constrainGenericRegister(DstReg, *RC, MRI);
  }

  // TODO: Should this allow an SCC bank result, and produce a copy from SCC
  // for the result?
  if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
    unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
    I.setDesc(TII.get(InstOpc));

    const TargetRegisterClass *RC
      = TRI.getConstrainedRegClassForOperand(Dst, MRI);
    if (!RC)
      return false;
    return RBI.constrainGenericRegister(DstReg, *RC, MRI) &&
           RBI.constrainGenericRegister(Src0.getReg(), *RC, MRI) &&
           RBI.constrainGenericRegister(Src1.getReg(), *RC, MRI);
  }

  return false;
}

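/// Select G_ADD/G_SUB. The 32-bit cases map directly onto scalar or vector
/// add/sub; 64-bit adds are split into low/high halves joined by a carry.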
bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;

    Register UnusedCarry = MRI.createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI.createVirtualRegister(&HalfRC);
  Register DstHi = MRI.createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI.createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI.createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

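/// Select G_EXTRACT of a 32-bit aligned slice as a subregister copy.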
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  assert(I.getOperand(2).getImm() % 32 == 0);
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(2).getImm() / 32);
  const DebugLoc &DL = I.getDebugLoc();
  MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY),
                               I.getOperand(0).getReg())
                               .addReg(I.getOperand(1).getReg(), 0, SubReg);

  for (const MachineOperand &MO : Copy->operands()) {
    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

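/// Select G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS as a
/// REG_SEQUENCE over the source registers.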
bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, MRI);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

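/// Select G_UNMERGE_VALUES as one subregister copy per result.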
bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg0);
  LLT SrcTy = MRI.getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI))
    return false;

  const unsigned SrcFlags = getUndefRegState(Src.isUndef());

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, SrcFlags, SubRegs[I]);

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, MRI);
  if ((!RC && !MRI.getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(3).getImm() / 32);
  DebugLoc DL = I.getDebugLoc();
  MachineInstr *Ins = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG))
                              .addDef(I.getOperand(0).getReg())
                              .addReg(I.getOperand(1).getReg())
                              .addReg(I.getOperand(2).getReg())
                              .addImm(SubReg);

  for (const MachineOperand &MO : Ins->operands()) {
    if (!MO.isReg())
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

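/// Select side-effect free G_INTRINSIC instructions. Only amdgcn_if_break is
/// handled manually; everything else falls through to TableGen patterns.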
bool AMDGPUInstructionSelector::selectG_INTRINSIC(
  MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(I.getNumExplicitDefs()).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();
    MachineFunction *MF = BB->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg }) {
      if (!MRI.getRegClassOrNull(Reg))
        MRI.setRegClass(Reg, TRI.getWaveMaskRegClass());
    }

    return true;
  }
  default:
    return selectImpl(I, CoverageInfo);
  }
}

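/// \returns the VALU compare opcode for predicate \p P at bit width \p Size,
/// or -1 if there is none.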
static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

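/// Select G_ICMP as S_CMP* for SCC results or V_CMP* for VCC results.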
bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  unsigned CCReg = I.getOperand(0).getReg();
  if (isSCC(CCReg, MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
        .add(I.getOperand(2))
        .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
                               I.getOperand(0).getReg())
      .add(I.getOperand(2))
      .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
      .addImm(Tgt)
      .addReg(Reg0)
      .addReg(Reg1)
      .addReg(Reg2)
      .addReg(Reg3)
      .addImm(VM)
      .addImm(Compr)
      .addImm(Enabled);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_end_cf: {
    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(),
            TII.get(AMDGPU::SI_END_CF))
      .add(I.getOperand(1));

    Register Reg = I.getOperand(1).getReg();
    I.eraseFromParent();

    if (!MRI.getRegClassOrNull(Reg))
      MRI.setRegClass(Reg, TRI.getWaveMaskRegClass());
    return true;
  }
  default:
    return selectImpl(I, CoverageInfo);
  }
}

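/// Select G_SELECT as S_CSELECT for scalar (SCC) conditions or V_CNDMASK for
/// vector (VCC) conditions.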
bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);
  assert(Size <= 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  unsigned CCReg = CCOp.getReg();
  if (isSCC(CCReg, MRI)) {
    unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
                                         AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
        .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class we use to
    // represent it. So we need to manually set the register class here.
    if (!MRI.getRegClassOrNull(CCReg))
      MRI.setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
        .add(I.getOperand(2))
        .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  // Wide VGPR select should have been split in RegBankSelect.
  if (Size > 32)
    return false;

  MachineInstr *Select =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .add(I.getOperand(3))
        .addImm(0)
        .add(I.getOperand(2))
        .add(I.getOperand(1));

  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

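/// Select G_STORE. Non-64-bit pointers go straight to TableGen patterns after
/// M0 initialization; flat 64-bit stores fall back to manual selection when
/// the patterns do not match.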
bool AMDGPUInstructionSelector::selectG_STORE(
  MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = I.getDebugLoc();

  LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
  if (PtrTy.getSizeInBits() != 64) {
    initM0(I);
    return selectImpl(I, CoverageInfo);
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Remove this when integers > s32 are naturally selected.
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(1))
      .add(I.getOperand(0))
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0)  // slc
      .addImm(0); // dlc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

static int sizeToSubRegIndex(unsigned Size) {
  switch (Size) {
  case 32:
    return AMDGPU::sub0;
  case 64:
    return AMDGPU::sub0_sub1;
  case 96:
    return AMDGPU::sub0_sub1_sub2;
  case 128:
    return AMDGPU::sub0_sub1_sub2_sub3;
  case 256:
    return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
  default:
    if (Size < 32)
      return AMDGPU::sub0;
    if (Size > 256)
      return -1;
    return sizeToSubRegIndex(PowerOf2Ceil(Size));
  }
}

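/// Select G_TRUNC as a plain or subregister COPY.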
bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  if (!DstTy.isScalar())
    return false;

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, MRI, TRI);
  if (SrcRB != DstRB)
    return false;

  unsigned DstSize = DstTy.getSizeInBits();
  unsigned SrcSize = SrcTy.getSizeInBits();

  const TargetRegisterClass *SrcRC
    = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, MRI);
  const TargetRegisterClass *DstRC
    = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, MRI);

  if (SrcSize > 32) {
    int SubRegIdx = sizeToSubRegIndex(DstSize);
    if (SubRegIdx == -1)
      return false;

    // Deal with weird cases where the class only partially supports the subreg
    // index.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
    if (!SrcRC)
      return false;

    I.getOperand(1).setSubReg(SubRegIdx);
  }

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  I.setDesc(TII.get(TargetOpcode::COPY));
  return true;
}

/// \returns true if a bitmask for \p Size bits will be an inline immediate.
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes<unsigned>(Size);
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}
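
/// Select G_SEXT/G_ZEXT/G_ANYEXT, choosing the expansion from the source
/// register bank and the source and destination sizes.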
973
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +0000974bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
975 bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
976 const DebugLoc &DL = I.getDebugLoc();
977 MachineBasicBlock &MBB = *I.getParent();
978 MachineFunction &MF = *MBB.getParent();
979 MachineRegisterInfo &MRI = MF.getRegInfo();
980 const unsigned DstReg = I.getOperand(0).getReg();
981 const unsigned SrcReg = I.getOperand(1).getReg();
982
983 const LLT DstTy = MRI.getType(DstReg);
984 const LLT SrcTy = MRI.getType(SrcReg);
985 const LLT S1 = LLT::scalar(1);
986 const unsigned SrcSize = SrcTy.getSizeInBits();
987 const unsigned DstSize = DstTy.getSizeInBits();
988 if (!DstTy.isScalar())
989 return false;
990
991 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, TRI);
992
993 if (SrcBank->getID() == AMDGPU::SCCRegBankID) {
994 if (SrcTy != S1 || DstSize > 64) // Invalid
995 return false;
996
997 unsigned Opcode =
998 DstSize > 32 ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
999 const TargetRegisterClass *DstRC =
1000 DstSize > 32 ? &AMDGPU::SReg_64RegClass : &AMDGPU::SReg_32RegClass;
1001
1002 // FIXME: Create an extra copy to avoid incorrectly constraining the result
1003 // of the scc producer.
1004 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
1005 BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), TmpReg)
1006 .addReg(SrcReg);
1007 BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1008 .addReg(TmpReg);
1009
1010 // The instruction operands are backwards from what you would expect.
1011 BuildMI(MBB, I, DL, TII.get(Opcode), DstReg)
1012 .addImm(0)
1013 .addImm(Signed ? -1 : 1);
Matt Arsenault0e7d8692019-07-24 16:05:53 +00001014 I.eraseFromParent();
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +00001015 return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
1016 }
1017
1018 if (SrcBank->getID() == AMDGPU::VCCRegBankID && DstSize <= 32) {
1019 if (SrcTy != S1) // Invalid
1020 return false;
1021
1022 MachineInstr *ExtI =
1023 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1024 .addImm(0) // src0_modifiers
1025 .addImm(0) // src0
1026 .addImm(0) // src1_modifiers
1027 .addImm(Signed ? -1 : 1) // src1
1028 .addUse(SrcReg);
Matt Arsenault0e7d8692019-07-24 16:05:53 +00001029 I.eraseFromParent();
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +00001030 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1031 }
1032
1033 if (I.getOpcode() == AMDGPU::G_ANYEXT)
1034 return selectCOPY(I);
1035
1036 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1037 // 64-bit should have been split up in RegBankSelect
Matt Arsenault5dafcb92019-07-01 13:22:06 +00001038
1039 // Try to use an and with a mask if it will save code size.
1040 unsigned Mask;
1041 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1042 MachineInstr *ExtI =
1043 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1044 .addImm(Mask)
1045 .addReg(SrcReg);
Matt Arsenault0e7d8692019-07-24 16:05:53 +00001046 I.eraseFromParent();
Matt Arsenault5dafcb92019-07-01 13:22:06 +00001047 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1048 }
1049
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +00001050 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1051 MachineInstr *ExtI =
1052 BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1053 .addReg(SrcReg)
1054 .addImm(0) // Offset
1055 .addImm(SrcSize); // Width
Matt Arsenault0e7d8692019-07-24 16:05:53 +00001056 I.eraseFromParent();
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +00001057 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1058 }
1059
1060 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1061 if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, MRI))
1062 return false;
1063
1064 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1065 const unsigned SextOpc = SrcSize == 8 ?
1066 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1067 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1068 .addReg(SrcReg);
Matt Arsenault0e7d8692019-07-24 16:05:53 +00001069 I.eraseFromParent();
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +00001070 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
1071 }
1072
1073 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1074 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1075
1076 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
1077 if (DstSize > 32 && SrcSize <= 32) {
1078 // We need a 64-bit register source, but the high bits don't matter.
1079 unsigned ExtReg
1080 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1081 unsigned UndefReg
1082 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
1083 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1084 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1085 .addReg(SrcReg)
1086 .addImm(AMDGPU::sub0)
1087 .addReg(UndefReg)
1088 .addImm(AMDGPU::sub1);
1089
1090 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1091 .addReg(ExtReg)
1092 .addImm(SrcSize << 16);
1093
Matt Arsenault0e7d8692019-07-24 16:05:53 +00001094 I.eraseFromParent();
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +00001095 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, MRI);
1096 }
1097
Matt Arsenault5dafcb92019-07-01 13:22:06 +00001098 unsigned Mask;
1099 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1100 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1101 .addReg(SrcReg)
1102 .addImm(Mask);
1103 } else {
1104 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1105 .addReg(SrcReg)
1106 .addImm(SrcSize << 16);
1107 }
1108
Matt Arsenault0e7d8692019-07-24 16:05:53 +00001109 I.eraseFromParent();
Matt Arsenaultd7ffa2a2019-06-25 13:18:11 +00001110 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
1111 }
1112
1113 return false;
1114}
1115
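/// Select G_CONSTANT/G_FCONSTANT as one or two 32-bit moves, depending on the
/// destination bank and size.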
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
          .addReg(LoReg)
          .addImm(AMDGPU::sub0)
          .addReg(HiReg)
          .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

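/// Walk the chain of G_GEPs feeding \p Load, collecting the immediate offset
/// and the SGPR/VGPR address pieces for each one.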
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts? Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

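/// Return true if the single memory operand of \p MI is known to be uniform.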
bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

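/// Insert an M0 initialization before \p I when it accesses LDS or GDS and
/// the subtarget requires M0 to be set up for DS instructions.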
void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
  unsigned AS = PtrTy.getAddressSpace();
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
      STI.ldsRequiresM0Init()) {
    // If DS instructions require M0 initialization, insert it before selecting.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(-1);
  }
}

bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  initM0(I);
  return selectImpl(I, CoverageInfo);
}

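/// Select G_BRCOND as S_CBRANCH_SCC1 or S_CBRANCH_VCCNZ, depending on the
/// register bank of the condition.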
bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &CondOp = I.getOperand(0);
  Register CondReg = CondOp.getReg();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned BrOpcode;
  Register CondPhysReg;
  const TargetRegisterClass *ConstrainRC;

  // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
  // whether the branch is uniform when selecting the instruction. In
  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
  // RegBankSelect knows what it's doing if the branch condition is scc, even
  // though it currently does not.
  if (isSCC(CondReg, MRI)) {
    CondPhysReg = AMDGPU::SCC;
    BrOpcode = AMDGPU::S_CBRANCH_SCC1;
    ConstrainRC = &AMDGPU::SReg_32_XM0RegClass;
  } else if (isVCC(CondReg, MRI)) {
    // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
    // We sort of know that a VCC producer, based on the register bank, ands
    // inactive lanes with 0. What if there was a logical operation with vcc
    // producers in different blocks/with different exec masks?
    // FIXME: Should scc->vcc copies and with exec?
    CondPhysReg = TRI.getVCC();
    BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
    ConstrainRC = TRI.getBoolRC();
  } else
    return false;

  if (!MRI.getRegClassOrNull(CondReg))
    MRI.setRegClass(CondReg, ConstrainRC);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
    .addReg(CondReg);
  BuildMI(*BB, &I, DL, TII.get(BrOpcode))
    .addMBB(I.getOperand(1).getMBB());

  I.eraseFromParent();
  return true;
}

Matt Arsenaultcda82f02019-07-01 15:48:18 +00001317bool AMDGPUInstructionSelector::selectG_FRAME_INDEX(MachineInstr &I) const {
1318 MachineBasicBlock *BB = I.getParent();
1319 MachineFunction *MF = BB->getParent();
1320 MachineRegisterInfo &MRI = MF->getRegInfo();
1321
1322 Register DstReg = I.getOperand(0).getReg();
1323 const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
1324 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1325 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
1326 if (IsVGPR)
1327 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
1328
1329 return RBI.constrainGenericRegister(
1330 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, MRI);
1331}
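
// For illustration, a frame index on the VGPR bank mutates in place roughly
// as:
//   %0:vgpr(p5) = G_FRAME_INDEX %stack.0
// =>
//   %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
// and an SGPR-bank result becomes an S_MOV_B32 with no exec use.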

bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {
  if (I.isPHI())
    return selectPHI(I);

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    if (selectG_AND_OR_XOR(I))
      return true;
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
    if (selectG_ADD_SUB(I))
      return true;
    LLVM_FALLTHROUGH;
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectG_MERGE_VALUES(I);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectG_UNMERGE_VALUES(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_ICMP:
    if (selectG_ICMP(I))
      return true;
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_ATOMIC_CMPXCHG:
  case TargetOpcode::G_ATOMICRMW_XCHG:
  case TargetOpcode::G_ATOMICRMW_ADD:
  case TargetOpcode::G_ATOMICRMW_SUB:
  case TargetOpcode::G_ATOMICRMW_AND:
  case TargetOpcode::G_ATOMICRMW_OR:
  case TargetOpcode::G_ATOMICRMW_XOR:
  case TargetOpcode::G_ATOMICRMW_MIN:
  case TargetOpcode::G_ATOMICRMW_MAX:
  case TargetOpcode::G_ATOMICRMW_UMIN:
  case TargetOpcode::G_ATOMICRMW_UMAX:
  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectG_LOAD_ATOMICRMW(I, CoverageInfo);
  case TargetOpcode::G_SELECT:
    return selectG_SELECT(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I, CoverageInfo);
  case TargetOpcode::G_TRUNC:
    return selectG_TRUNC(I);
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    return selectG_SZA_EXT(I);
  case TargetOpcode::G_BRCOND:
    return selectG_BRCOND(I);
  case TargetOpcode::G_FRAME_INDEX:
    return selectG_FRAME_INDEX(I);
  case TargetOpcode::G_FENCE:
    // FIXME: The TableGen importer doesn't handle the imm operands correctly,
    // and is checking for G_CONSTANT.
    I.setDesc(TII.get(AMDGPU::ATOMIC_FENCE));
    return true;
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3ModsImpl(
  Register Src, const MachineRegisterInfo &MRI) const {
  unsigned Mods = 0;
  MachineInstr *MI = MRI.getVRegDef(Src);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::NEG;
    MI = MRI.getVRegDef(Src);
  }

  if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::ABS;
  }

  return std::make_pair(Src, Mods);
}
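
// For illustration, given a source operand defined as:
//   %a = G_FABS %x
//   %b = G_FNEG %a
// selectVOP3ModsImpl(%b) returns {%x, NEG | ABS}, so both modifiers are
// encoded in the VOP3 instruction rather than selected separately. Note that
// the reverse nesting (fabs of fneg) only folds the ABS bit, since the fneg
// check runs first.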

/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}
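
// As a sketch, a TableGen pattern using VOP3Mods on a multiply operand, e.g.:
//   %n = G_FNEG %x
//   %r = G_FMUL %n, %y
// can select to a VOP3 encoding such as V_MUL_F32_e64 with the NEG modifier
// set on src0, so no separate instruction is emitted for the fneg.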

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];

  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}
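
// For illustration, a uniform load such as:
//   %p = G_GEP %base, (G_CONSTANT 16)
//   %v = G_LOAD %p
// can select to an S_LOAD_*_IMM form with the offset folded into the
// immediate field; getSMRDEncodedOffset applies any subtarget scaling (e.g.
// byte offsets become dword offsets on older targets).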

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  if (!isUInt<32>(EncodedImm))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far, we have a load with a 32-bit immediate offset. It
  // is OK to select this using an SGPR offset, because we have already failed
  // to select this load into one of the _IMM variants since the _IMM patterns
  // are considered before the _SGPR patterns.
  unsigned PtrReg = GEPInfo.SgprParts[0];
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
      .addImm(GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}
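
// For illustration, when the offset does not encode in an _IMM form, this
// materializes it first:
//   %off:sreg_32_xm0 = S_MOV_B32 <imm>
// and the _SGPR variant is selected with %off rendered as its soffset
// operand.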

template <bool Signed>
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  InstructionSelector::ComplexRendererFns Default = {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
  }};

  if (!STI.hasFlatInstOffsets())
    return Default;

  const MachineInstr *OpDef = MRI.getVRegDef(Root.getReg());
  if (!OpDef || OpDef->getOpcode() != AMDGPU::G_GEP)
    return Default;

  Optional<int64_t> Offset =
      getConstantVRegVal(OpDef->getOperand(2).getReg(), MRI);
  if (!Offset.hasValue())
    return Default;

  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
  if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
    return Default;

  Register BasePtr = OpDef->getOperand(1).getReg();

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // slc
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
  return selectFlatOffsetImpl<false>(Root);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
  return selectFlatOffsetImpl<true>(Root);
}
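
// For illustration, on subtargets with flat instruction offsets:
//   %p = G_GEP %base, (G_CONSTANT 40)
//   G_LOAD %p
// folds the +40 into the FLAT/GLOBAL instruction's offset field when
// isLegalFLATOffset accepts it for that address space; otherwise the Default
// renderers (offset 0) are used.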

// FIXME: Implement.
static bool signBitIsZero(const MachineOperand &Op,
                          const MachineRegisterInfo &MRI) {
  return false;
}

static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  int64_t Offset = 0;
  if (mi_match(Root.getReg(), MRI, m_ICst(Offset))) {
    Register HighBits = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // TODO: Should this be inside the render function? The iterator seems to
    // move.
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            HighBits)
      .addImm(Offset & ~4095);

    return {{[=](MachineInstrBuilder &MIB) { // rsrc
               MIB.addReg(Info->getScratchRSrcReg());
             },
             [=](MachineInstrBuilder &MIB) { // vaddr
               MIB.addReg(HighBits);
             },
             [=](MachineInstrBuilder &MIB) { // soffset
               const MachineMemOperand *MMO = *MI->memoperands_begin();
               const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

               Register SOffsetReg = isStackPtrRelative(PtrInfo)
                                         ? Info->getStackPtrOffsetReg()
                                         : Info->getScratchWaveOffsetReg();
               MIB.addReg(SOffsetReg);
             },
             [=](MachineInstrBuilder &MIB) { // offset
               MIB.addImm(Offset & 4095);
             }}};
  }

  assert(Offset == 0);

  // Try to fold a frame index directly into the MUBUF vaddr field, and any
  // offsets.
  Optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI.getVRegDef(Root.getReg())) {
    if (isBaseWithConstantOffset(Root, MRI)) {
      const MachineOperand &LHS = RootDef->getOperand(1);
      const MachineOperand &RHS = RootDef->getOperand(2);
      const MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
      const MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
      if (LHSDef && RHSDef) {
        int64_t PossibleOffset =
            RHSDef->getOperand(1).getCImm()->getSExtValue();
        if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
            (!STI.privateMemoryResourceIsRangeChecked() ||
             signBitIsZero(LHS, MRI))) {
          if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
            FI = LHSDef->getOperand(1).getIndex();
          else
            VAddr = LHS.getReg();
          Offset = PossibleOffset;
        }
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  // If we don't know this private access is a local stack object, it needs to
  // be relative to the entry point's scratch wave offset register.
  // TODO: Should split large offsets that don't fit like above.
  // TODO: Don't use scratch wave offset just because the offset didn't fit.
  Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
                                   : Info->getScratchWaveOffsetReg();

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI.hasValue())
               MIB.addFrameIndex(FI.getValue());
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             MIB.addReg(SOffset);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}
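
// For illustration, an absolute private address such as 4100 splits as:
//   %hi:vgpr_32 = V_MOV_B32_e32 4096   (the bits above the 12-bit field)
// with the remaining 4 rendered as the immediate offset, while soffset is
// either the stack pointer or the scratch wave offset register, chosen from
// the memory operand's pointer info.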

bool AMDGPUInstructionSelector::isDSOffsetLegal(const MachineRegisterInfo &MRI,
                                                const MachineOperand &Base,
                                                int64_t Offset,
                                                unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return signBitIsZero(Base, MRI);
}
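
// For illustration, with OffsetBits == 16 (the single-offset DS encoding) any
// byte offset in [0, 65535] passes the range check here; on Southern Islands
// the sign-bit check above must also hold before the offset can be folded.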
1746
Matt Arsenault7161fb02019-07-16 19:22:21 +00001747InstructionSelector::ComplexRendererFns
1748AMDGPUInstructionSelector::selectMUBUFScratchOffset(
1749 MachineOperand &Root) const {
1750 MachineInstr *MI = Root.getParent();
1751 MachineBasicBlock *MBB = MI->getParent();
1752 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1753
1754 int64_t Offset = 0;
1755 if (!mi_match(Root.getReg(), MRI, m_ICst(Offset)) ||
1756 !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
1757 return {};
1758
1759 const MachineFunction *MF = MBB->getParent();
1760 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1761 const MachineMemOperand *MMO = *MI->memoperands_begin();
1762 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
1763
1764 Register SOffsetReg = isStackPtrRelative(PtrInfo)
1765 ? Info->getStackPtrOffsetReg()
1766 : Info->getScratchWaveOffsetReg();
1767 return {{
1768 [=](MachineInstrBuilder &MIB) {
1769 MIB.addReg(Info->getScratchRSrcReg());
1770 }, // rsrc
1771 [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
1772 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
1773 }};
1774}
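
// Unlike the "offen" case above, this matches only when the whole address is
// a legal constant. As a sketch, a private-memory G_LOAD whose address is
// (G_CONSTANT 16) can select to an offset-only MUBUF access with offset:16
// and no vaddr operand.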

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  const MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }
    }};
  }

  int64_t ConstAddr = 0;
  if (isBaseWithConstantOffset(Root, MRI)) {
    const MachineOperand &LHS = RootDef->getOperand(1);
    const MachineOperand &RHS = RootDef->getOperand(2);
    const MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    const MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t PossibleOffset =
          RHSDef->getOperand(1).getCImm()->getSExtValue();
      if (isDSOffsetLegal(MRI, LHS, PossibleOffset, 16)) {
        // (add n0, c0)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(PossibleOffset); }
        }};
      }
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO: Handle G_SUB-based addresses.
  } else if (mi_match(Root.getReg(), MRI, m_ICst(ConstAddr))) {
    // TODO: Handle purely constant addresses.
  }

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }
  }};
}
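
// For illustration, an LDS access at a small constant offset from a pointer:
//   %p = G_GEP %n0, (G_CONSTANT 64)
//   G_LOAD %p
// renders base = %n0 with offset:64 via the (add n0, c0) case above, while
// anything unmatched falls back to base = Root with offset:0.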