//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

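// Return a MachineOperand for the 32-bit half (sub0 or sub1) of a 64-bit
// operand: register operands are split by copying the subregister into a
// fresh SGPR, while immediate operands are split arithmetically.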
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {
  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
        .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

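// Extract the signed value from a G_CONSTANT-style instruction. Callers must
// guarantee that operand 1 really is a ConstantInt (CImm) operand.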
static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

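// Select a 64-bit G_ADD as a carry-chained pair of 32-bit scalar adds,
// roughly:
//
//   %lo  = S_ADD_U32  %src0.sub0, %src1.sub0   ; sets SCC (carry out)
//   %hi  = S_ADDC_U32 %src0.sub1, %src1.sub1   ; consumes SCC (carry in)
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1
//
// Sizes other than 64 bits are rejected here and fall back to the
// TableGen-generated selector.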
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (Size != 64)
    return false;

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

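// Select side-effect-free G_INTRINSIC calls. maxnum, minnum and cvt_pkrtz
// are forwarded to the generated matcher; amdgcn.kernarg.segment.ptr is
// lowered to a COPY from the preloaded kernarg-segment pointer register.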
bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
      .add(I.getOperand(0))
      .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}

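// Build an EXP or EXP_DONE export instruction from already-selected pieces.
// The operands are added in instruction order: the target index, the four
// data registers, then the vm, compr, and en immediates.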
static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
      .addImm(Tgt)
      .addReg(Reg0)
      .addReg(Reg1)
      .addReg(Reg2)
      .addReg(Reg3)
      .addImm(VM)
      .addImm(Compr)
      .addImm(Enabled);
}

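// Select side-effecting intrinsics. Currently only the export intrinsics are
// handled: amdgcn.exp maps directly onto buildEXP, and amdgcn.exp.compr
// exports two packed registers, padding the remaining lanes with an
// IMPLICIT_DEF register.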
bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  }
  return false;
}

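// Select a G_STORE as a FLAT store, picking DWORD/DWORDX2/X3/X4 purely by
// store size. As the FIXME below notes, the address space is not yet
// consulted, so every store currently takes the flat path.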
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(1))
      .add(I.getOperand(0))
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

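// Materialize a G_CONSTANT or G_FCONSTANT. CImm/FPImm operands are first
// canonicalized to a plain Imm; a 32-bit value becomes a single S_MOV_B32 or
// V_MOV_B32 (by register bank), and a 64-bit value is split into two 32-bit
// moves recombined with a REG_SEQUENCE.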
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
          .addReg(LoReg)
          .addImm(AMDGPU::sub0)
          .addReg(HiReg)
          .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

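// Walk the chain of G_GEPs feeding a memory access and record, for each
// step, the constant immediate part and the SGPR/VGPR register parts. E.g.
// for
//
//   %gep = G_GEP %sgpr_base, %const_offset
//   %val = G_LOAD %gep
//
// this records one GEPInfo with Imm = const_offset and SgprParts holding
// %sgpr_base, then recurses through any further G_GEPs defining the base.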
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts?  Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

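// Fallback G_LOAD selection: like selectG_STORE, this always emits a FLAT
// load sized by the destination register. The SMRD forms are tried first
// through selectImpl in select() below.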
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(0))
      .addReg(PtrReg)
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {
  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    if (selectImpl(I, CoverageInfo))
      return true;
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // src_mods
  }};
}

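// Complex renderers for SMRD addressing modes. selectSmrdImm matches a
// single uniform base pointer plus an immediate that is legal as an encoded
// SMRD offset; selectSmrdImm32 additionally requires the encoded offset to
// fit in 32 bits; selectSmrdSgpr materializes a 32-bit offset into an SGPR
// for the _SGPR instruction variants tried after the _IMM patterns.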
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];

  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  if (!isUInt<32>(EncodedImm))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM Patterns are considered before the _SGPR patterns.
  unsigned PtrReg = GEPInfo.SgprParts[0];
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
      .addImm(GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}