//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
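
// Return true if the virtual register holds a 1-bit value assigned to the
// scc register bank (or to the 32-bit SGPR class used to model it).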
static bool isSCC(unsigned Reg, const MachineRegisterInfo &MRI) {
  assert(!TargetRegisterInfo::isPhysicalRegister(Reg));

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC)
    return RC->getID() == AMDGPU::SReg_32_XM0RegClassID &&
           MRI.getType(Reg).getSizeInBits() == 1;

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::SCCRegBankID;
}
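
// Return true if the virtual register holds a 1-bit value assigned to the
// vcc register bank (or to the wave mask register class used to model it).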
static bool isVCC(unsigned Reg, const MachineRegisterInfo &MRI,
                  const SIRegisterInfo &TRI) {
  assert(!TargetRegisterInfo::isPhysicalRegister(Reg));

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    return RC == TRI.getWaveMaskRegClass() &&
           MRI.getType(Reg).getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}
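
// Select a plain COPY: constrain each register operand to a legal register
// class, and lower an scc source copied into a vcc destination to a
// V_CMP_NE_U32 against zero.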
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));

  // Special case for COPY from the scc register bank. The scc register bank
  // is modeled using 32-bit sgprs.
  const MachineOperand &Src = I.getOperand(1);
  unsigned SrcReg = Src.getReg();
  if (!TargetRegisterInfo::isPhysicalRegister(SrcReg) && isSCC(SrcReg, MRI)) {
    unsigned DstReg = I.getOperand(0).getReg();

    // Specially handle scc->vcc copies.
    if (isVCC(DstReg, MRI, TRI)) {
      const DebugLoc &DL = I.getDebugLoc();
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
        .addImm(0)
        .addReg(SrcReg);
      if (!MRI.getRegClassOrNull(SrcReg))
        MRI.setRegClass(SrcReg, TRI.getConstrainedRegClassForOperand(Src, MRI));
      I.eraseFromParent();
      return true;
    }
  }

  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}
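
// Return the 32-bit low or high half (selected by SubIdx) of a 64-bit
// operand: register operands are copied into a fresh 32-bit SGPR, immediate
// operands are split arithmetically.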
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}
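
// Select a 64-bit scalar G_ADD as an S_ADD_U32/S_ADDC_U32 pair whose halves
// are recombined with a REG_SEQUENCE; other sizes are rejected.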
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (Size != 64)
    return false;

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
          .add(Lo1)
          .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
          .add(Hi1)
          .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
          .addReg(DstLo)
          .addImm(AMDGPU::sub0)
          .addReg(DstHi)
          .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}
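
// Select G_EXTRACT of a 32-bit-aligned slice as a subregister copy.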
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  assert(I.getOperand(2).getImm() % 32 == 0);
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(2).getImm() / 32);
  const DebugLoc &DL = I.getDebugLoc();
  MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY),
                               I.getOperand(0).getReg())
                               .addReg(I.getOperand(1).getReg(), 0, SubReg);

  for (const MachineOperand &MO : Copy->operands()) {
    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, MRI);
  if ((!RC && !MRI.getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(3).getImm() / 32);
  DebugLoc DL = I.getDebugLoc();
  MachineInstr *Ins = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG))
               .addDef(I.getOperand(0).getReg())
               .addReg(I.getOperand(1).getReg())
               .addReg(I.getOperand(2).getReg())
               .addImm(SubReg);

  for (const MachineOperand &MO : Ins->operands()) {
    if (!MO.isReg())
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(I.getNumExplicitDefs()).getIntrinsicID();
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
      .add(I.getOperand(0))
      .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}
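
// Map an integer predicate onto the corresponding VALU compare opcode for a
// 32-bit or 64-bit comparison, or return -1 for unsupported sizes.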
static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}
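
// Select G_ICMP either as a scalar S_CMP (when the result lives in the scc
// bank) or as a VALU V_CMP producing a 64-bit condition mask.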
bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();

  unsigned SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  unsigned CCReg = I.getOperand(0).getReg();
  if (isSCC(CCReg, MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               AMDGPU::SReg_64RegClass, MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
          .addImm(Tgt)
          .addReg(Reg0)
          .addReg(Reg1)
          .addReg(Reg2)
          .addReg(Reg3)
          .addImm(VM)
          .addImm(Compr)
          .addImm(Enabled);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I,
    CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  }
  return false;
}
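
// Select G_SELECT: an scc condition becomes S_CSELECT_B32/B64; otherwise a
// 32-bit V_CNDMASK_B32 conditioned on the vcc input is emitted.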
bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);
  assert(Size == 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  unsigned CCReg = CCOp.getReg();
  if (isSCC(CCReg, MRI)) {
    unsigned SelectOpcode = Size == 32 ? AMDGPU::S_CSELECT_B32 :
                                         AMDGPU::S_CSELECT_B64;
    MachineInstr *CopySCC =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class that we use
    // to represent it. So we need to manually set the register class here.
    if (!MRI.getRegClassOrNull(CCReg))
      MRI.setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
            .add(I.getOperand(2))
            .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  assert(Size == 32);
  // FIXME: Support 64-bit select
  MachineInstr *Select =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .add(I.getOperand(3))
          .addImm(0)
          .add(I.getOperand(2))
          .add(I.getOperand(1));

  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}
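
// Select G_STORE as a FLAT store of the width of the stored value.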
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(1))
          .add(I.getOperand(0))
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0)  // slc
          .addImm(0); // dlc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}
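
// Return the subregister index covering the low Size bits of a register, or
// -1 if the size is larger than 256 bits.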
static int sizeToSubRegIndex(unsigned Size) {
  switch (Size) {
  case 32:
    return AMDGPU::sub0;
  case 64:
    return AMDGPU::sub0_sub1;
  case 96:
    return AMDGPU::sub0_sub1_sub2;
  case 128:
    return AMDGPU::sub0_sub1_sub2_sub3;
  case 256:
    return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
  default:
    if (Size < 32)
      return AMDGPU::sub0;
    if (Size > 256)
      return -1;
    return sizeToSubRegIndex(PowerOf2Ceil(Size));
  }
}

bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  if (!DstTy.isScalar())
    return false;

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, MRI, TRI);
  if (SrcRB != DstRB)
    return false;

  unsigned DstSize = DstTy.getSizeInBits();
  unsigned SrcSize = SrcTy.getSizeInBits();

  const TargetRegisterClass *SrcRC
    = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, MRI);
  const TargetRegisterClass *DstRC
    = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, MRI);

  if (SrcSize > 32) {
    int SubRegIdx = sizeToSubRegIndex(DstSize);
    if (SubRegIdx == -1)
      return false;

    // Deal with weird cases where the class only partially supports the subreg
    // index.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
    if (!SrcRC)
      return false;

    I.getOperand(1).setSubReg(SubRegIdx);
  }

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  I.setDesc(TII.get(TargetOpcode::COPY));
  return true;
}

/// \returns true if a bitmask for \p Size bits will be an inline immediate.
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes<unsigned>(Size);
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}
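
// Select G_SEXT/G_ZEXT/G_ANYEXT based on the source register bank: scc and
// vcc sources become conditional selects, VGPR sources use V_BFE or an AND
// mask, and SGPR sources use S_SEXT/S_BFE/S_AND.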
bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
  bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  const LLT S1 = LLT::scalar(1);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned DstSize = DstTy.getSizeInBits();
  if (!DstTy.isScalar())
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, TRI);

  if (SrcBank->getID() == AMDGPU::SCCRegBankID) {
    if (SrcTy != S1 || DstSize > 64) // Invalid
      return false;

    unsigned Opcode =
        DstSize > 32 ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
    const TargetRegisterClass *DstRC =
        DstSize > 32 ? &AMDGPU::SReg_64RegClass : &AMDGPU::SReg_32RegClass;

    // FIXME: Create an extra copy to avoid incorrectly constraining the result
    // of the scc producer.
    unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), TmpReg)
      .addReg(SrcReg);
    BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(TmpReg);

    // The instruction operands are backwards from what you would expect.
    BuildMI(MBB, I, DL, TII.get(Opcode), DstReg)
      .addImm(0)
      .addImm(Signed ? -1 : 1);
    return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
  }

  if (SrcBank->getID() == AMDGPU::VCCRegBankID && DstSize <= 32) {
    if (SrcTy != S1) // Invalid
      return false;

    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)               // src0_modifiers
      .addImm(0)               // src0
      .addImm(0)               // src1_modifiers
      .addImm(Signed ? -1 : 1) // src1
      .addUse(SrcReg);
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

  if (I.getOpcode() == AMDGPU::G_ANYEXT)
    return selectCOPY(I);

  if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
    // 64-bit should have been split up in RegBankSelect

    // Try to use an and with a mask if it will save code size.
    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      MachineInstr *ExtI =
        BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
        .addImm(Mask)
        .addReg(SrcReg);
      return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
    }

    const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
      .addReg(SrcReg)
      .addImm(0)        // Offset
      .addImm(SrcSize); // Width
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

  if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
    if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, MRI))
      return false;

    if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
      const unsigned SextOpc = SrcSize == 8 ?
        AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
      BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
        .addReg(SrcReg);
      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
    }

    const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
    const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
    if (DstSize > 32 && SrcSize <= 32) {
      // We need a 64-bit register source, but the high bits don't matter.
      unsigned ExtReg
        = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned UndefReg
        = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
      BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
        .addReg(SrcReg)
        .addImm(AMDGPU::sub0)
        .addReg(UndefReg)
        .addImm(AMDGPU::sub1);

      BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
        .addReg(ExtReg)
        .addImm(SrcSize << 16);

      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, MRI);
    }

    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
        .addReg(SrcReg)
        .addImm(Mask);
    } else {
      BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
        .addReg(SrcReg)
        .addImm(SrcSize << 16);
    }

    return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
  }

  return false;
}
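
// Select G_CONSTANT/G_FCONSTANT as S_MOV_B32 or V_MOV_B32, splitting 64-bit
// values into two 32-bit moves combined with a REG_SEQUENCE.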
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
          .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
          .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
            .addReg(LoReg)
            .addImm(AMDGPU::sub0)
            .addReg(HiReg)
            .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}
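
// Walk the G_GEP feeding a load's address operand and record its constant
// offset and SGPR/VGPR address components, recursing through chained GEPs.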
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts?  Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}
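
// Return true if the memory access is known to be uniform, based on the
// pointer value it accesses or on amdgpu.uniform metadata.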
bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}
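
// Fallback selection for G_LOAD as a FLAT load; the imported TableGen
// patterns are tried first from select().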
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(0))
          .addReg(PtrReg)
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0)  // slc
          .addImm(0); // dlc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}
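
// Top-level entry point: dispatch each generic opcode to its manual
// selection routine or to the TableGen-generated selectImpl().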
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_ICMP:
    if (selectG_ICMP(I))
      return true;
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    if (selectImpl(I, CoverageInfo))
      return true;
    return selectG_LOAD(I);
  case TargetOpcode::G_SELECT:
    return selectG_SELECT(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  case TargetOpcode::G_TRUNC:
    return selectG_TRUNC(I);
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    if (selectG_SZA_EXT(I)) {
      I.eraseFromParent();
      return true;
    }

    return false;
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // src_mods
  }};
}
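
// Complex pattern matchers for SMRD addressing modes: an SGPR base plus
// either an encodable immediate offset or an offset materialized in an SGPR.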
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];

  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  if (!isUInt<32>(EncodedImm))
    return None;

  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM Patterns are considered before the _SGPR patterns.
  unsigned PtrReg = GEPInfo.SgprParts[0];
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
          .addImm(GEPInfo.Imm);
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}