//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

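// Return true if \p Reg is a 1-bit virtual register assigned to the scc
// register bank (or to the 32-bit SGPR class used to model scc values).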
static bool isSCC(unsigned Reg, const MachineRegisterInfo &MRI) {
  assert(!TargetRegisterInfo::isPhysicalRegister(Reg));

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC)
    return RC->getID() == AMDGPU::SReg_32_XM0RegClassID &&
           MRI.getType(Reg).getSizeInBits() == 1;

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::SCCRegBankID;
}

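// Return true if \p Reg is a 1-bit virtual register assigned to the vcc
// register bank (or to the wave mask register class used to model it).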
static bool isVCC(unsigned Reg, const MachineRegisterInfo &MRI,
                  const SIRegisterInfo &TRI) {
  assert(!TargetRegisterInfo::isPhysicalRegister(Reg));

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    return RC == TRI.getWaveMaskRegClass() &&
           MRI.getType(Reg).getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));

  // Special case for COPY from the scc register bank. The scc register bank
  // is modeled using 32-bit sgprs.
  const MachineOperand &Src = I.getOperand(1);
  unsigned SrcReg = Src.getReg();
  if (!TargetRegisterInfo::isPhysicalRegister(SrcReg) && isSCC(SrcReg, MRI)) {
    unsigned DstReg = I.getOperand(0).getReg();

    // Specially handle scc->vcc copies.
    if (isVCC(DstReg, MRI, TRI)) {
      const DebugLoc &DL = I.getDebugLoc();
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
        .addImm(0)
        .addReg(SrcReg);
      if (!MRI.getRegClassOrNull(SrcReg))
        MRI.setRegClass(SrcReg, TRI.getConstrainedRegClassForOperand(Src, MRI));
      I.eraseFromParent();
      return true;
    }
  }

  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

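// Extract the low or high 32-bit half of a 64-bit operand. Register operands
// are copied into a fresh 32-bit SGPR; immediate operands are split in place.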
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (Size != 64)
    return false;

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
          .add(Lo1)
          .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
          .add(Hi1)
          .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
          .addReg(DstLo)
          .addImm(AMDGPU::sub0)
          .addReg(DstHi)
          .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  assert(I.getOperand(2).getImm() % 32 == 0);
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(2).getImm() / 32);
  const DebugLoc &DL = I.getDebugLoc();
  MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY),
                               I.getOperand(0).getReg())
                               .addReg(I.getOperand(1).getReg(), 0, SubReg);

  for (const MachineOperand &MO : Copy->operands()) {
    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, MRI);
  if ((!RC && !MRI.getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(3).getImm() / 32);
  DebugLoc DL = I.getDebugLoc();
  MachineInstr *Ins = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG))
                              .addDef(I.getOperand(0).getReg())
                              .addReg(I.getOperand(1).getReg())
                              .addReg(I.getOperand(2).getReg())
                              .addImm(SubReg);

  for (const MachineOperand &MO : Ins->operands()) {
    if (!MO.isReg())
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(I.getNumExplicitDefs()).getIntrinsicID();
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
      .add(I.getOperand(0))
      .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}

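// Map an integer compare predicate to the corresponding VALU compare opcode,
// or return -1 if the operand size is not 32 or 64 bits.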
static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

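// Map an integer compare predicate to the corresponding SALU compare opcode.
// 64-bit operands only support equality compares and require a subtarget with
// scalar 64-bit compares; return -1 if no opcode is available.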
int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();

  unsigned SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  unsigned CCReg = I.getOperand(0).getReg();
  if (isSCC(CCReg, MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               AMDGPU::SReg_64RegClass, MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

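// Build an EXP or EXP_DONE instruction from the given target, source
// registers, and control bits.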
static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
          .addImm(Tgt)
          .addReg(Reg0)
          .addReg(Reg1)
          .addReg(Reg2)
          .addReg(Reg3)
          .addImm(VM)
          .addImm(Compr)
          .addImm(Enabled);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
                                          MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  }
  return false;
}

bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);
  assert(Size == 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  unsigned CCReg = CCOp.getReg();
  if (isSCC(CCReg, MRI)) {
    unsigned SelectOpcode = Size == 32 ? AMDGPU::S_CSELECT_B32 :
                                         AMDGPU::S_CSELECT_B64;
    MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class that we use
    // to represent it, so we need to manually set the register class here.
    if (!MRI.getRegClassOrNull(CCReg))
      MRI.setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
            .add(I.getOperand(2))
            .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  assert(Size == 32);
  // FIXME: Support 64-bit select
  MachineInstr *Select =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
              .addImm(0)
              .add(I.getOperand(3))
              .addImm(0)
              .add(I.getOperand(2))
              .add(I.getOperand(1));

  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned PtrSize = RBI.getSizeInBits(I.getOperand(1).getReg(), MRI, TRI);
  if (PtrSize != 64) {
    LLVM_DEBUG(dbgs() << "Unhandled address space\n");
    return false;
  }

  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(1))
          .add(I.getOperand(0))
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0)  // slc
          .addImm(0); // dlc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

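// Return a sub-register index covering \p Size bits (rounded up to the next
// supported size), or -1 for sizes over 256 bits.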
static int sizeToSubRegIndex(unsigned Size) {
  switch (Size) {
  case 32:
    return AMDGPU::sub0;
  case 64:
    return AMDGPU::sub0_sub1;
  case 96:
    return AMDGPU::sub0_sub1_sub2;
  case 128:
    return AMDGPU::sub0_sub1_sub2_sub3;
  case 256:
    return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
  default:
    if (Size < 32)
      return AMDGPU::sub0;
    if (Size > 256)
      return -1;
    return sizeToSubRegIndex(PowerOf2Ceil(Size));
  }
}

bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  if (!DstTy.isScalar())
    return false;

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, MRI, TRI);
  if (SrcRB != DstRB)
    return false;

  unsigned DstSize = DstTy.getSizeInBits();
  unsigned SrcSize = SrcTy.getSizeInBits();

  const TargetRegisterClass *SrcRC
    = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, MRI);
  const TargetRegisterClass *DstRC
    = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, MRI);

  if (SrcSize > 32) {
    int SubRegIdx = sizeToSubRegIndex(DstSize);
    if (SubRegIdx == -1)
      return false;

    // Deal with weird cases where the class only partially supports the subreg
    // index.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
    if (!SrcRC)
      return false;

    I.getOperand(1).setSubReg(SubRegIdx);
  }

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  I.setDesc(TII.get(TargetOpcode::COPY));
  return true;
}

/// \returns true if a bitmask for \p Size bits will be an inline immediate.
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes<unsigned>(Size);
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}

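// Select G_SEXT, G_ZEXT, and G_ANYEXT based on the source register bank:
// scc sources use S_CSELECT, vcc sources use V_CNDMASK, VGPR sources use
// V_BFE or a masking AND, and SGPR sources use S_SEXT/S_BFE or S_AND.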
bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
  bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  const LLT S1 = LLT::scalar(1);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned DstSize = DstTy.getSizeInBits();
  if (!DstTy.isScalar())
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, TRI);

  if (SrcBank->getID() == AMDGPU::SCCRegBankID) {
    if (SrcTy != S1 || DstSize > 64) // Invalid
      return false;

    unsigned Opcode =
        DstSize > 32 ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
    const TargetRegisterClass *DstRC =
        DstSize > 32 ? &AMDGPU::SReg_64RegClass : &AMDGPU::SReg_32RegClass;

    // FIXME: Create an extra copy to avoid incorrectly constraining the result
    // of the scc producer.
    unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), TmpReg)
      .addReg(SrcReg);
    BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(TmpReg);

    // The instruction operands are backwards from what you would expect.
    BuildMI(MBB, I, DL, TII.get(Opcode), DstReg)
      .addImm(0)
      .addImm(Signed ? -1 : 1);
    return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
  }

  if (SrcBank->getID() == AMDGPU::VCCRegBankID && DstSize <= 32) {
    if (SrcTy != S1) // Invalid
      return false;

    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)               // src0_modifiers
        .addImm(0)               // src0
        .addImm(0)               // src1_modifiers
        .addImm(Signed ? -1 : 1) // src1
        .addUse(SrcReg);
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

  if (I.getOpcode() == AMDGPU::G_ANYEXT)
    return selectCOPY(I);

  if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
    // 64-bit should have been split up in RegBankSelect

    // Try to use an and with a mask if it will save code size.
    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      MachineInstr *ExtI =
        BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
          .addImm(Mask)
          .addReg(SrcReg);
      return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
    }

    const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
        .addReg(SrcReg)
        .addImm(0)        // Offset
        .addImm(SrcSize); // Width
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

  if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
    if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, MRI))
      return false;

    if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
      const unsigned SextOpc = SrcSize == 8 ?
        AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
      BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
        .addReg(SrcReg);
      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
    }

    const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
    const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
    if (DstSize > 32 && SrcSize <= 32) {
      // We need a 64-bit register source, but the high bits don't matter.
      unsigned ExtReg
        = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned UndefReg
        = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
      BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
        .addReg(SrcReg)
        .addImm(AMDGPU::sub0)
        .addReg(UndefReg)
        .addImm(AMDGPU::sub1);

      BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
        .addReg(ExtReg)
        .addImm(SrcSize << 16);

      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, MRI);
    }

    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
        .addReg(SrcReg)
        .addImm(Mask);
    } else {
      BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
        .addReg(SrcReg)
        .addImm(SrcSize << 16);
    }

    return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
          .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
          .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
              .addReg(LoReg)
              .addImm(AMDGPU::sub0)
              .addReg(HiReg)
              .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

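// Walk the chain of G_GEPs feeding a memory instruction's address and record,
// for each one, its constant offset and the SGPR/VGPR registers it adds.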
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts?  Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(0))
          .addReg(PtrReg)
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0)  // slc
          .addImm(0); // dlc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_ICMP:
    if (selectG_ICMP(I))
      return true;
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    if (selectImpl(I, CoverageInfo))
      return true;
    return selectG_LOAD(I);
  case TargetOpcode::G_SELECT:
    return selectG_SELECT(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  case TargetOpcode::G_TRUNC:
    return selectG_TRUNC(I);
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    if (selectG_SZA_EXT(I)) {
      I.eraseFromParent();
      return true;
    }

    return false;
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

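// Peel G_FNEG and G_FABS off a VOP3 source operand, returning the underlying
// register together with the accumulated source modifier bits.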
std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3ModsImpl(
  Register Src, const MachineRegisterInfo &MRI) const {
  unsigned Mods = 0;
  MachineInstr *MI = MRI.getVRegDef(Src);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::NEG;
    MI = MRI.getVRegDef(Src);
  }

  if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::ABS;
  }

  return std::make_pair(Src, Mods);
}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

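// Match an SMRD load whose address is an SGPR base plus a constant offset that
// fits the encoded immediate form.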
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];

  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  if (!isUInt<32>(EncodedImm))
    return None;

  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

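// Match an SMRD load with a 32-bit constant offset that did not match the
// immediate forms, by materializing the offset into an SGPR with S_MOV_B32.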
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
  unsigned PtrReg = GEPInfo.SgprParts[0];
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
          .addImm(GEPInfo.Imm);
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}