//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPUInstrInfo.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Dominators.h"
#endif
#include "llvm/IR/Instruction.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <new>
#include <vector>

#define DEBUG_TYPE "isel"

using namespace llvm;

namespace llvm {

class R600InstrInfo;

} // end namespace llvm

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

static bool isNullConstantOrUndef(SDValue V) {
  if (V.isUndef())
    return true;

  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

static bool getConstantValue(SDValue N, uint32_t &Out) {
  // This is only used for packed vectors, where using 0 for undef should
  // always be good.
  if (N.isUndef()) {
    Out = 0;
    return true;
  }

  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    Out = C->getAPIntValue().getSExtValue();
    return true;
  }

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
    Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
    return true;
  }

  return false;
}

// TODO: Handle undef as zero
static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
                                 bool Negate = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
  uint32_t LHSVal, RHSVal;
  if (getConstantValue(N->getOperand(0), LHSVal) &&
      getConstantValue(N->getOperand(1), RHSVal)) {
    SDLoc SL(N);
    uint32_t K = Negate ?
      (-LHSVal & 0xffff) | (-RHSVal << 16) :
      (LHSVal & 0xffff) | (RHSVal << 16);
    return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
                              DAG.getTargetConstant(K, SL, MVT::i32));
  }

  return nullptr;
}

static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
  return packConstantV2I16(N, DAG, true);
}

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const GCNSubtarget *Subtarget;
  bool EnableLateStructurizeCFG;

public:
  explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
                              CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
    : SelectionDAGISel(*TM, OptLevel) {
    EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  }
  ~AMDGPUDAGToDAGISel() override = default;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AMDGPUArgumentUsageInfo>();
    AU.addRequired<LegacyDivergenceAnalysis>();
#ifdef EXPENSIVE_CHECKS
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
#endif
    SelectionDAGISel::getAnalysisUsage(AU);
  }

  bool matchLoadD16FromBuildVector(SDNode *N) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void PreprocessISelDAG() override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;

protected:
  void SelectBuildVector(SDNode *N, unsigned RegClassID);

private:
  std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
  bool isNoNanSrc(SDValue N) const;
  bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
  bool isNegInlineImmediate(const SDNode *N) const {
    return isInlineImmediate(N, true);
  }

  bool isVGPRImm(const SDNode *N) const;
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;

  SDNode *glueCopyToM0LDSInit(SDNode *N) const;
  SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(SDValue Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE, SDValue &DLC) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE, SDValue &DLC) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratchOffen(SDNode *Parent,
                               SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                               SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFScratchOffset(SDNode *Parent,
                                SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                                SDValue &Offset) const;

  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE, SDValue &DLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &SLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;

  bool SelectFlatAtomic(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;
  bool SelectFlatAtomicSigned(SDNode *N, SDValue Addr, SDValue &VAddr,
                              SDValue &Offset, SDValue &SLC) const;

  template <bool IsSigned>
  bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset, SDValue &SLC) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  SDValue Expand32BitAddress(SDValue Addr) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;

  bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods_f32(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp,
                                 SDValue &Omod) const;

  bool SelectVOP3OMods(SDValue In, SDValue &Src,
                       SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3PMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                        SDValue &Clamp) const;

  bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3OpSel0(SDValue In, SDValue &Src, SDValue &SrcMods,
                        SDValue &Clamp) const;

  bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3OpSelMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Clamp) const;
  bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, unsigned &Mods) const;
  bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  SDValue getHi16Elt(SDValue In) const;

  void SelectADD_SUB_I64(SDNode *N);
  void SelectAddcSubb(SDNode *N);
  void SelectUADDO_USUBO(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectDIV_FMAS(SDNode *N);
  void SelectMAD_64_32(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectFMAD_FMA(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);
  void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
  void SelectDS_GWS(SDNode *N, unsigned IntrID);
  void SelectINTRINSIC_W_CHAIN(SDNode *N);
  void SelectINTRINSIC_VOID(SDNode *N);

protected:
  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
  const R600Subtarget *Subtarget;

  bool isConstantLoad(const MemSDNode *N, int cbID) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
public:
  explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
      AMDGPUDAGToDAGISel(TM, OptLevel) {}

  void Select(SDNode *N) override;

  bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  void PreprocessISelDAG() override {}

protected:
  // Include the pieces autogenerated from the target description.
#include "R600GenDAGISel.inc"
};

static SDValue stripBitcast(SDValue Val) {
  return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
}

// Figure out if this is really an extract of the high 16-bits of a dword.
static bool isExtractHiElt(SDValue In, SDValue &Out) {
  In = stripBitcast(In);
  if (In.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue Srl = In.getOperand(0);
  if (Srl.getOpcode() == ISD::SRL) {
    if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      if (ShiftAmt->getZExtValue() == 16) {
        Out = stripBitcast(Srl.getOperand(0));
        return true;
      }
    }
  }

  return false;
}

// Look through operations that obscure just looking at the low 16-bits of the
// same register.
static SDValue stripExtractLoElt(SDValue In) {
  if (In.getOpcode() == ISD::TRUNCATE) {
    SDValue Src = In.getOperand(0);
    if (Src.getValueType().getSizeInBits() == 32)
      return stripBitcast(Src);
  }

  return In;
}

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
#ifdef EXPENSIVE_CHECKS
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
#endif
INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)

/// This pass converts a legalized DAG into an AMDGPU-specific
// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}

/// This pass converts a legalized DAG into an R600-specific
// DAG, ready for instruction scheduling.
FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
                                      CodeGenOpt::Level OptLevel) {
  return new R600DAGToDAGISel(TM, OptLevel);
}

bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
#ifdef EXPENSIVE_CHECKS
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto &L : LI->getLoopsInPreorder()) {
    assert(L->isLCSSAForm(DT));
  }
#endif
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  return SelectionDAGISel::runOnMachineFunction(MF);
}

bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    SDValue TiedIn = getHi16Elt(Hi);
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}

void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->d16PreservesUnusedBits())
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    switch (N->getOpcode()) {
    case ISD::BUILD_VECTOR:
      MadeChange |= matchLoadD16FromBuildVector(N);
      break;
    default:
      break;
    }
  }

  if (MadeChange) {
    CurDAG->RemoveDeadNodes();
    LLVM_DEBUG(dbgs() << "After PreProcess:\n";
               CurDAG->dump(););
  }
}

bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
  if (TM.Options.NoNaNsFPMath)
    return true;

  // TODO: Move into isKnownNeverNaN
  if (N->getFlags().isDefined())
    return N->getFlags().hasNoNaNs();

  return CurDAG->isKnownNeverNaN(N);
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
                                           bool Negated) const {
  if (N->isUndef())
    return true;

  const SIInstrInfo *TII = Subtarget->getInstrInfo();
  if (Negated) {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(-C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());

  } else {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
  }

  return false;
}

/// Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                                  unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI
        = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");

  SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N),
                                 Val);

  SDValue Glue = M0.getValue(1);

  SmallVector <SDValue, 8> Ops;
  Ops.push_back(M0); // Replace the chain.
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));

  Ops.push_back(Glue);
  return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
  unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
  if (AS == AMDGPUAS::LOCAL_ADDRESS) {
    if (Subtarget->ldsRequiresM0Init())
      return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
  } else if (AS == AMDGPUAS::REGION_ADDRESS) {
    MachineFunction &MF = CurDAG->getMachineFunction();
    unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
    return
        glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
  }
  return N;
}

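// Materialize a 64-bit immediate into scalar registers: emit an S_MOV_B32 for
// each 32-bit half and combine the two results with a REG_SEQUENCE into an
// SReg_64 of the requested value type.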
MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
                                                  EVT VT) const {
  SDNode *Lo = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
  SDNode *Hi =
      CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                             CurDAG->getConstant(Imm >> 32, DL, MVT::i32));
  const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};

  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
}

static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) {
  switch (NumVectorElts) {
  case 1:
    return AMDGPU::SReg_32_XM0RegClassID;
  case 2:
    return AMDGPU::SReg_64RegClassID;
  case 3:
    return AMDGPU::SGPR_96RegClassID;
  case 4:
    return AMDGPU::SReg_128RegClassID;
  case 5:
    return AMDGPU::SGPR_160RegClassID;
  case 8:
    return AMDGPU::SReg_256RegClassID;
  case 16:
    return AMDGPU::SReg_512RegClassID;
  case 32:
    return AMDGPU::SReg_1024RegClassID;
  }

  llvm_unreachable("invalid vector size");
}

void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
  EVT VT = N->getValueType(0);
  unsigned NumVectorElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc DL(N);
  SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

  if (NumVectorElts == 1) {
    CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                         RegClass);
    return;
  }

  assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
                                "supported yet");
  // 32 = Max Num Vector Elements
  // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
  // 1 = Vector Register Class
  SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

  RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
  bool IsRegSeq = true;
  unsigned NOps = N->getNumOperands();
  for (unsigned i = 0; i < NOps; i++) {
    // XXX: Why is this here?
    if (isa<RegisterSDNode>(N->getOperand(i))) {
      IsRegSeq = false;
      break;
    }
    unsigned Sub = AMDGPURegisterInfo::getSubRegFromChannel(i);
    RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
    RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
  }
  if (NOps != NumVectorElts) {
    // Fill in the missing undef elements if this was a scalar_to_vector.
    assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
    MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                   DL, EltVT);
    for (unsigned i = NOps; i < NumVectorElts; ++i) {
      unsigned Sub = AMDGPURegisterInfo::getSubRegFromChannel(i);
      RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(Sub, DL, MVT::i32);
    }
  }

  if (!IsRegSeq)
    SelectCode(N);
  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
}

void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return;   // Already selected.
  }

  if (isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
       Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX))
    N = glueCopyToM0LDSInit(N);

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lower it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectAddcSubb(N);
    return;
  case ISD::UADDO:
  case ISD::USUBO: {
    SelectUADDO_USUBO(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }

  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    if (VT.getScalarSizeInBits() == 16) {
      if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
        if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
          ReplaceNode(N, Packed);
          return;
        }
      }

      break;
    }

    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    unsigned RegClassID = selectSGPRVectorRegClassID(NumVectorElts);
    SelectBuildVector(N, RegClassID);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
    return;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    N = glueCopyToM0LDSInit(N);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case AMDGPUISD::DIV_FMAS: {
    SelectDIV_FMAS(N);
    return;
  }
  case AMDGPUISD::MAD_I64_I32:
  case AMDGPUISD::MAD_U64_U32: {
    SelectMAD_64_32(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;
  case ISD::FMAD:
  case ISD::FMA:
    SelectFMAD_FMA(N);
    return;
  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_PKNORM_I16_F32:
  case AMDGPUISD::CVT_PKNORM_U16_F32:
  case AMDGPUISD::CVT_PK_U16_U32:
  case AMDGPUISD::CVT_PK_I16_I32: {
    // Hack around using a legal type if f16 is illegal.
    if (N->getValueType(0) == MVT::i32) {
      MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
      N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
                              { N->getOperand(0), N->getOperand(1) });
      SelectCode(N);
      return;
    }

    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SelectINTRINSIC_W_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_VOID: {
    SelectINTRINSIC_VOID(N);
    return;
  }
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}

StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

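// Select a 64-bit scalar add/sub by splitting both operands into 32-bit
// halves, combining them with S_ADD_U32/S_ADDC_U32 (or the SUB forms), and
// rejoining the two results with a REG_SEQUENCE.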
// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}

void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue CI = N->getOperand(2);

  unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
                                                 : AMDGPU::V_SUBB_U32_e64;
  CurDAG->SelectNodeTo(
      N, Opc, N->getVTList(),
      {LHS, RHS, CI, CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
}

void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
  // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
  // carry out despite the _i32 name. These were renamed in VI to _U32.
  // FIXME: We should probably rename the opcodes here.
  unsigned Opc = N->getOpcode() == ISD::UADDO ?
    AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;

  CurDAG->SelectNodeTo(
      N, Opc, N->getVTList(),
      {N->getOperand(0), N->getOperand(1),
       CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
}

void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32, N->getVTList(), Ops);
}

void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, clamp, omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00001096void AMDGPUDAGToDAGISel::SelectDIV_FMAS(SDNode *N) {
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00001097 const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
1098 const SIRegisterInfo *TRI = ST->getRegisterInfo();
1099
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00001100 SDLoc SL(N);
1101 EVT VT = N->getValueType(0);
1102
1103 assert(VT == MVT::f32 || VT == MVT::f64);
1104
1105 unsigned Opc
1106 = (VT == MVT::f64) ? AMDGPU::V_DIV_FMAS_F64 : AMDGPU::V_DIV_FMAS_F32;
1107
1108 SDValue CarryIn = N->getOperand(3);
1109 // V_DIV_FMAS implicitly reads VCC.
1110 SDValue VCC = CurDAG->getCopyToReg(CurDAG->getEntryNode(), SL,
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00001111 TRI->getVCC(), CarryIn, SDValue());
Stanislav Mekhanoshin8f3da702019-04-26 16:37:51 +00001112
1113 SDValue Ops[10];
1114
1115 SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
1116 SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
1117 SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
1118
1119 Ops[8] = VCC;
1120 Ops[9] = VCC.getValue(1);
1121
1122 CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1123}
1124
Matt Arsenault4f6318f2017-11-06 17:04:37 +00001125// We need to handle this here because tablegen doesn't support matching
1126// instructions with multiple outputs.
1127void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
1128 SDLoc SL(N);
1129 bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
1130 unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32 : AMDGPU::V_MAD_U64_U32;
1131
1132 SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
1133 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
1134 Clamp };
1135 CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
1136}
1137
Matt Arsenaultcdd191d2019-01-28 20:14:49 +00001138bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset,
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001139 unsigned OffsetBits) const {
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001140 if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
1141 (OffsetBits == 8 && !isUInt<8>(Offset)))
1142 return false;
1143
Matt Arsenaulte4c2e9b2019-06-19 23:54:58 +00001144 if (Subtarget->hasUsableDSOffset() ||
Matt Arsenault706f9302015-07-06 16:01:58 +00001145 Subtarget->unsafeDSOffsetFoldingEnabled())
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001146 return true;
1147
1148 // On Southern Islands, instructions with a negative base value and an offset
1149 // don't seem to work.
1150 return CurDAG->SignBitIsZero(Base);
1151}
1152
1153bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
1154 SDValue &Offset) const {
Tom Stellard92b24f32016-04-29 14:34:26 +00001155 SDLoc DL(Addr);
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001156 if (CurDAG->isBaseWithConstantOffset(Addr)) {
1157 SDValue N0 = Addr.getOperand(0);
1158 SDValue N1 = Addr.getOperand(1);
1159 ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1160 if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
1161 // (add n0, c0)
1162 Base = N0;
Tom Stellard92b24f32016-04-29 14:34:26 +00001163 Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001164 return true;
1165 }
Matt Arsenault966a94f2015-09-08 19:34:22 +00001166 } else if (Addr.getOpcode() == ISD::SUB) {
1167 // sub C, x -> add (sub 0, x), C
1168 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1169 int64_t ByteOffset = C->getSExtValue();
1170 if (isUInt<16>(ByteOffset)) {
Matt Arsenault966a94f2015-09-08 19:34:22 +00001171 SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001172
Matt Arsenault966a94f2015-09-08 19:34:22 +00001173 // XXX - This is kind of hacky. Create a dummy sub node so we can check
1174 // the known bits in isDSOffsetLegal. We need to emit the selected node
1175 // here, so this is thrown away.
1176 SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
1177 Zero, Addr.getOperand(1));
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001178
Matt Arsenault966a94f2015-09-08 19:34:22 +00001179 if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
Tim Renoufcfdfba92019-03-18 19:35:44 +00001180 SmallVector<SDValue, 3> Opnds;
1181 Opnds.push_back(Zero);
1182 Opnds.push_back(Addr.getOperand(1));
Matt Arsenault84445dd2017-11-30 22:51:26 +00001183
Tim Renoufcfdfba92019-03-18 19:35:44 +00001184 // FIXME: Select the VOP3 version for the with-carry case.
1185 unsigned SubOp = AMDGPU::V_SUB_I32_e32;
1186 if (Subtarget->hasAddNoCarry()) {
1187 SubOp = AMDGPU::V_SUB_U32_e64;
Michael Liaoeea51772019-03-20 20:18:56 +00001188 Opnds.push_back(
1189 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
Tim Renoufcfdfba92019-03-18 19:35:44 +00001190 }
1191
1192 MachineSDNode *MachineSub =
1193 CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);
Matt Arsenault966a94f2015-09-08 19:34:22 +00001194
1195 Base = SDValue(MachineSub, 0);
Tom Stellard26a2ab72016-06-10 00:01:04 +00001196 Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
Matt Arsenault966a94f2015-09-08 19:34:22 +00001197 return true;
1198 }
1199 }
1200 }
1201 } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1202 // If we have a constant address, prefer to put the constant into the
1203 // offset. This can save moves to load the constant address since multiple
1204 // operations can share the zero base address register, and enables merging
1205 // into read2 / write2 instructions.
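    // For example, two loads from constant LDS addresses 64 and 68 can share a
    // single v_mov_b32 of 0 as the base register and carry 64 and 68 in their
    // offset fields, which later allows merging them into one read2.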
1206
1207 SDLoc DL(Addr);
1208
Matt Arsenaulte775f5f2014-10-14 17:21:19 +00001209 if (isUInt<16>(CAddr->getZExtValue())) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001210 SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
Tom Stellardc8d79202014-10-15 21:08:59 +00001211 MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001212 DL, MVT::i32, Zero);
Tom Stellardc8d79202014-10-15 21:08:59 +00001213 Base = SDValue(MovZero, 0);
Tom Stellard26a2ab72016-06-10 00:01:04 +00001214 Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
Matt Arsenaulte775f5f2014-10-14 17:21:19 +00001215 return true;
1216 }
1217 }
1218
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001219 // default case
1220 Base = Addr;
Matt Arsenault966a94f2015-09-08 19:34:22 +00001221 Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
Tom Stellard85e8b6d2014-08-22 18:49:33 +00001222 return true;
1223}
1224
Matt Arsenault966a94f2015-09-08 19:34:22 +00001225// TODO: If offset is too big, put low 16-bit into offset.
Tom Stellardf3fc5552014-08-22 18:49:35 +00001226bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
1227 SDValue &Offset0,
1228 SDValue &Offset1) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001229 SDLoc DL(Addr);
1230
Tom Stellardf3fc5552014-08-22 18:49:35 +00001231 if (CurDAG->isBaseWithConstantOffset(Addr)) {
1232 SDValue N0 = Addr.getOperand(0);
1233 SDValue N1 = Addr.getOperand(1);
1234 ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1235 unsigned DWordOffset0 = C1->getZExtValue() / 4;
1236 unsigned DWordOffset1 = DWordOffset0 + 1;
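    // The two offsets are in dword units: e.g. a 64-bit access at byte offset
    // 40 uses DWordOffset0 = 10 and DWordOffset1 = 11 for the paired
    // instruction.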
1237 // (add n0, c0)
1238 if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
1239 Base = N0;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001240 Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
1241 Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
Tom Stellardf3fc5552014-08-22 18:49:35 +00001242 return true;
1243 }
Matt Arsenault966a94f2015-09-08 19:34:22 +00001244 } else if (Addr.getOpcode() == ISD::SUB) {
1245 // sub C, x -> add (sub 0, x), C
1246 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
1247 unsigned DWordOffset0 = C->getZExtValue() / 4;
1248 unsigned DWordOffset1 = DWordOffset0 + 1;
Tom Stellardf3fc5552014-08-22 18:49:35 +00001249
Matt Arsenault966a94f2015-09-08 19:34:22 +00001250 if (isUInt<8>(DWordOffset0)) {
1251 SDLoc DL(Addr);
1252 SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
1253
1254 // XXX - This is kind of hacky. Create a dummy sub node so we can check
1255 // the known bits in isDSOffsetLegal. We need to emit the selected node
1256 // here, so this is thrown away.
1257 SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
1258 Zero, Addr.getOperand(1));
1259
1260 if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
Tim Renoufcfdfba92019-03-18 19:35:44 +00001261 SmallVector<SDValue, 3> Opnds;
1262 Opnds.push_back(Zero);
1263 Opnds.push_back(Addr.getOperand(1));
1264 unsigned SubOp = AMDGPU::V_SUB_I32_e32;
1265 if (Subtarget->hasAddNoCarry()) {
1266 SubOp = AMDGPU::V_SUB_U32_e64;
Michael Liaoeea51772019-03-20 20:18:56 +00001267 Opnds.push_back(
1268 CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
Tim Renoufcfdfba92019-03-18 19:35:44 +00001269 }
Matt Arsenault84445dd2017-11-30 22:51:26 +00001270
Matt Arsenault966a94f2015-09-08 19:34:22 +00001271 MachineSDNode *MachineSub
Tim Renoufcfdfba92019-03-18 19:35:44 +00001272 = CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);
Matt Arsenault966a94f2015-09-08 19:34:22 +00001273
1274 Base = SDValue(MachineSub, 0);
1275 Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
1276 Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
1277 return true;
1278 }
1279 }
1280 }
1281 } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
Matt Arsenault1a74aff2014-10-15 18:06:43 +00001282 unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
1283 unsigned DWordOffset1 = DWordOffset0 + 1;
1284 assert(4 * DWordOffset0 == CAddr->getZExtValue());
1285
1286 if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001287 SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
Matt Arsenault1a74aff2014-10-15 18:06:43 +00001288 MachineSDNode *MovZero
1289 = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001290 DL, MVT::i32, Zero);
Matt Arsenault1a74aff2014-10-15 18:06:43 +00001291 Base = SDValue(MovZero, 0);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001292 Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
1293 Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
Matt Arsenault1a74aff2014-10-15 18:06:43 +00001294 return true;
1295 }
1296 }
1297
Tom Stellardf3fc5552014-08-22 18:49:35 +00001298 // default case
Matt Arsenault0efdd062016-09-09 22:29:28 +00001299
Tom Stellardf3fc5552014-08-22 18:49:35 +00001300 Base = Addr;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001301 Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
1302 Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
Tom Stellardf3fc5552014-08-22 18:49:35 +00001303 return true;
1304}
1305
Changpeng Fangb41574a2015-12-22 20:55:23 +00001306bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
Tom Stellard155bbb72014-08-11 22:18:17 +00001307 SDValue &VAddr, SDValue &SOffset,
1308 SDValue &Offset, SDValue &Offen,
1309 SDValue &Idxen, SDValue &Addr64,
1310 SDValue &GLC, SDValue &SLC,
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001311 SDValue &TFE, SDValue &DLC) const {
Changpeng Fangb41574a2015-12-22 20:55:23 +00001312 // The subtarget prefers to use flat instructions for global accesses.
1313 if (Subtarget->useFlatForGlobal())
1314 return false;
1315
Tom Stellardb02c2682014-06-24 23:33:07 +00001316 SDLoc DL(Addr);
1317
Jan Vesely43b7b5b2016-04-07 19:23:11 +00001318 if (!GLC.getNode())
1319 GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1320 if (!SLC.getNode())
1321 SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001322 TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001323 DLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
Tom Stellard155bbb72014-08-11 22:18:17 +00001324
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001325 Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1326 Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1327 Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
1328 SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
Tom Stellard155bbb72014-08-11 22:18:17 +00001329
Tim Renouff1c7b922018-08-02 22:53:57 +00001330 ConstantSDNode *C1 = nullptr;
1331 SDValue N0 = Addr;
Tom Stellardb02c2682014-06-24 23:33:07 +00001332 if (CurDAG->isBaseWithConstantOffset(Addr)) {
Tim Renouff1c7b922018-08-02 22:53:57 +00001333 C1 = cast<ConstantSDNode>(Addr.getOperand(1));
1334 if (isUInt<32>(C1->getZExtValue()))
1335 N0 = Addr.getOperand(0);
1336 else
1337 C1 = nullptr;
Tom Stellardb02c2682014-06-24 23:33:07 +00001338 }
Tom Stellard94b72312015-02-11 00:34:35 +00001339
Tim Renouff1c7b922018-08-02 22:53:57 +00001340 if (N0.getOpcode() == ISD::ADD) {
1341 // (add N2, N3) -> addr64, or
1342 // (add (add N2, N3), C1) -> addr64
1343 SDValue N2 = N0.getOperand(0);
1344 SDValue N3 = N0.getOperand(1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001345 Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
Tim Renouff1c7b922018-08-02 22:53:57 +00001346
1347 if (N2->isDivergent()) {
1348 if (N3->isDivergent()) {
1349 // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
1350 // addr64, and construct the resource from a 0 address.
1351 Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1352 VAddr = N0;
1353 } else {
1354 // N2 is divergent, N3 is not.
1355 Ptr = N3;
1356 VAddr = N2;
1357 }
1358 } else {
1359 // N2 is not divergent.
1360 Ptr = N2;
1361 VAddr = N3;
1362 }
1363 Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1364 } else if (N0->isDivergent()) {
1365 // N0 is divergent. Use it as the addr64, and construct the resource from a
1366 // 0 address.
1367 Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1368 VAddr = N0;
1369 Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1370 } else {
1371 // N0 -> offset, or
1372 // (N0 + C1) -> offset
1373 VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
Tom Stellard155bbb72014-08-11 22:18:17 +00001374 Ptr = N0;
Tim Renouff1c7b922018-08-02 22:53:57 +00001375 }
1376
1377 if (!C1) {
1378 // No offset.
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001379 Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
Changpeng Fangb41574a2015-12-22 20:55:23 +00001380 return true;
Tom Stellardb02c2682014-06-24 23:33:07 +00001381 }
1382
Tim Renouff1c7b922018-08-02 22:53:57 +00001383 if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
1384 // Legal offset for instruction.
1385 Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1386 return true;
1387 }
Changpeng Fangb41574a2015-12-22 20:55:23 +00001388
Tim Renouff1c7b922018-08-02 22:53:57 +00001389 // Illegal offset, store it in soffset.
1390 Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1391 SOffset =
1392 SDValue(CurDAG->getMachineNode(
1393 AMDGPU::S_MOV_B32, DL, MVT::i32,
1394 CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
1395 0);
Changpeng Fangb41574a2015-12-22 20:55:23 +00001396 return true;
Tom Stellard155bbb72014-08-11 22:18:17 +00001397}
1398
1399bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
Tom Stellardc53861a2015-02-11 00:34:32 +00001400 SDValue &VAddr, SDValue &SOffset,
Tom Stellard1f9939f2015-02-27 14:59:41 +00001401 SDValue &Offset, SDValue &GLC,
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001402 SDValue &SLC, SDValue &TFE,
1403 SDValue &DLC) const {
Tom Stellard1f9939f2015-02-27 14:59:41 +00001404 SDValue Ptr, Offen, Idxen, Addr64;
Tom Stellard155bbb72014-08-11 22:18:17 +00001405
Tom Stellard70580f82015-07-20 14:28:41 +00001406 // The addr64 bit was removed for Volcanic Islands.
Matt Arsenaulte4c2e9b2019-06-19 23:54:58 +00001407 if (!Subtarget->hasAddr64())
Tom Stellard70580f82015-07-20 14:28:41 +00001408 return false;
1409
Changpeng Fangb41574a2015-12-22 20:55:23 +00001410 if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001411 GLC, SLC, TFE, DLC))
Changpeng Fangb41574a2015-12-22 20:55:23 +00001412 return false;
Tom Stellard155bbb72014-08-11 22:18:17 +00001413
1414 ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1415 if (C->getSExtValue()) {
1416 SDLoc DL(Addr);
Matt Arsenault485defe2014-11-05 19:01:17 +00001417
1418 const SITargetLowering& Lowering =
1419 *static_cast<const SITargetLowering*>(getTargetLowering());
1420
1421 SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
Tom Stellard155bbb72014-08-11 22:18:17 +00001422 return true;
1423 }
Matt Arsenault485defe2014-11-05 19:01:17 +00001424
Tom Stellard155bbb72014-08-11 22:18:17 +00001425 return false;
1426}
1427
Tom Stellard7980fc82014-09-25 18:30:26 +00001428bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
Tom Stellardc53861a2015-02-11 00:34:32 +00001429 SDValue &VAddr, SDValue &SOffset,
NAKAMURA Takumi0a7d0ad2015-09-22 11:15:07 +00001430 SDValue &Offset,
1431 SDValue &SLC) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001432 SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001433 SDValue GLC, TFE, DLC;
Tom Stellard7980fc82014-09-25 18:30:26 +00001434
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001435 return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE, DLC);
Tom Stellard7980fc82014-09-25 18:30:26 +00001436}
1437
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001438static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
1439 auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
1440 return PSV && PSV->isStack();
Matt Arsenaultac0fc842016-09-17 16:09:55 +00001441}
1442
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001443std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1444 const MachineFunction &MF = CurDAG->getMachineFunction();
1445 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1446
1447 if (auto FI = dyn_cast<FrameIndexSDNode>(N)) {
1448 SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1449 FI->getValueType(0));
1450
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001451 // If we can resolve this to a frame index access, this will be relative to
1452 // either the stack or frame pointer SGPR.
1453 return std::make_pair(
1454 TFI, CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32));
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001455 }
1456
1457 // If we don't know this private access is a local stack object, it needs to
1458 // be relative to the entry point's scratch wave offset register.
1459 return std::make_pair(N, CurDAG->getRegister(Info->getScratchWaveOffsetReg(),
1460 MVT::i32));
1461}
1462
Matt Arsenaultb81495d2017-09-20 05:01:53 +00001463bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001464 SDValue Addr, SDValue &Rsrc,
Matt Arsenault0774ea22017-04-24 19:40:59 +00001465 SDValue &VAddr, SDValue &SOffset,
1466 SDValue &ImmOffset) const {
Tom Stellardb02094e2014-07-21 15:45:01 +00001467
1468 SDLoc DL(Addr);
1469 MachineFunction &MF = CurDAG->getMachineFunction();
Matt Arsenault0e3d3892015-11-30 21:15:53 +00001470 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Tom Stellardb02094e2014-07-21 15:45:01 +00001471
Matt Arsenault0e3d3892015-11-30 21:15:53 +00001472 Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
Tom Stellardb02094e2014-07-21 15:45:01 +00001473
Matt Arsenault0774ea22017-04-24 19:40:59 +00001474 if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1475 unsigned Imm = CAddr->getZExtValue();
Matt Arsenault0774ea22017-04-24 19:40:59 +00001476
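    // For example, a constant address of 0x1404 is split into 0x1000,
    // materialized into the vaddr VGPR below, and 0x404 kept as the immediate
    // offset, since the hardware immediate holds at most 12 bits.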
1477 SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
1478 MachineSDNode *MovHighBits = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
1479 DL, MVT::i32, HighBits);
1480 VAddr = SDValue(MovHighBits, 0);
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001481
1482 // In a call sequence, stores to the argument stack area are relative to the
1483 // stack pointer.
Matt Arsenaultb81495d2017-09-20 05:01:53 +00001484 const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001485 unsigned SOffsetReg = isStackPtrRelative(PtrInfo) ?
1486 Info->getStackPtrOffsetReg() : Info->getScratchWaveOffsetReg();
1487
1488 SOffset = CurDAG->getRegister(SOffsetReg, MVT::i32);
Matt Arsenault0774ea22017-04-24 19:40:59 +00001489 ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
1490 return true;
1491 }
1492
Tom Stellardb02094e2014-07-21 15:45:01 +00001493 if (CurDAG->isBaseWithConstantOffset(Addr)) {
Matt Arsenault0774ea22017-04-24 19:40:59 +00001494 // (add n0, c1)
1495
Tom Stellard78655fc2015-07-16 19:40:09 +00001496 SDValue N0 = Addr.getOperand(0);
Tom Stellardb02094e2014-07-21 15:45:01 +00001497 SDValue N1 = Addr.getOperand(1);
Matt Arsenaultcd099612016-02-24 04:55:29 +00001498
Matt Arsenaultcaf0ed42017-11-30 00:52:40 +00001499 // Offsets in vaddr must be positive if range checking is enabled.
Matt Arsenault45b98182017-11-15 00:45:43 +00001500 //
Matt Arsenaultcaf0ed42017-11-30 00:52:40 +00001501 // The total computation of vaddr + soffset + offset must not overflow. If
1502 // vaddr is negative, even if offset is 0 the sgpr offset add will end up
Matt Arsenault45b98182017-11-15 00:45:43 +00001503 // overflowing.
Matt Arsenaultcaf0ed42017-11-30 00:52:40 +00001504 //
1505 // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
1506 // always perform a range check. If a negative vaddr base index was used,
1507 // this would fail the range check. The overall address computation would
1508 // compute a valid address, but this doesn't happen due to the range
1509 // check. For out-of-bounds MUBUF loads, a 0 is returned.
1510 //
1511 // Therefore it should be safe to fold any VGPR offset on gfx9 into the
1512 // MUBUF vaddr, but not on older subtargets which can only do this if the
1513 // sign bit is known 0.
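    // As an illustrative case: with vaddr = -16 and an immediate offset of 16
    // the final address is 0, which is fine on gfx9, but the pre-gfx9 range
    // check on the negative vaddr rejects the access and a load returns 0.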
Matt Arsenaultcd099612016-02-24 04:55:29 +00001514 ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
Matt Arsenault45b98182017-11-15 00:45:43 +00001515 if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
Matt Arsenaultcaf0ed42017-11-30 00:52:40 +00001516 (!Subtarget->privateMemoryResourceIsRangeChecked() ||
1517 CurDAG->SignBitIsZero(N0))) {
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001518 std::tie(VAddr, SOffset) = foldFrameIndex(N0);
Matt Arsenaultcd099612016-02-24 04:55:29 +00001519 ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1520 return true;
Tom Stellardb02094e2014-07-21 15:45:01 +00001521 }
1522 }
1523
Tom Stellardb02094e2014-07-21 15:45:01 +00001524 // (node)
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001525 std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001526 ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
Tom Stellardb02094e2014-07-21 15:45:01 +00001527 return true;
1528}
1529
Matt Arsenaultb81495d2017-09-20 05:01:53 +00001530bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001531 SDValue Addr,
Matt Arsenault0774ea22017-04-24 19:40:59 +00001532 SDValue &SRsrc,
1533 SDValue &SOffset,
1534 SDValue &Offset) const {
1535 ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
Marek Olsakffadcb72017-11-09 01:52:17 +00001536 if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
Matt Arsenault0774ea22017-04-24 19:40:59 +00001537 return false;
1538
1539 SDLoc DL(Addr);
1540 MachineFunction &MF = CurDAG->getMachineFunction();
1541 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1542
1543 SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001544
Matt Arsenaultb81495d2017-09-20 05:01:53 +00001545 const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
Matt Arsenault156d3ae2017-05-17 21:02:58 +00001546 unsigned SOffsetReg = isStackPtrRelative(PtrInfo) ?
1547 Info->getStackPtrOffsetReg() : Info->getScratchWaveOffsetReg();
1548
1549 // FIXME: Get from MachinePointerInfo? We should only be using the frame
1550 // offset if we know this is in a call sequence.
1551 SOffset = CurDAG->getRegister(SOffsetReg, MVT::i32);
1552
Matt Arsenault0774ea22017-04-24 19:40:59 +00001553 Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1554 return true;
1555}
1556
Tom Stellard155bbb72014-08-11 22:18:17 +00001557bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1558 SDValue &SOffset, SDValue &Offset,
1559 SDValue &GLC, SDValue &SLC,
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001560 SDValue &TFE, SDValue &DLC) const {
Tom Stellard155bbb72014-08-11 22:18:17 +00001561 SDValue Ptr, VAddr, Offen, Idxen, Addr64;
Tom Stellard794c8c02014-12-02 17:05:41 +00001562 const SIInstrInfo *TII =
Eric Christopher7792e322015-01-30 23:24:40 +00001563 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
Tom Stellardb02094e2014-07-21 15:45:01 +00001564
Changpeng Fangb41574a2015-12-22 20:55:23 +00001565 if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001566 GLC, SLC, TFE, DLC))
Changpeng Fangb41574a2015-12-22 20:55:23 +00001567 return false;
Tom Stellardb02094e2014-07-21 15:45:01 +00001568
Tom Stellard155bbb72014-08-11 22:18:17 +00001569 if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
1570 !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
1571 !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
Tom Stellard794c8c02014-12-02 17:05:41 +00001572 uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
Tom Stellard155bbb72014-08-11 22:18:17 +00001573 APInt::getAllOnesValue(32).getZExtValue(); // Size
1574 SDLoc DL(Addr);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00001575
1576 const SITargetLowering& Lowering =
1577 *static_cast<const SITargetLowering*>(getTargetLowering());
1578
1579 SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
Tom Stellard155bbb72014-08-11 22:18:17 +00001580 return true;
1581 }
1582 return false;
Tom Stellardb02094e2014-07-21 15:45:01 +00001583}
1584
Tom Stellard7980fc82014-09-25 18:30:26 +00001585bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
Jan Vesely43b7b5b2016-04-07 19:23:11 +00001586 SDValue &Soffset, SDValue &Offset
1587 ) const {
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001588 SDValue GLC, SLC, TFE, DLC;
Jan Vesely43b7b5b2016-04-07 19:23:11 +00001589
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001590 return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC);
Jan Vesely43b7b5b2016-04-07 19:23:11 +00001591}
1592bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
Tom Stellard7980fc82014-09-25 18:30:26 +00001593 SDValue &Soffset, SDValue &Offset,
Matt Arsenault88701812016-06-09 23:42:48 +00001594 SDValue &SLC) const {
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001595 SDValue GLC, TFE, DLC;
Tom Stellard7980fc82014-09-25 18:30:26 +00001596
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001597 return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE, DLC);
Tom Stellard7980fc82014-09-25 18:30:26 +00001598}
1599
Matt Arsenault4e309b02017-07-29 01:03:53 +00001600template <bool IsSigned>
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001601bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N,
1602 SDValue Addr,
Matt Arsenaultdb7c6a82017-06-12 16:53:51 +00001603 SDValue &VAddr,
1604 SDValue &Offset,
1605 SDValue &SLC) const {
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001606 return static_cast<const SITargetLowering*>(getTargetLowering())->
1607 SelectFlatOffset(IsSigned, *CurDAG, N, Addr, VAddr, Offset, SLC);
Matt Arsenault7757c592016-06-09 23:42:54 +00001608}
1609
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001610bool AMDGPUDAGToDAGISel::SelectFlatAtomic(SDNode *N,
1611 SDValue Addr,
Matt Arsenaultdb7c6a82017-06-12 16:53:51 +00001612 SDValue &VAddr,
1613 SDValue &Offset,
1614 SDValue &SLC) const {
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001615 return SelectFlatOffset<false>(N, Addr, VAddr, Offset, SLC);
Matt Arsenault4e309b02017-07-29 01:03:53 +00001616}
1617
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001618bool AMDGPUDAGToDAGISel::SelectFlatAtomicSigned(SDNode *N,
1619 SDValue Addr,
Matt Arsenault4e309b02017-07-29 01:03:53 +00001620 SDValue &VAddr,
1621 SDValue &Offset,
1622 SDValue &SLC) const {
Stanislav Mekhanoshina6322942019-04-30 22:08:23 +00001623 return SelectFlatOffset<true>(N, Addr, VAddr, Offset, SLC);
Matt Arsenaultdb7c6a82017-06-12 16:53:51 +00001624}
1625
Tom Stellarddee26a22015-08-06 19:28:30 +00001626bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
1627 SDValue &Offset, bool &Imm) const {
1628
1629 // FIXME: Handle non-constant offsets.
1630 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
1631 if (!C)
1632 return false;
1633
1634 SDLoc SL(ByteOffsetNode);
Tom Stellard5bfbae52018-07-11 20:59:01 +00001635 GCNSubtarget::Generation Gen = Subtarget->getGeneration();
Tom Stellarddee26a22015-08-06 19:28:30 +00001636 int64_t ByteOffset = C->getSExtValue();
Tom Stellard08efb7e2017-01-27 18:41:14 +00001637 int64_t EncodedOffset = AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset);
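  // For example, a byte offset of 64 encodes as 16 on SI/CI, where the SMRD
  // immediate is expressed in dwords; VI and later keep the offset in bytes.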
Tom Stellarddee26a22015-08-06 19:28:30 +00001638
Tom Stellard08efb7e2017-01-27 18:41:14 +00001639 if (AMDGPU::isLegalSMRDImmOffset(*Subtarget, ByteOffset)) {
Tom Stellarddee26a22015-08-06 19:28:30 +00001640 Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
1641 Imm = true;
1642 return true;
1643 }
1644
Tom Stellard217361c2015-08-06 19:28:38 +00001645 if (!isUInt<32>(EncodedOffset) || !isUInt<32>(ByteOffset))
1646 return false;
1647
Marek Olsak8973a0a2017-05-24 14:53:50 +00001648 if (Gen == AMDGPUSubtarget::SEA_ISLANDS && isUInt<32>(EncodedOffset)) {
1649 // 32-bit immediates are supported on Sea Islands.
Tom Stellard217361c2015-08-06 19:28:38 +00001650 Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
1651 } else {
Tom Stellarddee26a22015-08-06 19:28:30 +00001652 SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
1653 Offset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32,
1654 C32Bit), 0);
Tom Stellarddee26a22015-08-06 19:28:30 +00001655 }
Tom Stellard217361c2015-08-06 19:28:38 +00001656 Imm = false;
1657 return true;
Tom Stellarddee26a22015-08-06 19:28:30 +00001658}
1659
Matt Arsenault923712b2018-02-09 16:57:57 +00001660SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
1661 if (Addr.getValueType() != MVT::i32)
1662 return Addr;
1663
1664 // Zero-extend a 32-bit address.
1665 SDLoc SL(Addr);
1666
1667 const MachineFunction &MF = CurDAG->getMachineFunction();
1668 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1669 unsigned AddrHiVal = Info->get32BitAddressHighBits();
1670 SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
1671
1672 const SDValue Ops[] = {
1673 CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
1674 Addr,
1675 CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
1676 SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
1677 0),
1678 CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
1679 };
1680
1681 return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
1682 Ops), 0);
1683}
1684
Tom Stellarddee26a22015-08-06 19:28:30 +00001685bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
1686 SDValue &Offset, bool &Imm) const {
Tom Stellarddee26a22015-08-06 19:28:30 +00001687 SDLoc SL(Addr);
Matt Arsenault923712b2018-02-09 16:57:57 +00001688
Marek Olsak3fc20792018-08-29 20:03:00 +00001689 // A 32-bit (address + offset) should not cause unsigned 32-bit integer
1690 // wraparound, because s_load instructions perform the addition in 64 bits.
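  // For example, with a base of 0xFFFFFFF8 and an offset of 16, a 32-bit add
  // wraps to 8 while the s_load address computation yields 0x100000008, so
  // the fold is only done when the add is known not to wrap (nuw).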
1691 if ((Addr.getValueType() != MVT::i32 ||
1692 Addr->getFlags().hasNoUnsignedWrap()) &&
1693 CurDAG->isBaseWithConstantOffset(Addr)) {
Tom Stellarddee26a22015-08-06 19:28:30 +00001694 SDValue N0 = Addr.getOperand(0);
1695 SDValue N1 = Addr.getOperand(1);
1696
1697 if (SelectSMRDOffset(N1, Offset, Imm)) {
Matt Arsenault923712b2018-02-09 16:57:57 +00001698 SBase = Expand32BitAddress(N0);
Tom Stellarddee26a22015-08-06 19:28:30 +00001699 return true;
1700 }
1701 }
Matt Arsenault923712b2018-02-09 16:57:57 +00001702 SBase = Expand32BitAddress(Addr);
Tom Stellarddee26a22015-08-06 19:28:30 +00001703 Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
1704 Imm = true;
1705 return true;
1706}
1707
1708bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
1709 SDValue &Offset) const {
1710 bool Imm;
Marek Olsak8973a0a2017-05-24 14:53:50 +00001711 return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
1712}
Tom Stellarddee26a22015-08-06 19:28:30 +00001713
Marek Olsak8973a0a2017-05-24 14:53:50 +00001714bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
1715 SDValue &Offset) const {
1716
1717 if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
1718 return false;
1719
1720 bool Imm;
Tom Stellard217361c2015-08-06 19:28:38 +00001721 if (!SelectSMRD(Addr, SBase, Offset, Imm))
1722 return false;
1723
Marek Olsak8973a0a2017-05-24 14:53:50 +00001724 return !Imm && isa<ConstantSDNode>(Offset);
Tom Stellard217361c2015-08-06 19:28:38 +00001725}
1726
Tom Stellarddee26a22015-08-06 19:28:30 +00001727bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
1728 SDValue &Offset) const {
1729 bool Imm;
Tom Stellard217361c2015-08-06 19:28:38 +00001730 return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
1731 !isa<ConstantSDNode>(Offset);
Tom Stellarddee26a22015-08-06 19:28:30 +00001732}
1733
1734bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
1735 SDValue &Offset) const {
1736 bool Imm;
Marek Olsak8973a0a2017-05-24 14:53:50 +00001737 return SelectSMRDOffset(Addr, Offset, Imm) && Imm;
1738}
Tom Stellarddee26a22015-08-06 19:28:30 +00001739
Marek Olsak8973a0a2017-05-24 14:53:50 +00001740bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
1741 SDValue &Offset) const {
1742 if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
1743 return false;
1744
1745 bool Imm;
Tom Stellard217361c2015-08-06 19:28:38 +00001746 if (!SelectSMRDOffset(Addr, Offset, Imm))
1747 return false;
1748
Marek Olsak8973a0a2017-05-24 14:53:50 +00001749 return !Imm && isa<ConstantSDNode>(Offset);
Tom Stellard217361c2015-08-06 19:28:38 +00001750}
1751
Nicolai Haehnle7968c342016-07-12 08:12:16 +00001752bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
1753 SDValue &Base,
1754 SDValue &Offset) const {
Matt Arsenault1322b6f2016-07-09 01:13:56 +00001755 SDLoc DL(Index);
1756
1757 if (CurDAG->isBaseWithConstantOffset(Index)) {
1758 SDValue N0 = Index.getOperand(0);
1759 SDValue N1 = Index.getOperand(1);
1760 ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1761
1762 // (add n0, c0)
Changpeng Fang6f539292018-12-21 20:57:34 +00001763 // Don't peel off the offset (c0) if doing so could possibly lead
1764 // the base (n0) to be negative.
1765 if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0)) {
1766 Base = N0;
1767 Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
1768 return true;
1769 }
Matt Arsenault1322b6f2016-07-09 01:13:56 +00001770 }
1771
Nicolai Haehnle7968c342016-07-12 08:12:16 +00001772 if (isa<ConstantSDNode>(Index))
1773 return false;
Matt Arsenault1322b6f2016-07-09 01:13:56 +00001774
1775 Base = Index;
1776 Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1777 return true;
1778}
1779
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001780SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
1781 SDValue Val, uint32_t Offset,
1782 uint32_t Width) {
Marek Olsak9b728682015-03-24 13:40:27 +00001783 // Transformation function, pack the offset and width of a BFE into
1784 // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
1785 // source, bits [5:0] contain the offset and bits [22:16] the width.
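  // For example, extracting 16 bits starting at bit 8 (Offset = 8, Width = 16)
  // packs to 8 | (16 << 16) == 0x00100008 as the second source operand.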
1786 uint32_t PackedVal = Offset | (Width << 16);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001787 SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
Marek Olsak9b728682015-03-24 13:40:27 +00001788
1789 return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
1790}
1791
Justin Bogner95927c02016-05-12 21:03:32 +00001792void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
Marek Olsak9b728682015-03-24 13:40:27 +00001793 // "((a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)"
1794 // "((a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)"
1795 // Predicate: 0 < b <= c < 32
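  // For example, "((x << 8) srl 24)" becomes "BFE_U32 x, 16, 8", i.e. it
  // extracts bits [23:16] of x.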
1796
1797 const SDValue &Shl = N->getOperand(0);
1798 ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
1799 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
1800
1801 if (B && C) {
1802 uint32_t BVal = B->getZExtValue();
1803 uint32_t CVal = C->getZExtValue();
1804
1805 if (0 < BVal && BVal <= CVal && CVal < 32) {
1806 bool Signed = N->getOpcode() == ISD::SRA;
1807 unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1808
Justin Bogner95927c02016-05-12 21:03:32 +00001809 ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
1810 32 - CVal));
1811 return;
Marek Olsak9b728682015-03-24 13:40:27 +00001812 }
1813 }
Justin Bogner95927c02016-05-12 21:03:32 +00001814 SelectCode(N);
Marek Olsak9b728682015-03-24 13:40:27 +00001815}
1816
Justin Bogner95927c02016-05-12 21:03:32 +00001817void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
Marek Olsak9b728682015-03-24 13:40:27 +00001818 switch (N->getOpcode()) {
1819 case ISD::AND:
1820 if (N->getOperand(0).getOpcode() == ISD::SRL) {
1821 // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
1822 // Predicate: isMask(mask)
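      // For example, "(x srl 4) & 0xff" becomes "BFE_U32 x, 4, 8"
      // (popcount(0xff) == 8), extracting bits [11:4] of x.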
1823 const SDValue &Srl = N->getOperand(0);
1824 ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
1825 ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
1826
1827 if (Shift && Mask) {
1828 uint32_t ShiftVal = Shift->getZExtValue();
1829 uint32_t MaskVal = Mask->getZExtValue();
1830
1831 if (isMask_32(MaskVal)) {
1832 uint32_t WidthVal = countPopulation(MaskVal);
1833
Justin Bogner95927c02016-05-12 21:03:32 +00001834 ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
1835 Srl.getOperand(0), ShiftVal, WidthVal));
1836 return;
Marek Olsak9b728682015-03-24 13:40:27 +00001837 }
1838 }
1839 }
1840 break;
1841 case ISD::SRL:
1842 if (N->getOperand(0).getOpcode() == ISD::AND) {
1843 // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
1844 // Predicate: isMask(mask >> b)
1845 const SDValue &And = N->getOperand(0);
1846 ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
1847 ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
1848
1849 if (Shift && Mask) {
1850 uint32_t ShiftVal = Shift->getZExtValue();
1851 uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
1852
1853 if (isMask_32(MaskVal)) {
1854 uint32_t WidthVal = countPopulation(MaskVal);
1855
Justin Bogner95927c02016-05-12 21:03:32 +00001856 ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
1857 And.getOperand(0), ShiftVal, WidthVal));
1858 return;
Marek Olsak9b728682015-03-24 13:40:27 +00001859 }
1860 }
Justin Bogner95927c02016-05-12 21:03:32 +00001861 } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
1862 SelectS_BFEFromShifts(N);
1863 return;
1864 }
Marek Olsak9b728682015-03-24 13:40:27 +00001865 break;
1866 case ISD::SRA:
Justin Bogner95927c02016-05-12 21:03:32 +00001867 if (N->getOperand(0).getOpcode() == ISD::SHL) {
1868 SelectS_BFEFromShifts(N);
1869 return;
1870 }
Marek Olsak9b728682015-03-24 13:40:27 +00001871 break;
Matt Arsenault7e8de012016-04-22 22:59:16 +00001872
1873 case ISD::SIGN_EXTEND_INREG: {
1874 // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
1875 SDValue Src = N->getOperand(0);
1876 if (Src.getOpcode() != ISD::SRL)
1877 break;
1878
1879 const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
1880 if (!Amt)
1881 break;
1882
1883 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
Justin Bogner95927c02016-05-12 21:03:32 +00001884 ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
1885 Amt->getZExtValue(), Width));
1886 return;
Matt Arsenault7e8de012016-04-22 22:59:16 +00001887 }
Marek Olsak9b728682015-03-24 13:40:27 +00001888 }
1889
Justin Bogner95927c02016-05-12 21:03:32 +00001890 SelectCode(N);
Marek Olsak9b728682015-03-24 13:40:27 +00001891}
1892
Matt Arsenault7b1dc2c2016-09-17 02:02:19 +00001893bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
1894 assert(N->getOpcode() == ISD::BRCOND);
1895 if (!N->hasOneUse())
1896 return false;
1897
1898 SDValue Cond = N->getOperand(1);
1899 if (Cond.getOpcode() == ISD::CopyToReg)
1900 Cond = Cond.getOperand(2);
1901
1902 if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
1903 return false;
1904
1905 MVT VT = Cond.getOperand(0).getSimpleValueType();
1906 if (VT == MVT::i32)
1907 return true;
1908
1909 if (VT == MVT::i64) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00001910 auto ST = static_cast<const GCNSubtarget *>(Subtarget);
Matt Arsenault7b1dc2c2016-09-17 02:02:19 +00001911
1912 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
1913 return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
1914 }
1915
1916 return false;
1917}
1918
Justin Bogner95927c02016-05-12 21:03:32 +00001919void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00001920 SDValue Cond = N->getOperand(1);
1921
Matt Arsenault327188a2016-12-15 21:57:11 +00001922 if (Cond.isUndef()) {
1923 CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
1924 N->getOperand(2), N->getOperand(0));
1925 return;
1926 }
1927
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00001928 const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
1929 const SIRegisterInfo *TRI = ST->getRegisterInfo();
1930
Matt Arsenaultd674e0a2017-10-10 20:34:49 +00001931 bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
1932 unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00001933 unsigned CondReg = UseSCCBr ? (unsigned)AMDGPU::SCC : TRI->getVCC();
Tom Stellardbc4497b2016-02-12 23:45:29 +00001934 SDLoc SL(N);
1935
Tim Renouf6eaad1e2018-01-09 21:34:43 +00001936 if (!UseSCCBr) {
1937 // This is the case that we are selecting to S_CBRANCH_VCCNZ. We have not
1938 // analyzed what generates the vcc value, so we do not know whether vcc
1939 // bits for disabled lanes are 0. Thus we need to mask out bits for
1940 // disabled lanes.
1941 //
1942 // For the case that we select S_CBRANCH_SCC1 and it gets
1943 // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
1944 // SIInstrInfo::moveToVALU which inserts the S_AND.
1945 //
1946 // We could add an analysis of what generates the vcc value here and omit
1947 // the S_AND when it is unnecessary. But it would be better to add a separate
1948 // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
1949 // catches both cases.
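    // The sequence produced here is roughly:
    //   %masked = S_AND_B64 $exec, %cond   (S_AND_B32 / exec_lo on wave32)
    //   $vcc    = COPY %masked
    //   S_CBRANCH_VCCNZ %target_bb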
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00001950 Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
1951 : AMDGPU::S_AND_B64,
1952 SL, MVT::i1,
1953 CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
1954 : AMDGPU::EXEC,
1955 MVT::i1),
1956 Cond),
Tim Renouf6eaad1e2018-01-09 21:34:43 +00001957 0);
1958 }
1959
Matt Arsenaultd674e0a2017-10-10 20:34:49 +00001960 SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
1961 CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
Justin Bogner95927c02016-05-12 21:03:32 +00001962 N->getOperand(2), // Basic Block
Matt Arsenaultf530e8b2016-11-07 19:09:33 +00001963 VCC.getValue(0));
Tom Stellardbc4497b2016-02-12 23:45:29 +00001964}
1965
Matt Arsenault0084adc2018-04-30 19:08:16 +00001966void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
Matt Arsenaultd7e23032017-09-07 18:05:07 +00001967 MVT VT = N->getSimpleValueType(0);
Matt Arsenault0084adc2018-04-30 19:08:16 +00001968 bool IsFMA = N->getOpcode() == ISD::FMA;
1969 if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
1970 !Subtarget->hasFmaMixInsts()) ||
1971 ((IsFMA && Subtarget->hasMadMixInsts()) ||
1972 (!IsFMA && Subtarget->hasFmaMixInsts()))) {
Matt Arsenaultd7e23032017-09-07 18:05:07 +00001973 SelectCode(N);
1974 return;
1975 }
1976
1977 SDValue Src0 = N->getOperand(0);
1978 SDValue Src1 = N->getOperand(1);
1979 SDValue Src2 = N->getOperand(2);
1980 unsigned Src0Mods, Src1Mods, Src2Mods;
1981
Matt Arsenault0084adc2018-04-30 19:08:16 +00001982 // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
1983 // using the conversion from f16.
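  // For example, (fma (fpext f16 %a), %b, %c) has a source converted from
  // f16, so it can use the mixed-precision instruction; if none of the three
  // sources is such a conversion we fall back to normal selection below.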
Matt Arsenaultd7e23032017-09-07 18:05:07 +00001984 bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
1985 bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
1986 bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);
1987
Matt Arsenault0084adc2018-04-30 19:08:16 +00001988 assert((IsFMA || !Subtarget->hasFP32Denormals()) &&
Matt Arsenaultd7e23032017-09-07 18:05:07 +00001989 "fmad selected with denormals enabled");
1990 // TODO: We can select this with f32 denormals enabled if all the sources are
1991 // converted from f16 (in which case fmad isn't legal).
1992
1993 if (Sel0 || Sel1 || Sel2) {
1994 // For dummy operands.
1995 SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
1996 SDValue Ops[] = {
1997 CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
1998 CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
1999 CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
2000 CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
2001 Zero, Zero
2002 };
2003
Matt Arsenault0084adc2018-04-30 19:08:16 +00002004 CurDAG->SelectNodeTo(N,
2005 IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
2006 MVT::f32, Ops);
Matt Arsenaultd7e23032017-09-07 18:05:07 +00002007 } else {
2008 SelectCode(N);
2009 }
2010}
2011
Matt Arsenault88701812016-06-09 23:42:48 +00002012// This is here because there isn't a way to use the generated sub0_sub1 as the
2013// subreg index to EXTRACT_SUBREG in tablegen.
2014void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
2015 MemSDNode *Mem = cast<MemSDNode>(N);
2016 unsigned AS = Mem->getAddressSpace();
Matt Arsenault0da63502018-08-31 05:49:54 +00002017 if (AS == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault7757c592016-06-09 23:42:54 +00002018 SelectCode(N);
2019 return;
2020 }
Matt Arsenault88701812016-06-09 23:42:48 +00002021
2022 MVT VT = N->getSimpleValueType(0);
2023 bool Is32 = (VT == MVT::i32);
2024 SDLoc SL(N);
2025
2026 MachineSDNode *CmpSwap = nullptr;
2027 if (Subtarget->hasAddr64()) {
Vitaly Buka74503982017-10-15 05:35:02 +00002028 SDValue SRsrc, VAddr, SOffset, Offset, SLC;
Matt Arsenault88701812016-06-09 23:42:48 +00002029
2030 if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
Matt Arsenaulte5456ce2017-07-20 21:06:04 +00002031 unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN :
2032 AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
Matt Arsenault88701812016-06-09 23:42:48 +00002033 SDValue CmpVal = Mem->getOperand(2);
2034
2035 // XXX - Do we care about glue operands?
2036
2037 SDValue Ops[] = {
2038 CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain()
2039 };
2040
2041 CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2042 }
2043 }
2044
2045 if (!CmpSwap) {
2046 SDValue SRsrc, SOffset, Offset, SLC;
2047 if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
Matt Arsenaulte5456ce2017-07-20 21:06:04 +00002048 unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN :
2049 AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;
Matt Arsenault88701812016-06-09 23:42:48 +00002050
2051 SDValue CmpVal = Mem->getOperand(2);
2052 SDValue Ops[] = {
2053 CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain()
2054 };
2055
2056 CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2057 }
2058 }
2059
2060 if (!CmpSwap) {
2061 SelectCode(N);
2062 return;
2063 }
2064
Chandler Carruth66654b72018-08-14 23:30:32 +00002065 MachineMemOperand *MMO = Mem->getMemOperand();
2066 CurDAG->setNodeMemRefs(CmpSwap, {MMO});
Matt Arsenault88701812016-06-09 23:42:48 +00002067
2068 unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
2069 SDValue Extract
2070 = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));
2071
2072 ReplaceUses(SDValue(N, 0), Extract);
2073 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
2074 CurDAG->RemoveDeadNode(N);
2075}
2076
Matt Arsenaultd3c84e62019-06-14 13:26:32 +00002077void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
Matt Arsenaultcdd191d2019-01-28 20:14:49 +00002078 // The address is assumed to be uniform, so if it ends up in a VGPR, it will
2079 // be copied to an SGPR with readfirstlane.
2080 unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
2081 AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
2082
2083 SDValue Chain = N->getOperand(0);
2084 SDValue Ptr = N->getOperand(2);
2085 MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
Matt Arsenault9e5fa332019-06-14 21:01:24 +00002086 MachineMemOperand *MMO = M->getMemOperand();
Matt Arsenaultcdd191d2019-01-28 20:14:49 +00002087 bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
2088
2089 SDValue Offset;
2090 if (CurDAG->isBaseWithConstantOffset(Ptr)) {
2091 SDValue PtrBase = Ptr.getOperand(0);
2092 SDValue PtrOffset = Ptr.getOperand(1);
2093
2094 const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
2095 if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue(), 16)) {
2096 N = glueCopyToM0(N, PtrBase);
2097 Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
2098 }
2099 }
2100
2101 if (!Offset) {
2102 N = glueCopyToM0(N, Ptr);
2103 Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2104 }
2105
2106 SDValue Ops[] = {
2107 Offset,
2108 CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
2109 Chain,
2110 N->getOperand(N->getNumOperands() - 1) // New glue
2111 };
2112
Matt Arsenault9e5fa332019-06-14 21:01:24 +00002113 SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2114 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
Matt Arsenaultcdd191d2019-01-28 20:14:49 +00002115}
2116
Matt Arsenault740322f2019-06-20 21:11:42 +00002117static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2118 switch (IntrID) {
2119 case Intrinsic::amdgcn_ds_gws_init:
2120 return AMDGPU::DS_GWS_INIT;
2121 case Intrinsic::amdgcn_ds_gws_barrier:
2122 return AMDGPU::DS_GWS_BARRIER;
2123 case Intrinsic::amdgcn_ds_gws_sema_v:
2124 return AMDGPU::DS_GWS_SEMA_V;
2125 case Intrinsic::amdgcn_ds_gws_sema_br:
2126 return AMDGPU::DS_GWS_SEMA_BR;
2127 case Intrinsic::amdgcn_ds_gws_sema_p:
2128 return AMDGPU::DS_GWS_SEMA_P;
2129 case Intrinsic::amdgcn_ds_gws_sema_release_all:
2130 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2131 default:
2132 llvm_unreachable("not a gws intrinsic");
2133 }
2134}
2135
Matt Arsenault4d55d022019-06-19 19:55:27 +00002136void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
Matt Arsenault740322f2019-06-20 21:11:42 +00002137 if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
2138 !Subtarget->hasGWSSemaReleaseAll()) {
2139 // Let this error.
2140 SelectCode(N);
2141 return;
2142 }
2143
2144 // Chain, intrinsic ID, vsrc, offset
2145 const bool HasVSrc = N->getNumOperands() == 4;
2146 assert(HasVSrc || N->getNumOperands() == 3);
2147
Matt Arsenault4d55d022019-06-19 19:55:27 +00002148 SDLoc SL(N);
Matt Arsenault740322f2019-06-20 21:11:42 +00002149 SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
Matt Arsenault4d55d022019-06-19 19:55:27 +00002150 int ImmOffset = 0;
2151 MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2152 MachineMemOperand *MMO = M->getMemOperand();
2153
2154 // Don't worry if the offset ends up in a VGPR. Only one lane will have an
2155 // effect, so SIFixSGPRCopies will validly insert readfirstlane.
2156
2157 // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
2158 // offset field) % 64. Some versions of the programming guide omit the m0
2159 // part, or claim it's from offset 0.
2160 if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
2161 // If we have a constant offset, try to use the default value for m0 as a
2162 // base to possibly avoid setting it up.
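    // With m0 = -1, M0[21:16] reads as 63, i.e. -1 modulo 64, so the offset
    // field encoded below is the requested offset plus 1 to cancel it out.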
2163 glueCopyToM0(N, CurDAG->getTargetConstant(-1, SL, MVT::i32));
2164 ImmOffset = ConstOffset->getZExtValue() + 1;
2165 } else {
2166 if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
2167 ImmOffset = BaseOffset.getConstantOperandVal(1);
2168 BaseOffset = BaseOffset.getOperand(0);
2169 }
2170
2171 // Prefer to do the shift in an SGPR since it should be possible to use m0
2172 // as the result directly. If it's already an SGPR, it will be eliminated
2173 // later.
2174 SDNode *SGPROffset
2175 = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
2176 BaseOffset);
2177 // Shift to offset in m0
2178 SDNode *M0Base
2179 = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
2180 SDValue(SGPROffset, 0),
2181 CurDAG->getTargetConstant(16, SL, MVT::i32));
2182 glueCopyToM0(N, SDValue(M0Base, 0));
2183 }
2184
Matt Arsenault740322f2019-06-20 21:11:42 +00002185 SDValue V0;
2186 SDValue Chain = N->getOperand(0);
2187 SDValue Glue;
2188 if (HasVSrc) {
2189 SDValue VSrc0 = N->getOperand(2);
Matt Arsenault4d55d022019-06-19 19:55:27 +00002190
Matt Arsenault740322f2019-06-20 21:11:42 +00002191 // The manual doesn't mention this, but it seems only v0 works.
2192 V0 = CurDAG->getRegister(AMDGPU::VGPR0, MVT::i32);
2193
2194 SDValue CopyToV0 = CurDAG->getCopyToReg(
2195 N->getOperand(0), SL, V0, VSrc0,
2196 N->getOperand(N->getNumOperands() - 1));
2197 Chain = CopyToV0;
2198 Glue = CopyToV0.getValue(1);
2199 }
Matt Arsenault4d55d022019-06-19 19:55:27 +00002200
2201 SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);
2202
2203 // TODO: Can this just be removed from the instruction?
2204 SDValue GDS = CurDAG->getTargetConstant(1, SL, MVT::i1);
2205
Matt Arsenault740322f2019-06-20 21:11:42 +00002206 const unsigned Opc = gwsIntrinToOpcode(IntrID);
2207 SmallVector<SDValue, 5> Ops;
2208 if (HasVSrc)
2209 Ops.push_back(V0);
2210 Ops.push_back(OffsetField);
2211 Ops.push_back(GDS);
2212 Ops.push_back(Chain);
Matt Arsenault4d55d022019-06-19 19:55:27 +00002213
Matt Arsenault740322f2019-06-20 21:11:42 +00002214 if (HasVSrc)
2215 Ops.push_back(Glue);
Matt Arsenault4d55d022019-06-19 19:55:27 +00002216
2217 SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2218 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2219}
2220
Matt Arsenaultd3c84e62019-06-14 13:26:32 +00002221void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2222 unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2223 switch (IntrID) {
2224 case Intrinsic::amdgcn_ds_append:
2225 case Intrinsic::amdgcn_ds_consume: {
2226 if (N->getValueType(0) != MVT::i32)
2227 break;
2228 SelectDSAppendConsume(N, IntrID);
2229 return;
2230 }
Matt Arsenault4d55d022019-06-19 19:55:27 +00002231 }
2232
2233 SelectCode(N);
2234}
2235
2236void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2237 unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2238 switch (IntrID) {
2239 case Intrinsic::amdgcn_ds_gws_init:
2240 case Intrinsic::amdgcn_ds_gws_barrier:
Matt Arsenault740322f2019-06-20 21:11:42 +00002241 case Intrinsic::amdgcn_ds_gws_sema_v:
2242 case Intrinsic::amdgcn_ds_gws_sema_br:
2243 case Intrinsic::amdgcn_ds_gws_sema_p:
2244 case Intrinsic::amdgcn_ds_gws_sema_release_all:
Matt Arsenault4d55d022019-06-19 19:55:27 +00002245 SelectDS_GWS(N, IntrID);
2246 return;
Matt Arsenaultd3c84e62019-06-14 13:26:32 +00002247 default:
2248 break;
2249 }
2250
2251 SelectCode(N);
2252}
2253
Matt Arsenaultd7e23032017-09-07 18:05:07 +00002254bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2255 unsigned &Mods) const {
2256 Mods = 0;
Tom Stellardb4a313a2014-08-01 00:32:39 +00002257 Src = In;
2258
2259 if (Src.getOpcode() == ISD::FNEG) {
2260 Mods |= SISrcMods::NEG;
2261 Src = Src.getOperand(0);
2262 }
2263
2264 if (Src.getOpcode() == ISD::FABS) {
2265 Mods |= SISrcMods::ABS;
2266 Src = Src.getOperand(0);
2267 }
2268
Tom Stellardb4a313a2014-08-01 00:32:39 +00002269 return true;
2270}
2271
Matt Arsenaultd7e23032017-09-07 18:05:07 +00002272bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2273 SDValue &SrcMods) const {
2274 unsigned Mods;
2275 if (SelectVOP3ModsImpl(In, Src, Mods)) {
2276 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2277 return true;
2278 }
2279
2280 return false;
2281}
2282
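// Like SelectVOP3Mods, but only matches when the modifier-stripped source is
// known not to be a NaN.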
Matt Arsenaultf84e5d92017-01-31 03:07:46 +00002283bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2284 SDValue &SrcMods) const {
2285 SelectVOP3Mods(In, Src, SrcMods);
2286 return isNoNanSrc(Src);
2287}
2288
Jay Foad7816ad92019-07-12 15:02:59 +00002289bool AMDGPUDAGToDAGISel::SelectVOP3Mods_f32(SDValue In, SDValue &Src,
2290 SDValue &SrcMods) const {
2291 if (In.getValueType() == MVT::f32)
2292 return SelectVOP3Mods(In, Src, SrcMods);
2293 Src = In;
2294 SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2295 return true;
2296}
2297
Matt Arsenaultdf58e822017-04-25 21:17:38 +00002298bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2299 if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2300 return false;
2301
2302 Src = In;
2303 return true;
Tom Stellarddb5a11f2015-07-13 15:47:57 +00002304}
2305
Tom Stellardb4a313a2014-08-01 00:32:39 +00002306bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2307 SDValue &SrcMods, SDValue &Clamp,
2308 SDValue &Omod) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002309 SDLoc DL(In);
Matt Arsenaultdf58e822017-04-25 21:17:38 +00002310 Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2311 Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
Tom Stellardb4a313a2014-08-01 00:32:39 +00002312
2313 return SelectVOP3Mods(In, Src, SrcMods);
2314}
2315
Matt Arsenault4831ce52015-01-06 23:00:37 +00002316bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
2317 SDValue &SrcMods,
2318 SDValue &Clamp,
2319 SDValue &Omod) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002320 Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
Matt Arsenault4831ce52015-01-06 23:00:37 +00002321 return SelectVOP3Mods(In, Src, SrcMods);
2322}
2323
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00002324bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2325 SDValue &Clamp, SDValue &Omod) const {
2326 Src = In;
2327
2328 SDLoc DL(In);
Matt Arsenaultdf58e822017-04-25 21:17:38 +00002329 Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2330 Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00002331
2332 return true;
2333}
2334
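// Select source modifiers for a packed (VOP3P) operand: fold fneg into the
// per-half neg/neg_hi bits and high-half extracts into the op_sel bits.
// Packed instructions have no abs modifier.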
Matt Arsenaulteb522e62017-02-27 22:15:25 +00002335bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
2336 SDValue &SrcMods) const {
2337 unsigned Mods = 0;
2338 Src = In;
2339
Matt Arsenaulteb522e62017-02-27 22:15:25 +00002340 if (Src.getOpcode() == ISD::FNEG) {
Matt Arsenault786eeea2017-05-17 20:00:00 +00002341 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00002342 Src = Src.getOperand(0);
2343 }
2344
Matt Arsenault786eeea2017-05-17 20:00:00 +00002345 if (Src.getOpcode() == ISD::BUILD_VECTOR) {
2346 unsigned VecMods = Mods;
2347
Matt Arsenault98f29462017-05-17 20:30:58 +00002348 SDValue Lo = stripBitcast(Src.getOperand(0));
2349 SDValue Hi = stripBitcast(Src.getOperand(1));
Matt Arsenault786eeea2017-05-17 20:00:00 +00002350
2351 if (Lo.getOpcode() == ISD::FNEG) {
Matt Arsenault98f29462017-05-17 20:30:58 +00002352 Lo = stripBitcast(Lo.getOperand(0));
Matt Arsenault786eeea2017-05-17 20:00:00 +00002353 Mods ^= SISrcMods::NEG;
2354 }
2355
2356 if (Hi.getOpcode() == ISD::FNEG) {
Matt Arsenault98f29462017-05-17 20:30:58 +00002357 Hi = stripBitcast(Hi.getOperand(0));
Matt Arsenault786eeea2017-05-17 20:00:00 +00002358 Mods ^= SISrcMods::NEG_HI;
2359 }
2360
Matt Arsenault98f29462017-05-17 20:30:58 +00002361 if (isExtractHiElt(Lo, Lo))
2362 Mods |= SISrcMods::OP_SEL_0;
2363
2364 if (isExtractHiElt(Hi, Hi))
2365 Mods |= SISrcMods::OP_SEL_1;
2366
2367 Lo = stripExtractLoElt(Lo);
2368 Hi = stripExtractLoElt(Hi);
2369
Matt Arsenault786eeea2017-05-17 20:00:00 +00002370 if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
2371 // Really a scalar input. Just select from the low half of the register to
2372 // avoid packing.
2373
2374 Src = Lo;
2375 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2376 return true;
2377 }
2378
2379 Mods = VecMods;
2380 }
2381
Matt Arsenaulteb522e62017-02-27 22:15:25 +00002382 // Packed instructions do not have abs modifiers.
Matt Arsenaulteb522e62017-02-27 22:15:25 +00002383 Mods |= SISrcMods::OP_SEL_1;
2384
2385 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2386 return true;
2387}
2388
2389bool AMDGPUDAGToDAGISel::SelectVOP3PMods0(SDValue In, SDValue &Src,
2390 SDValue &SrcMods,
2391 SDValue &Clamp) const {
2392 SDLoc SL(In);
2393
2394 // FIXME: Handle clamp and op_sel
2395 Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);
2396
2397 return SelectVOP3PMods(In, Src, SrcMods);
2398}
2399
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00002400bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2401 SDValue &SrcMods) const {
2402 Src = In;
2403 // FIXME: Handle op_sel
2404 SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2405 return true;
2406}
2407
2408bool AMDGPUDAGToDAGISel::SelectVOP3OpSel0(SDValue In, SDValue &Src,
2409 SDValue &SrcMods,
2410 SDValue &Clamp) const {
2411 SDLoc SL(In);
2412
2413 // FIXME: Handle clamp
2414 Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);
2415
2416 return SelectVOP3OpSel(In, Src, SrcMods);
2417}
2418
2419bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2420 SDValue &SrcMods) const {
2421 // FIXME: Handle op_sel
2422 return SelectVOP3Mods(In, Src, SrcMods);
2423}
2424
2425bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods0(SDValue In, SDValue &Src,
2426 SDValue &SrcMods,
2427 SDValue &Clamp) const {
2428 SDLoc SL(In);
2429
2430 // FIXME: Handle clamp
2431 Clamp = CurDAG->getTargetConstant(0, SL, MVT::i32);
2432
2433 return SelectVOP3OpSelMods(In, Src, SrcMods);
2434}
2435
Matt Arsenaultd7e23032017-09-07 18:05:07 +00002436// The return value is not whether the match is possible (which it always is),
2437// but whether or not a conversion is really used.
2438bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
2439 unsigned &Mods) const {
2440 Mods = 0;
2441 SelectVOP3ModsImpl(In, Src, Mods);
2442
2443 if (Src.getOpcode() == ISD::FP_EXTEND) {
2444 Src = Src.getOperand(0);
2445 assert(Src.getValueType() == MVT::f16);
2446 Src = stripBitcast(Src);
2447
Matt Arsenault550c66d2017-10-13 20:45:49 +00002448 // Be careful about folding modifiers if we already have an abs. fneg is
2449 // applied last, so we don't want to apply an earlier fneg.
2450 if ((Mods & SISrcMods::ABS) == 0) {
2451 unsigned ModsTmp;
2452 SelectVOP3ModsImpl(Src, Src, ModsTmp);
2453
2454 if ((ModsTmp & SISrcMods::NEG) != 0)
2455 Mods ^= SISrcMods::NEG;
2456
2457 if ((ModsTmp & SISrcMods::ABS) != 0)
2458 Mods |= SISrcMods::ABS;
2459 }
2460
Matt Arsenaultd7e23032017-09-07 18:05:07 +00002461 // op_sel/op_sel_hi decide the source type and source.
2462 // If the source's op_sel_hi is set, it indicates a conversion from fp16 is needed.
2463 // If the source's op_sel is set, it picks the high half of the source
2464 // register.
2465
2466 Mods |= SISrcMods::OP_SEL_1;
Matt Arsenault550c66d2017-10-13 20:45:49 +00002467 if (isExtractHiElt(Src, Src)) {
Matt Arsenaultd7e23032017-09-07 18:05:07 +00002468 Mods |= SISrcMods::OP_SEL_0;
2469
Matt Arsenault550c66d2017-10-13 20:45:49 +00002470 // TODO: Should we try to look for neg/abs here?
2471 }
2472
Matt Arsenaultd7e23032017-09-07 18:05:07 +00002473 return true;
2474 }
2475
2476 return false;
2477}
2478
Matt Arsenault76935122017-09-20 20:28:39 +00002479bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2480 SDValue &SrcMods) const {
2481 unsigned Mods = 0;
2482 SelectVOP3PMadMixModsImpl(In, Src, Mods);
2483 SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2484 return true;
2485}
2486
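// Try to express In as a value already sitting in the high 16 bits of a
// 32-bit register: an undef, a constant shifted into the high half, or the
// source of a high-half extract. Returns an empty SDValue otherwise.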
Matt Arsenaulte8c03a22019-03-08 20:58:11 +00002487SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2488 if (In.isUndef())
2489 return CurDAG->getUNDEF(MVT::i32);
2490
2491 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2492 SDLoc SL(In);
2493 return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2494 }
2495
2496 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2497 SDLoc SL(In);
2498 return CurDAG->getConstant(
2499 C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2500 }
2501
2502 SDValue Src;
2503 if (isExtractHiElt(In, Src))
2504 return Src;
2505
2506 return SDValue();
2507}
2508
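// Decide whether an immediate should be selected into a VGPR: scan up to 10
// users and return true only if some user cannot take the operand from an
// SGPR even after attempting to commute it into an SGPR-capable operand slot.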
Alexander Timofeevdb7ee762018-09-11 11:56:50 +00002509bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
Matt Arsenaulte4c2e9b2019-06-19 23:54:58 +00002510 assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);
2511
Alexander Timofeevdb7ee762018-09-11 11:56:50 +00002512 const SIRegisterInfo *SIRI =
2513 static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
2514 const SIInstrInfo * SII =
2515 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2516
2517 unsigned Limit = 0;
2518 bool AllUsesAcceptSReg = true;
2519 for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
2520 Limit < 10 && U != E; ++U, ++Limit) {
2521 const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2522
2523 // If the register class is unknown, it could be a register class that
2524 // needs to be an SGPR, e.g. one required by an inline asm
2525 // constraint.
2526 if (!RC || SIRI->isSGPRClass(RC))
2527 return false;
2528
2529 if (RC != &AMDGPU::VS_32RegClass) {
2530 AllUsesAcceptSReg = false;
2531 SDNode * User = *U;
2532 if (User->isMachineOpcode()) {
2533 unsigned Opc = User->getMachineOpcode();
2534 const MCInstrDesc &Desc = SII->get(Opc);
2535 if (Desc.isCommutable()) {
2536 unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2537 unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2538 if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2539 unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
2540 const TargetRegisterClass *CommutedRC = getOperandRegClass(*U, CommutedOpNo);
2541 if (CommutedRC == &AMDGPU::VS_32RegClass)
2542 AllUsesAcceptSReg = true;
2543 }
2544 }
2545 }
2546 // If "AllUsesAcceptSReg == false" at this point, we have not succeeded in
2547 // commuting the current user. This means there is at least one use
2548 // that strictly requires a VGPR, so we will not attempt to commute
2549 // other user instructions.
2550 if (!AllUsesAcceptSReg)
2551 break;
2552 }
2553 }
2554 return !AllUsesAcceptSReg && (Limit < 10);
2555}
2556
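// A load is treated as uniform (selectable as a scalar load) when it is at
// least 4-byte aligned and either a non-divergent constant address space
// load, or, when scalarizing global loads is enabled, a non-divergent,
// non-volatile global load whose memory operand is known not to be clobbered.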
Alexander Timofeev4d302f62018-09-13 09:06:56 +00002557bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
2558 auto Ld = cast<LoadSDNode>(N);
2559
2560 return Ld->getAlignment() >= 4 &&
2561 (
2562 (
2563 (
2564 Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2565 Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT
2566 )
2567 &&
2568 !N->isDivergent()
2569 )
2570 ||
2571 (
2572 Subtarget->getScalarizeGlobalBehavior() &&
2573 Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
2574 !Ld->isVolatile() &&
2575 !N->isDivergent() &&
2576 static_cast<const SITargetLowering *>(
2577 getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)
2578 )
2579 );
2580}
Alexander Timofeevdb7ee762018-09-11 11:56:50 +00002581
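// Repeatedly run target-specific post-isel folding over all selected machine
// nodes, replacing folded nodes and pruning dead ones, until the DAG stops
// changing.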
Christian Konigd910b7d2013-02-26 17:52:16 +00002582void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
Bill Wendlinga3cd3502013-06-19 21:36:55 +00002583 const AMDGPUTargetLowering& Lowering =
Matt Arsenault209a7b92014-04-18 07:40:20 +00002584 *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
Vincent Lejeuneab3baf82013-09-12 23:44:44 +00002585 bool IsModified = false;
2586 do {
2587 IsModified = false;
Matt Arsenault68f05052017-12-04 22:18:27 +00002588
Vincent Lejeuneab3baf82013-09-12 23:44:44 +00002589 // Go over all selected nodes and try to fold them a bit more
Matt Arsenault68f05052017-12-04 22:18:27 +00002590 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
2591 while (Position != CurDAG->allnodes_end()) {
2592 SDNode *Node = &*Position++;
2593 MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
Vincent Lejeuneab3baf82013-09-12 23:44:44 +00002594 if (!MachineNode)
2595 continue;
Christian Konigd910b7d2013-02-26 17:52:16 +00002596
Vincent Lejeuneab3baf82013-09-12 23:44:44 +00002597 SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
Matt Arsenault68f05052017-12-04 22:18:27 +00002598 if (ResNode != Node) {
2599 if (ResNode)
2600 ReplaceUses(Node, ResNode);
Vincent Lejeuneab3baf82013-09-12 23:44:44 +00002601 IsModified = true;
2602 }
Tom Stellard2183b702013-06-03 17:39:46 +00002603 }
Vincent Lejeuneab3baf82013-09-12 23:44:44 +00002604 CurDAG->RemoveDeadNodes();
2605 } while (IsModified);
Christian Konigd910b7d2013-02-26 17:52:16 +00002606}
Tom Stellard20287692017-08-08 04:57:55 +00002607
Tom Stellardc5a154d2018-06-28 23:47:12 +00002608bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
2609 Subtarget = &MF.getSubtarget<R600Subtarget>();
2610 return SelectionDAGISel::runOnMachineFunction(MF);
2611}
2612
2613bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
2614 if (!N->readMem())
2615 return false;
2616 if (CbId == -1)
Matt Arsenault0da63502018-08-31 05:49:54 +00002617 return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2618 N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
Tom Stellardc5a154d2018-06-28 23:47:12 +00002619
Matt Arsenault0da63502018-08-31 05:49:54 +00002620 return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
Tom Stellardc5a154d2018-06-28 23:47:12 +00002621}
2622
2623bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
2624 SDValue& IntPtr) {
2625 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
2626 IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
2627 true);
2628 return true;
2629 }
2630 return false;
2631}
2632
2633bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
2634 SDValue& BaseReg, SDValue &Offset) {
2635 if (!isa<ConstantSDNode>(Addr)) {
2636 BaseReg = Addr;
2637 Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
2638 return true;
2639 }
2640 return false;
2641}
2642
Tom Stellard20287692017-08-08 04:57:55 +00002643void R600DAGToDAGISel::Select(SDNode *N) {
2644 unsigned int Opc = N->getOpcode();
2645 if (N->isMachineOpcode()) {
2646 N->setNodeId(-1);
2647 return; // Already selected.
2648 }
2649
2650 switch (Opc) {
2651 default: break;
2652 case AMDGPUISD::BUILD_VERTICAL_VECTOR:
2653 case ISD::SCALAR_TO_VECTOR:
2654 case ISD::BUILD_VECTOR: {
2655 EVT VT = N->getValueType(0);
2656 unsigned NumVectorElts = VT.getVectorNumElements();
2657 unsigned RegClassID;
2658 // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG,
2659 // which adds a 128-bit register copy when going through the
2660 // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
2661 // as possible because they can't be bundled by our scheduler.
2662 switch(NumVectorElts) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002663 case 2: RegClassID = R600::R600_Reg64RegClassID; break;
Tom Stellard20287692017-08-08 04:57:55 +00002664 case 4:
2665 if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
Tom Stellardc5a154d2018-06-28 23:47:12 +00002666 RegClassID = R600::R600_Reg128VerticalRegClassID;
Tom Stellard20287692017-08-08 04:57:55 +00002667 else
Tom Stellardc5a154d2018-06-28 23:47:12 +00002668 RegClassID = R600::R600_Reg128RegClassID;
Tom Stellard20287692017-08-08 04:57:55 +00002669 break;
2670 default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
2671 }
2672 SelectBuildVector(N, RegClassID);
2673 return;
2674 }
2675 }
2676
2677 SelectCode(N);
2678}
2679
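// Match indirect addressing: a constant address (optionally wrapped in
// DWORDADDR) becomes an offset from INDIRECT_BASE_ADDR, an add/or with a
// constant RHS splits into base plus offset, and anything else uses a zero
// offset.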
2680bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
2681 SDValue &Offset) {
2682 ConstantSDNode *C;
2683 SDLoc DL(Addr);
2684
2685 if ((C = dyn_cast<ConstantSDNode>(Addr))) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002686 Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
Tom Stellard20287692017-08-08 04:57:55 +00002687 Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2688 } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
2689 (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002690 Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
Tom Stellard20287692017-08-08 04:57:55 +00002691 Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2692 } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
2693 (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
2694 Base = Addr.getOperand(0);
2695 Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
2696 } else {
2697 Base = Addr;
2698 Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
2699 }
2700
2701 return true;
2702}
2703
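// Match VTX_READ addressing: fold a 16-bit signed constant added to the base
// into the offset field, use the zero register as the base when the whole
// address is a small constant, and otherwise fall back to a zero offset.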
2704bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
2705 SDValue &Offset) {
2706 ConstantSDNode *IMMOffset;
2707
2708 if (Addr.getOpcode() == ISD::ADD
2709 && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
2710 && isInt<16>(IMMOffset->getZExtValue())) {
2711
2712 Base = Addr.getOperand(0);
2713 Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
2714 MVT::i32);
2715 return true;
2716 // If the pointer address is constant, we can move it to the offset field.
2717 } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
2718 && isInt<16>(IMMOffset->getZExtValue())) {
2719 Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
2720 SDLoc(CurDAG->getEntryNode()),
Tom Stellardc5a154d2018-06-28 23:47:12 +00002721 R600::ZERO, MVT::i32);
Tom Stellard20287692017-08-08 04:57:55 +00002722 Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
2723 MVT::i32);
2724 return true;
2725 }
2726
2727 // Default case, no offset
2728 Base = Addr;
2729 Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
2730 return true;
2731}