//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "AMDGPU.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  /// \returns The AMDGPUISD::FFBH_U32 or AMDGPUISD::FFBL_B32 node (selected by
  /// \p Opc) if the incoming \p Op may have been legalized from a smaller type
  /// VT. The pre-legalized type must be matched because the generic
  /// legalization inserts the add/sub between the select and compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;
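  // Illustrative note for getFFBX_U32 above (an assumed example, not taken
  // from this interface): a 16-bit ctlz is typically promoted to something
  // like (sub (ctlz (zext i16 x to i32)), 16), wrapped in a select for the
  // zero case, so the combine has to look at the pre-legalized i16 type
  // rather than the i32 it sees after legalization.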

public:
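  // Descriptive note (assumed from their use by the multiply combines in this
  // class): these report how many bits are actually needed to represent
  // \p Op as an unsigned or signed value, derived from known-bits and
  // sign-bit analysis on \p DAG.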
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);

protected:
  const AMDGPUSubtarget *Subtarget;
  AMDGPUAS AMDGPUASI;

  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  /// \brief Split a vector store into multiple scalar stores.
  /// \returns The resulting chain.

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                    double Log2BaseInverted) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performClampCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulLoHi24Combine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return 64-bit value Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
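  // For illustration only (values chosen here, not defined by this interface):
  // splitting a 64-bit value holding 0x1122334455667788 yields the 32-bit
  // halves Lo = 0x55667788 and Hi = 0x11223344.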

  /// \brief Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// \brief Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;
  void analyzeFormalArgumentsCompute(CCState &State,
                                     const SmallVectorImpl<ISD::InputArg> &Ins) const;
public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

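  /// \returns true if the sign of zero may be ignored for \p Op, either
  /// globally (NoSignedZerosFPMath) or via the node's own fast-math flags.
  ///
  /// Example of why this matters (illustrative, not tied to a specific
  /// combine in this file): (fsub 0.0, x) may be simplified to (fneg x) only
  /// under this predicate, because fneg turns +0.0 into -0.0 while the
  /// subtraction keeps +0.0.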
  bool mayIgnoreSignedZero(SDValue Op) const {
    if (getTargetMachine().Options.NoSignedZerosFPMath)
      return true;

    const auto Flags = Op.getNode()->getFlags();
    if (Flags.isDefined())
      return Flags.hasNoSignedZeros();

    return false;
  }

  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT) const final;

  bool storeOfVectorConstantIsCheap(EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain,
                              SelectionDAG &DAG,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op,
                                  SelectionDAG &DAG) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection
  // for AMDGPU.
  // A commit (git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319036
  // 91177308-0d34-0410-b5e6-96231b3b80d8) turned on MergeConsecutiveStores()
  // before Instruction Selection for all targets. Enough AMDGPU compiles go
  // into an infinite loop (MergeConsecutiveStores() merges two stores;
  // LegalizeStoreOps() un-merges; MergeConsecutiveStores() re-merges, etc.)
  // to warrant turning it off for now.
  bool mergeStoresAfterLegalization() const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// \brief Determine which bits of \p Op are known to be either zero or one
  /// and return them in the \p Known bitset.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  /// \brief Helper function that adds Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise
  /// a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               unsigned Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               unsigned Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }

  // Returns the raw live in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  unsigned Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue StackPtr,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    GRID_DIM = FIRST_IMPLICIT,
    GRID_OFFSET,
  };

  /// \brief Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const AMDGPUMachineFunction *MFI,
                                      const ImplicitParameter Param) const;

  AMDGPUAS getAMDGPUAS() const {
    return AMDGPUASI;
  }

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_FLAG,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
  CLAMP,
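  // Illustrative values (not a spec of the node): CLAMP(1.5) -> 1.0,
  // CLAMP(-0.25) -> 0.0, and CLAMP(NaN) -> 0.0 when the dx10_clamp behavior
  // described above is in effect.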

  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,
  SETREG,
  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
  // Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,
  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,
  TRIG_PREOP, // 1 ULP max error for f64

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  // For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RSQ_LEGACY,
  FMUL_LEGACY,
  RSQ_CLAMP,
  LDEXP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
  BFE_U32, // Extract range of bits with zero extension to 32-bits.
  BFE_I32, // Extract range of bits with sign extension to 32-bits.
  BFI,     // (src0 & src1) | (~src0 & src2)
  BFM,     // Insert a range of bits into a 32-bit word.
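  // Worked example for the bitfield-extract nodes above (values are
  // illustrative, not a definition of the operand order): extracting an 8-bit
  // field starting at bit 4 of 0x00ABCDEF gives 0xDE with BFE_U32, and
  // 0xFFFFFFDE with BFE_I32 since the field's top bit is set.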
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  MUL_LOHI_I24,
  MUL_LOHI_U24,
  TEXTURE_FETCH,
  EXPORT, // exp on SI+
  EXPORT_DONE, // exp on SI+ with done bit set
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

  // Convert two 32-bit float values into a single register holding two packed
  // f16 values, with round to zero.
  CVT_PKRTZ_F16_F32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  // Wrapper around fp16 results that are known to zero the high bits.
  FP16_ZEXT,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  INIT_EXEC,
  INIT_EXEC_FROM_INPUT,
  SENDMSG,
  SENDMSGHALT,
  INTERP_MOV,
  INTERP_P1,
  INTERP_P2,
  PC_ADD_REL_OFFSET,
  KILL,
  DUMMY_CHAIN,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_X3,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
  ATOMIC_LOAD_FADD,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_D16,
  BUFFER_STORE,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_CMPSWAP,
  IMAGE_LOAD,
  IMAGE_LOAD_MIP,
  IMAGE_STORE,
  IMAGE_STORE_MIP,

  // Basic sample.
  IMAGE_SAMPLE,
  IMAGE_SAMPLE_CL,
  IMAGE_SAMPLE_D,
  IMAGE_SAMPLE_D_CL,
  IMAGE_SAMPLE_L,
  IMAGE_SAMPLE_B,
  IMAGE_SAMPLE_B_CL,
  IMAGE_SAMPLE_LZ,
  IMAGE_SAMPLE_CD,
  IMAGE_SAMPLE_CD_CL,

  // Sample with comparison.
  IMAGE_SAMPLE_C,
  IMAGE_SAMPLE_C_CL,
  IMAGE_SAMPLE_C_D,
  IMAGE_SAMPLE_C_D_CL,
  IMAGE_SAMPLE_C_L,
  IMAGE_SAMPLE_C_B,
  IMAGE_SAMPLE_C_B_CL,
  IMAGE_SAMPLE_C_LZ,
  IMAGE_SAMPLE_C_CD,
  IMAGE_SAMPLE_C_CD_CL,

  // Sample with offsets.
  IMAGE_SAMPLE_O,
  IMAGE_SAMPLE_CL_O,
  IMAGE_SAMPLE_D_O,
  IMAGE_SAMPLE_D_CL_O,
  IMAGE_SAMPLE_L_O,
  IMAGE_SAMPLE_B_O,
  IMAGE_SAMPLE_B_CL_O,
  IMAGE_SAMPLE_LZ_O,
  IMAGE_SAMPLE_CD_O,
  IMAGE_SAMPLE_CD_CL_O,

  // Sample with comparison and offsets.
  IMAGE_SAMPLE_C_O,
  IMAGE_SAMPLE_C_CL_O,
  IMAGE_SAMPLE_C_D_O,
  IMAGE_SAMPLE_C_D_CL_O,
  IMAGE_SAMPLE_C_L_O,
  IMAGE_SAMPLE_C_B_O,
  IMAGE_SAMPLE_C_B_CL_O,
  IMAGE_SAMPLE_C_LZ_O,
  IMAGE_SAMPLE_C_CD_O,
  IMAGE_SAMPLE_C_CD_CL_O,

  // Basic gather4.
  IMAGE_GATHER4,
  IMAGE_GATHER4_CL,
  IMAGE_GATHER4_L,
  IMAGE_GATHER4_B,
  IMAGE_GATHER4_B_CL,
  IMAGE_GATHER4_LZ,

  // Gather4 with comparison.
  IMAGE_GATHER4_C,
  IMAGE_GATHER4_C_CL,
  IMAGE_GATHER4_C_L,
  IMAGE_GATHER4_C_B,
  IMAGE_GATHER4_C_B_CL,
  IMAGE_GATHER4_C_LZ,

  // Gather4 with offsets.
  IMAGE_GATHER4_O,
  IMAGE_GATHER4_CL_O,
  IMAGE_GATHER4_L_O,
  IMAGE_GATHER4_B_O,
  IMAGE_GATHER4_B_CL_O,
  IMAGE_GATHER4_LZ_O,

  // Gather4 with comparison and offsets.
  IMAGE_GATHER4_C_O,
  IMAGE_GATHER4_C_CL_O,
  IMAGE_GATHER4_C_L_O,
  IMAGE_GATHER4_C_B_O,
  IMAGE_GATHER4_C_B_CL_O,
  IMAGE_GATHER4_C_LZ_O,

  LAST_AMDGPU_ISD_NUMBER
};

} // End namespace AMDGPUISD

} // End namespace llvm

#endif