//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
  setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
  setOperationAction(ISD::FPOW,   MVT::f32, Legal);
  setOperationAction(ISD::FLOG2,  MVT::f32, Legal);
  setOperationAction(ISD::FABS,   MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT,  MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setTargetDAGCombine(ISD::MUL);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 needed to load a 64-bit value is free.
  // As used, this will enable reducing 64-bit operations to 32-bit, which is
  // always good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;

  default:
    return;
  }
}

SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                       GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                       Chains.data(), Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
        getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
   static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDIL_abs:
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_exp:
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_fraction:
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDIL_max:
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_min:
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDIL_round_nearest:
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
    SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                              Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
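/// Illustrative example: LRP(0.25, 8.0, 4.0) = 0.25 * 8.0 + (1 - 0.25) * 4.0
/// = 2.0 + 3.0 = 5.0; the expansion below builds exactly this as
/// FADD(FMUL(a, b), FMUL(FSUB(1, a), c)).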
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                                    Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                    Load->getChain(), Ptr,
                    MachinePointerInfo(Load->getMemOperand()->getValue()),
                    MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                    Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
                     Loads.data(), Loads.size());
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit
  // vector truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
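  // Illustrative example: a truncating store of v4i32 <a, b, c, d> to v4i8
  // becomes a single i32 store of
  // (a & 0xff) | ((b & 0xff) << 8) | ((c & 0xff) << 16) | ((d & 0xff) << 24),
  // which is what the extract/shift/or loop below builds.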
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  const SDValue &Value = Store->getValue();
  EVT VT = Value.getValueType();
  const SDValue &Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, PackedVT);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    EVT ElemVT = VT.getVectorElementType();
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
    Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
    SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
    Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
    }
  }
  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                            DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                            PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                         MachinePointerInfo(Store->getMemOperand()->getValue()),
                         MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                         Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains.data(), NumElts);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(
          GetUnderlyingObject(Load->getMemOperand()->getValue()))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
        DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

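  // Private-address extloads narrower than 32 bits are emulated below: load
  // the containing dword with REGISTER_LOAD, shift the addressed byte or short
  // down to bit 0, then sign- or zero-extend it in-register as the extload
  // requires.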
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
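    // Sub-dword private stores are emulated as a read-modify-write of the
    // containing dword: load the dword with REGISTER_LOAD, clear the target
    // byte/short lane using DstMask, OR in the shifted value, and store the
    // dword back with REGISTER_STORE.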
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

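  // The expansion below is the usual reciprocal-estimate scheme: start from
  // RCP ~= 2^32 / Den (URECIP), correct the estimate by its measured error E,
  // form a candidate quotient with MULHU, then adjust quotient and remainder
  // by at most one Den in each direction.  Illustrative example: for Num = 10,
  // Den = 3, a candidate Quotient of 2 leaves Remainder = 4 >= Den, so the
  // "+1" selects below produce Div = 3 and Rem = 1.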
  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is the rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                                     RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                           NEG_RCP_LO, RCP_LO,
                                           ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                     RCP_A_E, RCP_S_E,
                                     ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                                 DAG.getConstant(-1, VT),
                                                 DAG.getConstant(0, VT),
                                                 ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                                  Num_S_Remainder,
                                                  DAG.getConstant(-1, VT),
                                                  DAG.getConstant(0, VT),
                                                  ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                                               Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                                         DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                                         DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                     Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                            Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                    Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                            Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
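  // Split the i64 into 32-bit halves, convert each, and combine as
  // (float)Lo + (float)Hi * 2^32.  Illustrative example: 0x100000005 is
  // converted as 5.0f + 1.0f * 4294967296.0f.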
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

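// Expand sign_extend_inreg by shifting the value into the high bits and
// arithmetic-shifting it back down.  Illustrative example: extending from i8
// within an i32 uses BitsDiff = 24, so 0x000000ff becomes 0xff000000 after the
// SHL and 0xffffffff (i.e. -1) after the SRA.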
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

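// The subtargets handled here can expose fast 24-bit integer multiplies, gated
// by Subtarget->hasMulU24()/hasMulI24().  The MUL combine below rewrites a
// 32-bit ISD::MUL as MUL_U24/MUL_I24 when both operands are provably
// representable in 24 bits.  Illustrative example: two operands each
// zero-extended from i16 satisfy isU24 and take the unsigned 24-bit path.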
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
    (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}

static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
    default: break;
    case ISD::MUL: {
      EVT VT = N->getValueType(0);
      SDValue N0 = N->getOperand(0);
      SDValue N1 = N->getOperand(1);
      SDValue Mul;

      // FIXME: Add support for 24-bit multiply with 64-bit output on SI.
      if (VT.isVector() || VT.getSizeInBits() > 32)
        break;

      if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
        N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
        N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
        Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
      } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
        N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
        N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
        Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
      } else {
        break;
      }

      // We need to use sext even for MUL_U24, because MUL_U24 is used
      // for signed multiply of 8 and 16-bit types.
      SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);

      return Reg;
    }
    case AMDGPUISD::MUL_I24:
    case AMDGPUISD::MUL_U24: {
      SDValue N0 = N->getOperand(0);
      SDValue N1 = N->getOperand(1);
      simplifyI24(N0, DCI);
      simplifyI24(N1, DCI);
      return SDValue();
    }
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                  const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}

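// For min/max, a bit of the result is known only if both operands agree on it,
// so the known-zero and known-one sets are simply intersected.  Illustrative
// example: umin(x & 0xff, y & 0xff) still has its upper 24 bits known zero.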
static void computeMaskedBitsForMinMax(const SDValue Op0,
                                       const SDValue Op1,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
  DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
  const SDValue Op,
  APInt &KnownZero,
  APInt &KnownOne,
  const SelectionDAG &DAG,
  unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                 KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                               KnownZero, KnownOne, DAG, Depth);
    break;
  default:
    break;
  }
}