//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;
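
// Helper used by the calling-convention code generated into
// AMDGPUGenCallingConv.inc (included below): it allocates a stack slot sized
// and aligned for the incoming value, records the resulting memory location
// for the argument, and reports the value as handled.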
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);


  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setTargetDAGCombine(ISD::MUL);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 needed to materialize the 64-bit value
  // is free. As used, this enables reducing 64-bit operations to 32-bit ones,
  // which is always good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;

  default:
    return;
  }
}

SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                       Chains.data(), Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
                                        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
         E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
         E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
                              getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDIL_abs:
    return LowerIntrinsicIABS(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_exp:
    return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDGPU_lrp:
    return LowerIntrinsicLRP(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_fraction:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDIL_max:
    return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imax:
    return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umax:
    return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDIL_min:
    return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imin:
    return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umin:
    return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_bfe_i32:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfe_u32:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfi:
    return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfm:
    return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDIL_round_nearest:
    return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
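/// e.g. (illustrative) IABS(-5) = SMAX(0 - (-5), -5) = SMAX(5, -5) = 5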
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
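/// which the code below builds as fadd(fmul(a, b), fmul(fsub(1.0, a), c)).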
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                                   Load->getChain(), Ptr,
                                   MachinePointerInfo(Load->getMemOperand()->getValue()),
                                   MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                                   Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
                     Loads.data(), Loads.size());
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit (or
  // smaller) vector truncating store into a single i32 store.
  // XXX: We could also optimize other vector bitwidths.
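  //
  // As an illustration of the packing below: a truncating store of <4 x i32>
  // to v4i8 memory becomes a single i32 store of
  //   e0 | (e1 << 8) | (e2 << 16) | (e3 << 24).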
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                                       MachinePointerInfo(Store->getMemOperand()->getValue()),
                                       MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                                       Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains.data(), NumElts);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(
          GetUnderlyingObject(Load->getMemOperand()->getValue()))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
                                     getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

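  // What follows emulates private-address extloads narrower than 32 bits
  // (e.g. an i8 or i16 sextload): load the containing 32-bit word with
  // REGISTER_LOAD, shift the addressed byte(s) down to bit 0, then sign- or
  // zero-extend in-register from the memory element type.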
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

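  // Private-address stores of i8 / i16 are emulated below as a
  // read-modify-write of the containing 32-bit word: load the word with
  // REGISTER_LOAD, clear the destination byte/halfword with a mask, OR in the
  // shifted value, and write the word back with REGISTER_STORE.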
  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

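// Expand 32-bit unsigned division and remainder without a hardware divider:
// start from RCP = URECIP(Den) (roughly 2^32 / Den plus a rounding error),
// correct the reciprocal with the computed error E, estimate the quotient
// with mulhu, and finally adjust the quotient and remainder by at most one
// step each. The step-by-step comments in the body spell out each value.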
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);

}

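// Sign-extend the low (bit width - BitsDiff) bits of Op in-register by
// shifting them to the top of the register and arithmetic-shifting them back
// down. Illustrative example: sign-extending from i24 inside an i32 uses
// BitsDiff = 8, i.e. (x << 8) >> 8 with an arithmetic right shift.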
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift Right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

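// Op fits in an unsigned 24-bit operand when its known-zero bits cover
// everything above bit 23; e.g. (illustrative) a value produced by
// zero-extending an i16 qualifies.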
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}

static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
  default: break;
  case ISD::MUL: {
    EVT VT = N->getValueType(0);
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue Mul;

    // FIXME: Add support for 24-bit multiply with 64-bit output on SI.
    if (VT.isVector() || VT.getSizeInBits() > 32)
      break;

    if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
      N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
    } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
      N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
    } else {
      break;
    }

    // We need to use sext even for MUL_U24, because MUL_U24 is used
    // for signed multiply of 8 and 16-bit types.
    SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);

    return Reg;
  }
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  }
  return SDValue();
}
1134//===----------------------------------------------------------------------===//
Tom Stellard75aadc22012-12-11 21:25:42 +00001135// Helper functions
1136//===----------------------------------------------------------------------===//
1137
Tom Stellardaf775432013-10-23 00:44:32 +00001138void AMDGPUTargetLowering::getOriginalFunctionArgs(
1139 SelectionDAG &DAG,
1140 const Function *F,
1141 const SmallVectorImpl<ISD::InputArg> &Ins,
1142 SmallVectorImpl<ISD::InputArg> &OrigIns) const {
1143
1144 for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
1145 if (Ins[i].ArgVT == Ins[i].VT) {
1146 OrigIns.push_back(Ins[i]);
1147 continue;
1148 }
1149
1150 EVT VT;
1151 if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
1152 // Vector has been split into scalars.
1153 VT = Ins[i].ArgVT.getVectorElementType();
1154 } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
1155 Ins[i].ArgVT.getVectorElementType() !=
1156 Ins[i].VT.getVectorElementType()) {
1157 // Vector elements have been promoted
1158 VT = Ins[i].ArgVT;
1159 } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}

static void computeMaskedBitsForMinMax(const SDValue Op0,
                                       const SDValue Op1,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
  DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
    const SDValue Op,
    APInt &KnownZero,
    APInt &KnownOne,
    const SelectionDAG &DAG,
    unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                 KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                               KnownZero, KnownOne, DAG, Depth);
    break;
  default:
    break;
  }
}