//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;
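
/// \brief Stack-allocation helper for the calling-convention table included
/// below: reserve a stack slot for the argument and record its location in
/// \p State.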
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

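/// \brief Decide whether bitcasting a load of type \p LoadTy to \p CastTy is
/// worthwhile.  The only case rejected here is one that would split 32-bit
/// (or wider) scalar elements into sub-32-bit pieces.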
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

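/// \brief Run the generated CC_AMDGPU calling convention over the incoming
/// formal arguments.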
void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;

  default:
    return;
  }
}

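/// \brief Recursively emit the stores needed to copy the constant \p Init to
/// the memory pointed to by \p InitPtr.  Used when a constant-address-space
/// global is materialized as a stack object.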
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0],
                       Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

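/// \brief Lower a GlobalAddress node.  LDS globals are assigned an offset in
/// the kernel's local memory block; constant-address-space globals are copied
/// into a stack object and the frame index is returned instead.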
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, &Ops[0], Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
        getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

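/// \brief Append to \p Args the EXTRACT_VECTOR_ELT nodes for \p Count elements
/// of \p Op, starting at element \p Start.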
void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
                                                 SmallVectorImpl<SDValue> &Args,
                                                 unsigned Start,
                                                 unsigned Count) const {
  EVT VT = Op.getValueType();
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
                               VT.getVectorElementType(),
                               Op, DAG.getConstant(i, MVT::i32)));
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  ExtractVectorElements(A, DAG, Args, 0,
                        A.getValueType().getVectorNumElements());
  ExtractVectorElements(B, DAG, Args, 0,
                        B.getValueType().getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  EVT VT = Op.getValueType();
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
                        VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

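/// \brief Lower a FrameIndex node to the constant offset computed by the
/// target frame lowering (the frame-index offset scaled by 4 times the stack
/// width).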
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDIL_abs:
    return LowerIntrinsicIABS(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_exp:
    return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDGPU_lrp:
    return LowerIntrinsicLRP(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_fraction:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDIL_max:
    return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imax:
    return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umax:
    return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDIL_min:
    return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imin:
    return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umin:
    return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_bfe_i32:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfe_u32:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfi:
    return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfm:
    return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDIL_round_nearest:
    return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

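/// \brief Split a vector load into one scalar extending load per element and
/// rebuild the result with BUILD_VECTOR.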
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                                   Load->getChain(), Ptr,
                                   MachinePointerInfo(Load->getMemOperand()->getValue()),
                                   MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                                   Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
                     Loads.data(), Loads.size());
}

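/// \brief Try to combine a truncating store of a small vector into a single
/// integer store by packing the elements with shifts and masks.  Returns a
/// null SDValue if the store cannot be packed.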
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit
  // vector truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  const SDValue &Value = Store->getValue();
  EVT VT = Value.getValueType();
  const SDValue &Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, PackedVT);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    EVT ElemVT = VT.getVectorElementType();
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
    Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
    SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
    Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
    }
  }
  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

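/// \brief Split a vector store into one truncating scalar store per element,
/// chained together with a TokenFactor.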
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                                       MachinePointerInfo(Store->getMemOperand()->getValue()),
                                       MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                                       Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
}

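/// \brief Custom lowering for loads: extending loads with results wider than
/// 32 bits are split into a 32-bit extload plus an extend, constant-address
/// global loads become register loads, and sub-dword private-address loads
/// are emulated with a register load followed by shift and extend.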
SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(GetUnderlyingObject(Load->getPointerInfo().V))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

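/// \brief Custom lowering for stores: pack or split vector stores, and emulate
/// sub-dword private-address-space stores with a read-modify-write of the
/// containing 32-bit register.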
SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

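/// \brief Expand 32-bit unsigned division and remainder using the URECIP
/// reciprocal instruction plus a correction sequence, producing both the
/// quotient and the remainder.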
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2];
  Ops[0] = Div;
  Ops[1] = Rem;
  return DAG.getMergeValues(Ops, 2, DL);
}

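/// \brief Lower i64 -> f32 uint_to_fp by converting the two 32-bit halves
/// separately and combining them as FloatLo + FloatHi * 2^32.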
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

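/// \brief Expand sign_extend_inreg as a shift-left/arithmetic-shift-right pair
/// by \p BitsDiff bits.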
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

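/// \brief Lower sign_extend_inreg: scalarize vectors, use BFE on subtargets
/// that have it, and otherwise fall back to the shift expansion.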
SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  unsigned SrcBits = ExtraVT.getScalarType().getSizeInBits();
  unsigned DestBits = ScalarVT.getSizeInBits();
  unsigned BitsDiff = DestBits - SrcBits;

  if (!Subtarget->hasBFE())
    return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);

  SDValue Src = Op.getOperand(0);
  if (VT.isVector()) {
    SDLoc DL(Op);
    // Need to scalarize this, and revisit each of the scalars later.
    // TODO: Don't scalarize on Evergreen?
    unsigned NElts = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Args;
    ExtractVectorElements(Src, DAG, Args, 0, NElts);

    SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
    for (unsigned I = 0; I < NElts; ++I)
      Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

    return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
  }

  if (SrcBits == 32) {
    SDLoc DL(Op);

    // If the source is 32-bits, this is really half of a 2-register pair, and
    // we need to discard the unused half of the pair.
    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, TruncSrc);
  }

  unsigned NElts = VT.isVector() ? VT.getVectorNumElements() : 1;

  // TODO: Match 64-bit BFE. SI has a 64-bit BFE, but it's scalar only so it
  // might not be worth the effort, and will need to expand to shifts when
  // fixing SGPR copies.
  if (SrcBits < 32 && DestBits <= 32) {
    SDLoc DL(Op);
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    if (DestBits != 32)
      Src = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVT, Src);

    // FIXME: This should use TargetConstant, but that hits assertions for
    // Evergreen.
    SDValue Ext = DAG.getNode(AMDGPUISD::BFE_I32, DL, ExtVT,
                              Op.getOperand(0), // Operand
                              DAG.getConstant(0, ExtVT), // Offset
                              DAG.getConstant(SrcBits, ExtVT)); // Width

    // Truncate to the original type if necessary.
    if (ScalarVT == MVT::i32)
      return Ext;
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Ext);
  }

  // For small types, extend to 32-bits first.
  if (SrcBits < 32) {
    SDLoc DL(Op);
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, ExtVT, Src);
    SDValue Ext32 = DAG.getNode(AMDGPUISD::BFE_I32,
                                DL,
                                ExtVT,
                                TruncSrc, // Operand
                                DAG.getConstant(0, ExtVT), // Offset
                                DAG.getConstant(SrcBits, ExtVT)); // Width

    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Ext32);
  }

  // For everything else, use the standard bitshift expansion.
  return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

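/// \brief Rebuild the InputArg list with the types the arguments had before
/// they were split into scalars, smaller vectors, or promoted elements.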
void AMDGPUTargetLowering::getOriginalFunctionArgs(
    SelectionDAG &DAG,
    const Function *F,
    const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}

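/// \brief Helper for computeMaskedBitsForTargetNode: the known bits of a
/// min/max node are the bits known identically in both of its operands.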
static void computeMaskedBitsForMinMax(const SDValue Op0,
                                       const SDValue Op1,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
  DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
    const SDValue Op,
    APInt &KnownZero,
    APInt &KnownOne,
    const SelectionDAG &DAG,
    unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                 KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                               KnownZero, KnownOne, DAG, Depth);
    break;
  default:
    break;
  }
}
Matt Arsenault0c274fe2014-03-25 18:18:27 +00001273}