//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setTargetDAGCombine(ISD::MUL);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;

  default:
    return;
  }
}

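// LowerConstantInitializer materializes a global's constant initializer by
// emitting an explicit chain of stores through InitPtr: scalar integer and FP
// constants become single stores, aggregates are walked element by element,
// and the resulting store chains are joined with a TokenFactor.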
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                       Chains.data(), Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
                                      getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
                              getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
                                                 SmallVectorImpl<SDValue> &Args,
                                                 unsigned Start,
                                                 unsigned Count) const {
  EVT VT = Op.getValueType();
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
                               VT.getVectorElementType(),
                               Op, DAG.getConstant(i, MVT::i32)));
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  ExtractVectorElements(A, DAG, Args, 0,
                        A.getValueType().getVectorNumElements());
  ExtractVectorElements(B, DAG, Args, 0,
                        B.getValueType().getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  EVT VT = Op.getValueType();
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
                        VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDIL_abs:
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_exp:
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_fraction:
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDIL_max:
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_min:
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDIL_round_nearest:
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                    Load->getChain(), Ptr,
                    MachinePointerInfo(Load->getMemOperand()->getValue()),
                    MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                    Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
                     Loads.data(), Loads.size());
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating stores into an i32 store.
  // XXX: We could also handle other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  const SDValue &Value = Store->getValue();
  EVT VT = Value.getValueType();
  const SDValue &Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, PackedVT);

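  // Build the packed value by masking each element to its memory width,
  // shifting it into its slot, and ORing the pieces together.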
  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    EVT ElemVT = VT.getVectorElementType();
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
    Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
    SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
    Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
    }
  }
  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                         MachinePointerInfo(Store->getMemOperand()->getValue()),
                         MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                         Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains.data(), NumElts);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(GetUnderlyingObject(Load->getPointerInfo().V))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
                                     getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

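  // Emulate a private-address sub-dword extending load: read the 32-bit word
  // that contains the value, shift the addressed byte/halfword down to bit 0,
  // then sign- or zero-extend it to the result type.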
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

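  // Private-address stores narrower than 32 bits are emulated with a
  // read-modify-write of the containing dword register: load it, clear the
  // byte/halfword being stored, OR in the shifted value, and store it back.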
  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

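  // There is no native integer division, so expand 32-bit unsigned div/rem
  // using the hardware reciprocal approximation (URECIP) followed by the
  // correction steps below.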
  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64: convert each 32-bit half separately and combine the
  // results as FloatLo + FloatHi * 2^32.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

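// Custom sign_extend_inreg lowering: scalarize vectors, use the hardware BFE
// (bit-field extract) instruction when the subtarget has it, and otherwise
// fall back to the generic shl + sra expansion above.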
SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  unsigned SrcBits = ExtraVT.getScalarType().getSizeInBits();
  unsigned DestBits = ScalarVT.getSizeInBits();
  unsigned BitsDiff = DestBits - SrcBits;

  if (!Subtarget->hasBFE())
    return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);

  SDValue Src = Op.getOperand(0);
  if (VT.isVector()) {
    SDLoc DL(Op);
    // Need to scalarize this, and revisit each of the scalars later.
    // TODO: Don't scalarize on Evergreen?
    unsigned NElts = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Args;
    ExtractVectorElements(Src, DAG, Args, 0, NElts);

    SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
    for (unsigned I = 0; I < NElts; ++I)
      Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

    return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
  }

  if (SrcBits == 32) {
    SDLoc DL(Op);

    // If the source is 32-bits, this is really half of a 2-register pair, and
    // we need to discard the unused half of the pair.
    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, TruncSrc);
  }

  unsigned NElts = VT.isVector() ? VT.getVectorNumElements() : 1;

  // TODO: Match 64-bit BFE. SI has a 64-bit BFE, but it's scalar only so it
  // might not be worth the effort, and will need to expand to shifts when
  // fixing SGPR copies.
  if (SrcBits < 32 && DestBits <= 32) {
    SDLoc DL(Op);
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    if (DestBits != 32)
      Src = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVT, Src);

    // FIXME: This should use TargetConstant, but that hits assertions for
    // Evergreen.
    SDValue Ext = DAG.getNode(AMDGPUISD::BFE_I32, DL, ExtVT,
                              Op.getOperand(0), // Operand
                              DAG.getConstant(0, ExtVT), // Offset
                              DAG.getConstant(SrcBits, ExtVT)); // Width

    // Truncate to the original type if necessary.
    if (ScalarVT == MVT::i32)
      return Ext;
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Ext);
  }

  // For small types, extend to 32-bits first.
  if (SrcBits < 32) {
    SDLoc DL(Op);
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, ExtVT, Src);
    SDValue Ext32 = DAG.getNode(AMDGPUISD::BFE_I32,
                                DL,
                                ExtVT,
                                TruncSrc, // Operand
                                DAG.getConstant(0, ExtVT), // Offset
                                DAG.getConstant(SrcBits, ExtVT)); // Width

    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Ext32);
  }

  // For everything else, use the standard bitshift expansion.
  return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

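// isU24/isI24 use known-bits and sign-bit analysis to tell whether a value
// fits in 24 bits, so that a 32-bit multiply can be combined into the faster
// MUL_U24 / MUL_I24 hardware operations in PerformDAGCombine below.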
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  // Types less than 24-bit should be treated as unsigned 24-bit values.
  return VT.getSizeInBits() >= 24 &&
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}

static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
    default: break;
    case ISD::MUL: {
      EVT VT = N->getValueType(0);
      SDValue N0 = N->getOperand(0);
      SDValue N1 = N->getOperand(1);
      SDValue Mul;

      // FIXME: Add support for 24-bit multiply with 64-bit output on SI.
      if (VT.isVector() || VT.getSizeInBits() > 32)
        break;

      if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
        N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
        N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
        Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
      } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
        N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
        N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
        Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
      } else {
        break;
      }

      SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);

      return Reg;
    }
    case AMDGPUISD::MUL_I24:
    case AMDGPUISD::MUL_U24: {
      SDValue N0 = N->getOperand(0);
      SDValue N1 = N->getOperand(1);
      simplifyI24(N0, DCI);
      simplifyI24(N1, DCI);
      return SDValue();
    }
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}

static void computeMaskedBitsForMinMax(const SDValue Op0,
                                       const SDValue Op1,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
  DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
    const SDValue Op,
    APInt &KnownZero,
    APInt &KnownOne,
    const SelectionDAG &DAG,
    unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                 KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                               KnownZero, KnownOne, DAG, Depth);
    break;
  default:
    break;
  }
}