//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"

using namespace llvm;

namespace {

/// Diagnostic information for unimplemented or unsupported feature reporting.
class DiagnosticInfoUnsupported : public DiagnosticInfo {
private:
  const Twine &Description;
  const Function &Fn;

  static int KindID;

  static int getKindID() {
    if (KindID == 0)
      KindID = llvm::getNextAvailablePluginDiagnosticKind();
    return KindID;
  }

public:
  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
                            DiagnosticSeverity Severity = DS_Error)
    : DiagnosticInfo(getKindID(), Severity),
      Description(Desc),
      Fn(Fn) { }

  const Function &getFunction() const { return Fn; }
  const Twine &getDescription() const { return Description; }

  void print(DiagnosticPrinter &DP) const override {
    DP << "unsupported " << getDescription() << " in " << Fn.getName();
  }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == getKindID();
  }
};

int DiagnosticInfoUnsupported::KindID = 0;
}

static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"
AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL.
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);


  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setTargetDAGCombine(ISD::MUL);
}
//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}
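
// A load bitcast is considered beneficial unless it would turn a load of
// 32-bit (or wider) elements into a load of sub-32-bit elements.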
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}
//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;

  default:
    return;
  }
}
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                       Chains.data(), Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
                                        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
                              getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     Args.data(), Args.size());
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDIL_abs:
    return LowerIntrinsicIABS(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_exp:
    return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDGPU_lrp:
    return LowerIntrinsicLRP(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_fraction:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDIL_max:
    return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imax:
    return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umax:
    return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDIL_min:
    return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imin:
    return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umin:
    return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_bfe_i32:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfe_u32:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfi:
    return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfm:
    return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDIL_round_nearest:
    return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}
/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}
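// Scalarize a vector load: emit one extending scalar load per element and
// recombine the results with a BUILD_VECTOR.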
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                        Load->getChain(), Ptr,
                        MachinePointerInfo(Load->getMemOperand()->getValue()),
                        MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                        Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
                     Loads.data(), Loads.size());
}
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}
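// Scalarize a vector store: emit one truncating scalar store per element and
// glue the stores together with a TokenFactor.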
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                         MachinePointerInfo(Store->getMemOperand()->getValue()),
                         MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                         Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains.data(), NumElts);
}
SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(
          GetUnderlyingObject(Load->getMemOperand()->getValue()))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
                                     getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();


  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}
Tom Stellarde9373602014-01-22 19:24:14 +0000893 SDLoc DL(Op);
Tom Stellard2ffc3302013-08-26 15:05:44 +0000894 SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
895 if (Result.getNode()) {
896 return Result;
897 }
898
899 StoreSDNode *Store = cast<StoreSDNode>(Op);
Tom Stellarde9373602014-01-22 19:24:14 +0000900 SDValue Chain = Store->getChain();
Tom Stellard81d871d2013-11-13 23:36:50 +0000901 if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
902 Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
Tom Stellard2ffc3302013-08-26 15:05:44 +0000903 Store->getValue().getValueType().isVector()) {
904 return SplitVectorStore(Op, DAG);
905 }
Tom Stellarde9373602014-01-22 19:24:14 +0000906
Matt Arsenault74891cd2014-03-15 00:08:22 +0000907 EVT MemVT = Store->getMemoryVT();
Tom Stellarde9373602014-01-22 19:24:14 +0000908 if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
Matt Arsenault74891cd2014-03-15 00:08:22 +0000909 MemVT.bitsLT(MVT::i32)) {
Tom Stellarde9373602014-01-22 19:24:14 +0000910 unsigned Mask = 0;
911 if (Store->getMemoryVT() == MVT::i8) {
912 Mask = 0xff;
913 } else if (Store->getMemoryVT() == MVT::i16) {
914 Mask = 0xffff;
915 }
Matt Arsenaultea330fb2014-03-15 00:08:26 +0000916 SDValue BasePtr = Store->getBasePtr();
917 SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
Tom Stellarde9373602014-01-22 19:24:14 +0000918 DAG.getConstant(2, MVT::i32));
919 SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
920 Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));
Matt Arsenaultea330fb2014-03-15 00:08:26 +0000921
922 SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
Tom Stellarde9373602014-01-22 19:24:14 +0000923 DAG.getConstant(0x3, MVT::i32));
Matt Arsenaultea330fb2014-03-15 00:08:26 +0000924
Tom Stellarde9373602014-01-22 19:24:14 +0000925 SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
926 DAG.getConstant(3, MVT::i32));
Matt Arsenaultea330fb2014-03-15 00:08:26 +0000927
Tom Stellarde9373602014-01-22 19:24:14 +0000928 SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
929 Store->getValue());
Matt Arsenault74891cd2014-03-15 00:08:22 +0000930
931 SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);
932
Tom Stellarde9373602014-01-22 19:24:14 +0000933 SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
934 MaskedValue, ShiftAmt);
Matt Arsenault74891cd2014-03-15 00:08:22 +0000935
Tom Stellarde9373602014-01-22 19:24:14 +0000936 SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
937 ShiftAmt);
938 DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
939 DAG.getConstant(0xffffffff, MVT::i32));
940 Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);
941
942 SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
943 return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
944 Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
945 }
Tom Stellard2ffc3302013-08-26 15:05:44 +0000946 return SDValue();
947}
Tom Stellard75aadc22012-12-11 21:25:42 +0000948
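// Expand 32-bit unsigned division and remainder using the hardware reciprocal
// estimate (URECIP) followed by explicit correction of the quotient and
// remainder.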
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, 2, DL);
}
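// i64 -> f32 uint_to_fp: convert the low and high 32-bit halves separately,
// then combine them as FloatHi * 2^32 + FloatLo.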
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
}
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}
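// Try to simplify an operand of a 24-bit multiply given that only its low 24
// bits are demanded.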
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default: break;
  case ISD::MUL: {
    EVT VT = N->getValueType(0);
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue Mul;

    // FIXME: Add support for 24-bit multiply with 64-bit output on SI.
    if (VT.isVector() || VT.getSizeInBits() > 32)
      break;

    if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
      N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
    } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
      N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
    } else {
      break;
    }

    // We need to use sext even for MUL_U24, because MUL_U24 is used
    // for signed multiply of 8 and 16-bit types.
    SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);

    return Reg;
  }
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  }
  return SDValue();
}
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
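
// The Ins array passed in may have been legalized (split or promoted); rebuild
// an argument list that reflects the original source-level argument types.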
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}
#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}
static void computeMaskedBitsForMinMax(const SDValue Op0,
                                       const SDValue Op1,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
  DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
    const SDValue Op,
    APInt &KnownZero,
    APInt &KnownOne,
    const SelectionDAG &DAG,
    unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                 KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                               KnownZero, KnownOne, DAG, Depth);
    break;
  default:
    break;
  }
}