//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"

using namespace llvm;

namespace {

/// Diagnostic information for unimplemented or unsupported feature reporting.
class DiagnosticInfoUnsupported : public DiagnosticInfo {
private:
  const Twine &Description;
  const Function &Fn;

  static int KindID;

  static int getKindID() {
    if (KindID == 0)
      KindID = llvm::getNextAvailablePluginDiagnosticKind();
    return KindID;
  }

public:
  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
                            DiagnosticSeverity Severity = DS_Error)
    : DiagnosticInfo(getKindID(), Severity),
      Description(Desc),
      Fn(Fn) { }

  const Function &getFunction() const { return Fn; }
  const Twine &getDescription() const { return Description; }

  void print(DiagnosticPrinter &DP) const override {
    DP << "unsupported " << getDescription() << " in " << Fn.getName();
  }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == getKindID();
  }
};

int DiagnosticInfoUnsupported::KindID = 0;
}
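
// Instances of this diagnostic are reported through LLVMContext::diagnose();
// see LowerCall() below, which emits one for unsupported function calls.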

static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL.
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setTargetDAGCombine(ISD::MUL);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

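  // The cast is only considered non-beneficial when scalar elements of 32 bits
  // or more would be reinterpreted as sub-32-bit elements.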
  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For
  // all practical purposes, the extra mov 0 to load a 64-bit value is free.
  // As used, this will enable reducing 64-bit operations to 32-bit, which is
  // always good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits
  // is not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain,
    CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
                                             const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::UDIV: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
                                  N->getOperand(0), N->getOperand(1));
    Results.push_back(UDIVREM);
    break;
  }
  case ISD::UREM: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
                                  N->getOperand(0), N->getOperand(1));
    Results.push_back(UDIVREM.getValue(1));
    break;
  }
  case ISD::UDIVREM: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

    SDValue one = DAG.getConstant(1, HalfVT);
    SDValue zero = DAG.getConstant(0, HalfVT);

    // Hi/Lo split
    SDValue LHS = N->getOperand(0);
    SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
    SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);

    SDValue RHS = N->getOperand(1);
    SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
    SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);

    // Get speculative values
    SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
    SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

    SDValue REM_Hi = zero;
    SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);

    SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
    SDValue DIV_Lo = zero;

    const unsigned halfBitWidth = HalfVT.getSizeInBits();

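    // What follows is restoring (shift/subtract) long division over the low
    // half: each iteration shifts the next bit of LHS_Lo into the running
    // remainder and sets the corresponding quotient bit when REM >= RHS.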
    for (unsigned i = 0; i < halfBitWidth; ++i) {
      SDValue POS = DAG.getConstant(halfBitWidth - i - 1, HalfVT);
      // Get value of high bit
      SDValue HBit;
      if (halfBitWidth == 32 && Subtarget->hasBFE()) {
        HBit = DAG.getNode(AMDGPUISD::BFE_U32, DL, HalfVT, LHS_Lo, POS, one);
      } else {
        HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
        HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
      }

      SDValue Carry = DAG.getNode(ISD::SRL, DL, HalfVT, REM_Lo,
                                  DAG.getConstant(halfBitWidth - 1, HalfVT));
      REM_Hi = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Hi, one);
      REM_Hi = DAG.getNode(ISD::OR, DL, HalfVT, REM_Hi, Carry);

      REM_Lo = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Lo, one);
      REM_Lo = DAG.getNode(ISD::OR, DL, HalfVT, REM_Lo, HBit);

      SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);

      SDValue BIT = DAG.getConstant(1 << (halfBitWidth - i - 1), HalfVT);
      SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETGE);

      DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

      // Update REM
      SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);

      REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETGE);
      REM_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, zero);
      REM_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, one);
    }

    SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
    SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
    Results.push_back(DIV);
    Results.push_back(REM);
    break;
  }
  default:
    return;
  }
}

SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
         E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
         E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
        getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDIL_abs:
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_exp:
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_fraction:
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDIL_max:
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_min:
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDIL_round_nearest:
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                      Load->getChain(), Ptr,
                      MachinePointerInfo(Load->getMemOperand()->getValue()),
                      MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                      Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), Loads);
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit
  // vector truncating store into an i32 store.
  // XXX: We could also optimize other vector bit widths this way.
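  // For example, a <4 x i8> truncating store becomes a single 32-bit store of
  // (e0 | e1 << 8 | e2 << 16 | e3 << 24), built by the packing loop below.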
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                       MachinePointerInfo(Store->getMemOperand()->getValue()),
                       MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                       Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32 bits, and then need to separately extend to
    // 64 bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
  }

  // Lower constant address space loads of global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(
          GetUnderlyingObject(Load->getMemOperand()->getValue()))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
                                     getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

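  // Private-address extloads narrower than 32 bits are emulated below: load
  // the containing 32-bit register, then shift and extend the addressed byte
  // or halfword into place.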
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
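    // Sub-word private stores are emulated as a read-modify-write of the
    // containing 32-bit register: load the dword, clear the addressed byte or
    // halfword, OR in the shifted value, and store the dword back.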
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, MVT::i32), ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

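  // 32-bit unsigned division: approximate the reciprocal with URECIP, form a
  // candidate quotient and remainder from it, then correct each by at most one
  // step, as the comments below spell out.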
  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
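  // Split the i64 into 32-bit halves and combine them in float arithmetic:
  //   result = uint_to_fp(Lo) + uint_to_fp(Hi) * 2^32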
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
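  // Sign-extend in register with a shift pair: e.g. extending from i8 inside
  // an i32 is a shift left by 24 followed by an arithmetic shift right by 24.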
1168 MVT VT = Op.getSimpleValueType();
1169 SDLoc DL(Op);
1170 SDValue Shift = DAG.getConstant(BitsDiff, VT);
1171 // Shift left by 'Shift' bits.
1172 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
1173 // Signed shift Right by 'Shift' bits.
1174 return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
1175}
1176
1177SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1178 SelectionDAG &DAG) const {
1179 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1180 MVT VT = Op.getSimpleValueType();
1181 MVT ScalarVT = VT.getScalarType();
1182
Matt Arsenault5dbd5db2014-04-22 03:49:30 +00001183 if (!VT.isVector())
1184 return SDValue();
Matt Arsenaultfae02982014-03-17 18:58:11 +00001185
1186 SDValue Src = Op.getOperand(0);
Matt Arsenault5dbd5db2014-04-22 03:49:30 +00001187 SDLoc DL(Op);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001188
Matt Arsenault5dbd5db2014-04-22 03:49:30 +00001189 // TODO: Don't scalarize on Evergreen?
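  // Scalarize: sign_extend_inreg each element, then rebuild the vector with
  // BUILD_VECTOR.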
1190 unsigned NElts = VT.getVectorNumElements();
1191 SmallVector<SDValue, 8> Args;
1192 DAG.ExtractVectorElements(Src, Args, 0, NElts);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001193
Matt Arsenault5dbd5db2014-04-22 03:49:30 +00001194 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
1195 for (unsigned I = 0; I < NElts; ++I)
1196 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001197
Craig Topper48d114b2014-04-26 18:35:24 +00001198 return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001199}
1200
Tom Stellard75aadc22012-12-11 21:25:42 +00001201//===----------------------------------------------------------------------===//
Tom Stellard50122a52014-04-07 19:45:41 +00001202// Custom DAG optimizations
1203//===----------------------------------------------------------------------===//
1204
1205static bool isU24(SDValue Op, SelectionDAG &DAG) {
1206 APInt KnownZero, KnownOne;
1207 EVT VT = Op.getValueType();
1208 DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
1209
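  // Op fits in 24 unsigned bits if every bit above bit 23 is known to be zero.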
1210 return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
1211}
1212
1213static bool isI24(SDValue Op, SelectionDAG &DAG) {
1214 EVT VT = Op.getValueType();
1215
 1216  // In order for this to be a signed 24-bit value, bit 23 must
 1217  // be a sign bit.
 1218  return VT.getSizeInBits() >= 24 && // Types narrower than 24 bits should be
 1219                                     // treated as unsigned 24-bit values.
 1220         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
1221}
1222
1223static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
1224
1225 SelectionDAG &DAG = DCI.DAG;
1226 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1227 EVT VT = Op.getValueType();
1228
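  // Only the low 24 bits of the operand feed the 24-bit multiply, so let
  // SimplifyDemandedBits strip anything that only affects the upper bits.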
1229 APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
1230 APInt KnownZero, KnownOne;
1231 TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
1232 if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
1233 DCI.CommitTargetLoweringOpt(TLO);
1234}
1235
1236SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
1237 DAGCombinerInfo &DCI) const {
1238 SelectionDAG &DAG = DCI.DAG;
1239 SDLoc DL(N);
1240
 1241  switch (N->getOpcode()) {
1242 default: break;
1243 case ISD::MUL: {
1244 EVT VT = N->getValueType(0);
1245 SDValue N0 = N->getOperand(0);
1246 SDValue N1 = N->getOperand(1);
1247 SDValue Mul;
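    // If both operands provably fit in 24 bits, emit the hardware 24-bit
    // multiply (MUL_U24 / MUL_I24) on i32 and extend or truncate the result
    // back to the original type.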
1248
1249 // FIXME: Add support for 24-bit multiply with 64-bit output on SI.
1250 if (VT.isVector() || VT.getSizeInBits() > 32)
1251 break;
1252
1253 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
1254 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
1255 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
1256 Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
1257 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
1258 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
1259 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
1260 Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
1261 } else {
1262 break;
1263 }
1264
Tom Stellardaeeea8a2014-04-17 21:00:13 +00001265    // We need to use sext even for MUL_U24, because MUL_U24 is also used
 1266    // for signed multiplies of 8- and 16-bit types.
Tom Stellard50122a52014-04-07 19:45:41 +00001267 SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);
1268
1269 return Reg;
1270 }
1271 case AMDGPUISD::MUL_I24:
1272 case AMDGPUISD::MUL_U24: {
1273 SDValue N0 = N->getOperand(0);
1274 SDValue N1 = N->getOperand(1);
1275 simplifyI24(N0, DCI);
1276 simplifyI24(N1, DCI);
1277 return SDValue();
1278 }
1279 }
1280 return SDValue();
1281}
1282
1283//===----------------------------------------------------------------------===//
Tom Stellard75aadc22012-12-11 21:25:42 +00001284// Helper functions
1285//===----------------------------------------------------------------------===//
1286
Tom Stellardaf775432013-10-23 00:44:32 +00001287void AMDGPUTargetLowering::getOriginalFunctionArgs(
1288 SelectionDAG &DAG,
1289 const Function *F,
1290 const SmallVectorImpl<ISD::InputArg> &Ins,
1291 SmallVectorImpl<ISD::InputArg> &OrigIns) const {
1292
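  // Ins holds the legalized argument pieces (split or promoted vectors);
  // rebuild a list whose types reflect the arguments of the original
  // function.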
1293 for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
1294 if (Ins[i].ArgVT == Ins[i].VT) {
1295 OrigIns.push_back(Ins[i]);
1296 continue;
1297 }
1298
1299 EVT VT;
1300 if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
1301 // Vector has been split into scalars.
1302 VT = Ins[i].ArgVT.getVectorElementType();
1303 } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
1304 Ins[i].ArgVT.getVectorElementType() !=
1305 Ins[i].VT.getVectorElementType()) {
 1306       // Vector elements have been promoted.
1307 VT = Ins[i].ArgVT;
1308 } else {
 1309       // Vector has been split into smaller vectors.
1310 VT = Ins[i].VT;
1311 }
1312
1313 ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
1314 Ins[i].OrigArgIndex, Ins[i].PartOffset);
1315 OrigIns.push_back(Arg);
1316 }
1317}
1318
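// The hardware's canonical "true" is 1.0 for floating point and all ones for
// integers; "false" is 0.0 and 0 respectively.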
Tom Stellard75aadc22012-12-11 21:25:42 +00001319bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
1320 if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
1321 return CFP->isExactlyValue(1.0);
1322 }
1323 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
1324 return C->isAllOnesValue();
1325 }
1326 return false;
1327}
1328
1329bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
1330 if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
1331 return CFP->getValueAPF().isZero();
1332 }
1333 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
1334 return C->isNullValue();
1335 }
1336 return false;
1337}
1338
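// Return an SDValue for the virtual register bound to the physical live-in
// Reg, creating the virtual register and registering the live-in on first
// use within the function.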
1339SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
1340 const TargetRegisterClass *RC,
1341 unsigned Reg, EVT VT) const {
1342 MachineFunction &MF = DAG.getMachineFunction();
1343 MachineRegisterInfo &MRI = MF.getRegInfo();
1344 unsigned VirtualRegister;
1345 if (!MRI.isLiveIn(Reg)) {
1346 VirtualRegister = MRI.createVirtualRegister(RC);
1347 MRI.addLiveIn(Reg, VirtualRegister);
1348 } else {
1349 VirtualRegister = MRI.getLiveInVirtReg(Reg);
1350 }
1351 return DAG.getRegister(VirtualRegister, VT);
1352}
1353
1354#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
1355
1356const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
1357 switch (Opcode) {
Craig Topper062a2ba2014-04-25 05:30:21 +00001358 default: return nullptr;
Tom Stellard75aadc22012-12-11 21:25:42 +00001359 // AMDIL DAG nodes
Tom Stellard75aadc22012-12-11 21:25:42 +00001360 NODE_NAME_CASE(CALL);
1361 NODE_NAME_CASE(UMUL);
1362 NODE_NAME_CASE(DIV_INF);
1363 NODE_NAME_CASE(RET_FLAG);
1364 NODE_NAME_CASE(BRANCH_COND);
1365
1366 // AMDGPU DAG nodes
1367 NODE_NAME_CASE(DWORDADDR)
1368 NODE_NAME_CASE(FRACT)
1369 NODE_NAME_CASE(FMAX)
1370 NODE_NAME_CASE(SMAX)
1371 NODE_NAME_CASE(UMAX)
1372 NODE_NAME_CASE(FMIN)
1373 NODE_NAME_CASE(SMIN)
1374 NODE_NAME_CASE(UMIN)
Matt Arsenaultfae02982014-03-17 18:58:11 +00001375 NODE_NAME_CASE(BFE_U32)
1376 NODE_NAME_CASE(BFE_I32)
Matt Arsenaultb3458362014-03-31 18:21:13 +00001377 NODE_NAME_CASE(BFI)
1378 NODE_NAME_CASE(BFM)
Tom Stellard50122a52014-04-07 19:45:41 +00001379 NODE_NAME_CASE(MUL_U24)
1380 NODE_NAME_CASE(MUL_I24)
Tom Stellard75aadc22012-12-11 21:25:42 +00001381 NODE_NAME_CASE(URECIP)
Matt Arsenault21a3faa2014-02-24 21:01:21 +00001382 NODE_NAME_CASE(DOT4)
Tom Stellard75aadc22012-12-11 21:25:42 +00001383 NODE_NAME_CASE(EXPORT)
Tom Stellardff62c352013-01-23 02:09:03 +00001384 NODE_NAME_CASE(CONST_ADDRESS)
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00001385 NODE_NAME_CASE(REGISTER_LOAD)
1386 NODE_NAME_CASE(REGISTER_STORE)
Tom Stellard9fa17912013-08-14 23:24:45 +00001387 NODE_NAME_CASE(LOAD_CONSTANT)
1388 NODE_NAME_CASE(LOAD_INPUT)
1389 NODE_NAME_CASE(SAMPLE)
1390 NODE_NAME_CASE(SAMPLEB)
1391 NODE_NAME_CASE(SAMPLED)
1392 NODE_NAME_CASE(SAMPLEL)
Tom Stellardd3ee8c12013-08-16 01:12:06 +00001393 NODE_NAME_CASE(STORE_MSKOR)
Tom Stellardafcf12f2013-09-12 02:55:14 +00001394 NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
Tom Stellard75aadc22012-12-11 21:25:42 +00001395 }
1396}
Matt Arsenault0c274fe2014-03-25 18:18:27 +00001397
Matt Arsenault378bf9c2014-03-31 19:35:33 +00001398static void computeMaskedBitsForMinMax(const SDValue Op0,
1399 const SDValue Op1,
1400 APInt &KnownZero,
1401 APInt &KnownOne,
1402 const SelectionDAG &DAG,
1403 unsigned Depth) {
1404 APInt Op0Zero, Op0One;
1405 APInt Op1Zero, Op1One;
1406 DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
1407 DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);
1408
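  // A min/max always returns one of its operands, so a bit is known in the
  // result only if it is known to have the same value in both operands.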
1409 KnownZero = Op0Zero & Op1Zero;
1410 KnownOne = Op0One & Op1One;
1411}
1412
Matt Arsenault0c274fe2014-03-25 18:18:27 +00001413void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
1414 const SDValue Op,
1415 APInt &KnownZero,
1416 APInt &KnownOne,
1417 const SelectionDAG &DAG,
1418 unsigned Depth) const {
Matt Arsenault378bf9c2014-03-31 19:35:33 +00001419
Matt Arsenault0c274fe2014-03-25 18:18:27 +00001420 KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
Matt Arsenault378bf9c2014-03-31 19:35:33 +00001421 unsigned Opc = Op.getOpcode();
1422 switch (Opc) {
1423 case ISD::INTRINSIC_WO_CHAIN: {
1424 // FIXME: The intrinsic should just use the node.
1425 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
1426 case AMDGPUIntrinsic::AMDGPU_imax:
1427 case AMDGPUIntrinsic::AMDGPU_umax:
1428 case AMDGPUIntrinsic::AMDGPU_imin:
1429 case AMDGPUIntrinsic::AMDGPU_umin:
1430 computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
1431 KnownZero, KnownOne, DAG, Depth);
1432 break;
1433 default:
1434 break;
1435 }
1436
1437 break;
1438 }
1439 case AMDGPUISD::SMAX:
1440 case AMDGPUISD::UMAX:
1441 case AMDGPUISD::SMIN:
1442 case AMDGPUISD::UMIN:
1443 computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
1444 KnownZero, KnownOne, DAG, Depth);
1445 break;
1446 default:
1447 break;
1448 }
Matt Arsenault0c274fe2014-03-25 18:18:27 +00001449}