//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;
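
// Custom CCAssignFn helper: reserves a stack slot for the incoming value and
// records it as a memory location. Presumably referenced from the generated
// calling-convention code included below (AMDGPUGenCallingConv.inc).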
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
  // XXX: This can be changed to Custom once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
  }
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

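// A load/bitcast pair is treated as beneficial in all cases except when the
// cast would break elements of at least 32 bits into sub-32-bit pieces
// (smaller element loads are presumably more expensive on this hardware).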
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

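// Expands a Constant initializer into a chain of stores through InitPtr:
// scalar integer and FP constants become single stores, and aggregates are
// lowered element by element at the appropriate byte offsets.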
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0],
                       Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

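// Global addresses are lowered per address space: LDS globals become constant
// offsets into the kernel's local-memory allocation, while constant-address
// globals are materialized on the stack by emitting their initializer as
// stores and replacing the address with the resulting frame index.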
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");
Tom Stellardc026e8b2013-06-28 15:47:08 +0000334
Tom Stellard04c0e982014-01-22 19:24:21 +0000335 unsigned Offset;
336 if (MFI->LocalMemoryObjects.count(GV) == 0) {
337 uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
338 Offset = MFI->LDSSize;
339 MFI->LocalMemoryObjects[GV] = Offset;
340 // XXX: Account for alignment?
341 MFI->LDSSize += Size;
342 } else {
343 Offset = MFI->LocalMemoryObjects[GV];
344 }
345
346 return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
347 }
348 case AMDGPUAS::CONSTANT_ADDRESS: {
349 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
350 Type *EltType = GV->getType()->getElementType();
351 unsigned Size = TD->getTypeAllocSize(EltType);
352 unsigned Alignment = TD->getPrefTypeAlignment(EltType);
353
354 const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
355 const Constant *Init = Var->getInitializer();
356 int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
357 SDValue InitPtr = DAG.getFrameIndex(FI,
358 getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
359 SmallVector<SDNode*, 8> WorkList;
360
361 for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
362 E = DAG.getEntryNode()->use_end(); I != E; ++I) {
363 if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
364 continue;
365 WorkList.push_back(*I);
366 }
367 SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
368 for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
369 E = WorkList.end(); I != E; ++I) {
370 SmallVector<SDValue, 8> Ops;
371 Ops.push_back(Chain);
372 for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
373 Ops.push_back((*I)->getOperand(i));
374 }
375 DAG.UpdateNodeOperands(*I, &Ops[0], Ops.size());
376 }
377 return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
378 getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
379 }
380 }
Tom Stellardc026e8b2013-06-28 15:47:08 +0000381}
382
Tom Stellardd86003e2013-08-14 23:25:00 +0000383void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
384 SmallVectorImpl<SDValue> &Args,
385 unsigned Start,
386 unsigned Count) const {
387 EVT VT = Op.getValueType();
388 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
389 Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
390 VT.getVectorElementType(),
391 Op, DAG.getConstant(i, MVT::i32)));
392 }
393}
394
395SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
396 SelectionDAG &DAG) const {
397 SmallVector<SDValue, 8> Args;
398 SDValue A = Op.getOperand(0);
399 SDValue B = Op.getOperand(1);
400
401 ExtractVectorElements(A, DAG, Args, 0,
402 A.getValueType().getVectorNumElements());
403 ExtractVectorElements(B, DAG, Args, 0,
404 B.getValueType().getVectorNumElements());
405
406 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
407 &Args[0], Args.size());
408}
409
410SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
411 SelectionDAG &DAG) const {
412
413 SmallVector<SDValue, 8> Args;
414 EVT VT = Op.getValueType();
415 unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
416 ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
417 VT.getVectorNumElements());
418
419 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
420 &Args[0], Args.size());
421}
422
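// Frame indices are lowered to plain constants: the frame offset is scaled by
// 4 * stack width, which presumably converts a slot index into the element
// offset used by the register-indexed private stack.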
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
   static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDIL_abs:
    return LowerIntrinsicIABS(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_exp:
    return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDGPU_lrp:
    return LowerIntrinsicLRP(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_fraction:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDIL_max:
    return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imax:
    return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umax:
    return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDIL_min:
    return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imin:
    return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umin:
    return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDIL_round_nearest:
    return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

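// Scalarizes a vector load: emits one extending scalar load per element at the
// proper byte offset and reassembles the result with BUILD_VECTOR.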
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                    Load->getChain(), Ptr,
                    MachinePointerInfo(Load->getMemOperand()->getValue()),
                    MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                    Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), &Loads[0],
                     Loads.size());
}

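// Tries to pack a small vector truncating store (total memory size <= 32 bits)
// into a single i32 store by shifting and OR-ing the masked elements together;
// returns a null SDValue when the store does not match that pattern.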
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit
  // vector truncating store into an i32 store.
  // XXX: We could also handle other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  const SDValue &Value = Store->getValue();
  EVT VT = Value.getValueType();
  const SDValue &Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  SDValue Mask;
  switch(MemEltBits) {
  case 8:
    Mask = DAG.getConstant(0xFF, PackedVT);
    break;
  case 16:
    Mask = DAG.getConstant(0xFFFF, PackedVT);
    break;
  default:
    llvm_unreachable("Cannot lower this vector store");
  }
  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    EVT ElemVT = VT.getVectorElementType();
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
    Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
    SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
    Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
    }
  }
  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

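// Scalarizes a vector store into one truncating store per element, chained
// together with a TokenFactor.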
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
}

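// Custom load lowering. Constant-address loads of global variables are turned
// into REGISTER_LOADs from the stack copy created in LowerGlobalAddress; small
// (i8/i16) private-address loads are emulated by loading the containing dword
// and extracting the byte or halfword with shifts and masks.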
SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(GetUnderlyingObject(Load->getPointerInfo().V))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
                                     getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();
  unsigned Mask = 0;
  if (Load->getMemoryVT() == MVT::i8) {
    Mask = 0xff;
  } else if (Load->getMemoryVT() == MVT::i16) {
    Mask = 0xffff;
  }
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));
  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
  Ret = DAG.getNode(ISD::AND, DL, MVT::i32, Ret,
                    DAG.getConstant(Mask, MVT::i32));
  if (ExtType == ISD::SEXTLOAD) {
    SDValue SExtShift = DAG.getConstant(
        VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32);
    Ret = DAG.getNode(ISD::SHL, DL, MVT::i32, Ret, SExtShift);
    Ret = DAG.getNode(ISD::SRA, DL, MVT::i32, Ret, SExtShift);
  }

  return Ret;
}

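// Custom store lowering: first try to pack the whole vector store into a
// single i32 (MergeVectorStore), then fall back to scalarizing vector stores
// for the local and private address spaces; sub-dword private stores are
// emulated with a read-modify-write of the containing 32-bit register.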
SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      Store->getMemoryVT().bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue TruncPtr = DAG.getZExtOrTrunc(Store->getBasePtr(), DL, MVT::i32);
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));
    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, TruncPtr,
                                  DAG.getConstant(0x3, MVT::i32));
    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));
    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());
    SDValue MaskedValue = DAG.getNode(ISD::AND, DL, MVT::i32, SExtValue,
                                      DAG.getConstant(Mask, MVT::i32));
    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);
    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, MVT::i32), ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

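// 32-bit unsigned division and remainder, expanded around the URECIP
// approximation of 2^32 / Den: the rounding error is corrected and the
// quotient and remainder are adjusted by at most one; each step is commented
// inline below.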
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2];
  Ops[0] = Div;
  Ops[1] = Rem;
  return DAG.getMergeValues(Ops, 2, DL);
}

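// i64 -> f32 uint_to_fp: convert the low and high 32-bit halves separately and
// recombine as FloatLo + FloatHi * 2^32.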
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

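// For each lowered input argument, recover the value type it had before
// vectors were split or element types promoted, and record a matching
// InputArg in OrigIns.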
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                                      SelectionDAG &DAG,
                                      const Function *F,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

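// Returns a virtual register carrying the value of a physical live-in
// register, creating and registering the live-in copy on first use.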
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                const TargetRegisterClass *RC,
                                                unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}