//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;
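// Custom CCAssignFn helper, presumably referenced from the generated calling
// convention tables included just below: it assigns the argument a stack slot
// of its store size and records that location in State.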
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

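// Bitcasting a load is treated as beneficial unless it would turn 32-bit or
// wider load elements into sub-32-bit elements.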
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

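// Emit a chain of stores that writes the given constant initializer to memory
// at InitPtr, recursing element by element through aggregate initializers.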
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                                                GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0],
                       Chains.size());
  } else {
    Init->dump();
    llvm_unreachable("Unhandled constant initializer");
  }
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
                                        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD &&
          I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr,
                                             DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, &Ops[0], Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
                              getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

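// Append to Args one EXTRACT_VECTOR_ELT of Op for each index in the range
// [Start, Start + Count).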
void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
                                                 SmallVectorImpl<SDValue> &Args,
                                                 unsigned Start,
                                                 unsigned Count) const {
  EVT VT = Op.getValueType();
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
                               VT.getVectorElementType(),
                               Op, DAG.getConstant(i, MVT::i32)));
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  ExtractVectorElements(A, DAG, Args, 0,
                        A.getValueType().getVectorNumElements());
  ExtractVectorElements(B, DAG, Args, 0,
                        B.getValueType().getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  EVT VT = Op.getValueType();
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
                        VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

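// Frame indices are lowered to constant offsets computed from the target
// frame lowering's per-index offset and stack width.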
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering*>(
      getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDIL_abs:
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_exp:
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_fraction:
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDIL_max:
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_min:
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_round_nearest:
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    else
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

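// Split a vector load into one scalar load per element and rebuild the result
// with BUILD_VECTOR.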
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                    Load->getChain(), Ptr,
                    MachinePointerInfo(Load->getMemOperand()->getValue()),
                    MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                    Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
                     Loads.data(), Loads.size());
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  const SDValue &Value = Store->getValue();
  EVT VT = Value.getValueType();
  const SDValue &Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, PackedVT);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    EVT ElemVT = VT.getVectorElementType();
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
    Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
    SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
    Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
    }
  }
  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

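// Split a vector store into one truncating scalar store per element.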
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                            DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                            PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                         MachinePointerInfo(Store->getMemOperand()->getValue()),
                         MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                         Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() &&
      VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(GetUnderlyingObject(Load->getPointerInfo().V))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
                                     getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

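  // Sub-dword private extloads are emulated with a REGISTER_LOAD of the
  // containing dword, a shift by the byte offset, and an in-register
  // extension of the addressed byte or short.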
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

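  // Sub-dword private stores are emulated with a read-modify-write of the
  // containing dword: load it, clear the destination byte or short, OR in the
  // shifted value, and write the dword back.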
  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, MVT::i32), ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

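// Expand 32-bit unsigned division and remainder around the hardware's
// approximate reciprocal (URECIP), followed by correction of the quotient and
// remainder.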
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2];
  Ops[0] = Div;
  Ops[1] = Rem;
  return DAG.getMergeValues(Ops, 2, DL);
}

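// i64 -> f32 uint_to_fp: convert the low and high 32-bit halves separately and
// combine them as FloatLo + FloatHi * 2^32.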
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  unsigned SrcBits = ExtraVT.getScalarType().getSizeInBits();
  unsigned DestBits = ScalarVT.getSizeInBits();
  unsigned BitsDiff = DestBits - SrcBits;

  if (!Subtarget->hasBFE())
    return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);

  SDValue Src = Op.getOperand(0);
  if (VT.isVector()) {
    SDLoc DL(Op);
    // Need to scalarize this, and revisit each of the scalars later.
    // TODO: Don't scalarize on Evergreen?
    unsigned NElts = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Args;
    ExtractVectorElements(Src, DAG, Args, 0, NElts);

    SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
    for (unsigned I = 0; I < NElts; ++I)
      Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

    return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
  }

  if (SrcBits == 32) {
    SDLoc DL(Op);

    // If the source is 32-bits, this is really half of a 2-register pair, and
    // we need to discard the unused half of the pair.
    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, TruncSrc);
  }

  unsigned NElts = VT.isVector() ? VT.getVectorNumElements() : 1;

  // TODO: Match 64-bit BFE. SI has a 64-bit BFE, but it's scalar only so it
  // might not be worth the effort, and will need to expand to shifts when
  // fixing SGPR copies.
  if (SrcBits < 32 && DestBits <= 32) {
    SDLoc DL(Op);
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    if (DestBits != 32)
      Src = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVT, Src);

    // FIXME: This should use TargetConstant, but that hits assertions for
    // Evergreen.
    SDValue Ext = DAG.getNode(AMDGPUISD::BFE_I32, DL, ExtVT,
                              Op.getOperand(0), // Operand
                              DAG.getConstant(0, ExtVT), // Offset
                              DAG.getConstant(SrcBits, ExtVT)); // Width

    // Truncate to the original type if necessary.
    if (ScalarVT == MVT::i32)
      return Ext;
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Ext);
  }

  // For small types, extend to 32-bits first.
  if (SrcBits < 32) {
    SDLoc DL(Op);
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, ExtVT, Src);
    SDValue Ext32 = DAG.getNode(AMDGPUISD::BFE_I32,
                                DL,
                                ExtVT,
                                TruncSrc, // Operand
                                DAG.getConstant(0, ExtVT), // Offset
                                DAG.getConstant(SrcBits, ExtVT)); // Width

    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Ext32);
  }

  // For everything else, use the standard bitshift expansion.
  return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

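// Recover the original (pre-legalization) argument types from an InputArg
// list whose vectors may have been split or promoted.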
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}