//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "NVPTXISelLowering.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXTargetObjectFile.h"
#include "NVPTXUtilities.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>

#undef DEBUG_TYPE
#define DEBUG_TYPE "nvptx-lower"

using namespace llvm;

static unsigned int uniqueCallSite = 0;

static cl::opt<bool>
sched4reg("nvptx-sched4reg",
          cl::desc("NVPTX Specific: schedule for register pressure"),
          cl::init(false));

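// IsPTXVectorType - Check whether a vector MVT maps directly onto a PTX
// vector load/store. As an illustrative (not authoritative) sketch of what
// "directly" means here: a <2 x float> value can be kept whole and selected
// to ld.v2.f32 / st.v2.f32, while a type not listed below, such as
// <4 x double>, must first be split into smaller pieces.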
static bool IsPTXVectorType(MVT VT) {
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::v2i8:
  case MVT::v4i8:
  case MVT::v2i16:
  case MVT::v4i16:
  case MVT::v2i32:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v2f32:
  case MVT::v4f32:
  case MVT::v2f64:
    return true;
  }
}

// NVPTXTargetLowering Constructor.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
    : TargetLowering(TM, new NVPTXTargetObjectFile()),
      nvTM(&TM),
      nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {

  // Always lower memset, memcpy, and memmove intrinsics to load/store
  // instructions, rather than generating calls to memset, memcpy, or memmove.
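  // (Illustrative: with these limits, a small constant-size memcpy is
  // expected to expand inline into a short sequence of ld/st instructions
  // rather than a libcall; the exact expansion is chosen by SelectionDAG.)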
  MaxStoresPerMemset = (unsigned)0xFFFFFFFF;
  MaxStoresPerMemcpy = (unsigned)0xFFFFFFFF;
  MaxStoresPerMemmove = (unsigned)0xFFFFFFFF;

  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  // Jump is expensive. Don't create extra control flow for 'and' and 'or'
  // condition branches.
  setJumpIsExpensive(true);

  // By default, use source scheduling.
  if (sched4reg)
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Source);

  addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
  addRegisterClass(MVT::i8, &NVPTX::Int8RegsRegClass);
  addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
  addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
  addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
  addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
  addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);

  // Operations not directly supported by NVPTX.
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (nvptxSubtarget.hasROT64()) {
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
  }
  if (nvptxSubtarget.hasROT32()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
  } else {
    setOperationAction(ISD::ROTL, MVT::i32, Expand);
    setOperationAction(ISD::ROTR, MVT::i32, Expand);
  }

  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  // Indirect branch is not supported.
  // This also disables jump table creation.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  // We want to legalize constant-related memmove and memcpy intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Turn FP extload into load/fextend.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PTX does not support load/store of predicate registers.
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i32, MVT::i1, Expand);
  setTruncStoreAction(MVT::i16, MVT::i1, Expand);
  setTruncStoreAction(MVT::i8, MVT::i1, Expand);

  // This is legal in NVPTX.
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);

  // TRAP can be lowered to PTX trap.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Register custom handling for vector loads/stores.
  for (int i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (IsPTXVectorType(VT)) {
      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
    }
  }

  // Now deduce the information based on the above-mentioned actions.
  computeRegisterProperties();
}

const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case NVPTXISD::CALL: return "NVPTXISD::CALL";
  case NVPTXISD::RET_FLAG: return "NVPTXISD::RET_FLAG";
  case NVPTXISD::Wrapper: return "NVPTXISD::Wrapper";
  case NVPTXISD::NVBuiltin: return "NVPTXISD::NVBuiltin";
  case NVPTXISD::DeclareParam: return "NVPTXISD::DeclareParam";
  case NVPTXISD::DeclareScalarParam:
    return "NVPTXISD::DeclareScalarParam";
  case NVPTXISD::DeclareRet: return "NVPTXISD::DeclareRet";
  case NVPTXISD::DeclareRetParam: return "NVPTXISD::DeclareRetParam";
  case NVPTXISD::PrintCall: return "NVPTXISD::PrintCall";
  case NVPTXISD::LoadParam: return "NVPTXISD::LoadParam";
  case NVPTXISD::StoreParam: return "NVPTXISD::StoreParam";
  case NVPTXISD::StoreParamS32: return "NVPTXISD::StoreParamS32";
  case NVPTXISD::StoreParamU32: return "NVPTXISD::StoreParamU32";
  case NVPTXISD::MoveToParam: return "NVPTXISD::MoveToParam";
  case NVPTXISD::CallArgBegin: return "NVPTXISD::CallArgBegin";
  case NVPTXISD::CallArg: return "NVPTXISD::CallArg";
  case NVPTXISD::LastCallArg: return "NVPTXISD::LastCallArg";
  case NVPTXISD::CallArgEnd: return "NVPTXISD::CallArgEnd";
  case NVPTXISD::CallVoid: return "NVPTXISD::CallVoid";
  case NVPTXISD::CallVal: return "NVPTXISD::CallVal";
  case NVPTXISD::CallSymbol: return "NVPTXISD::CallSymbol";
  case NVPTXISD::Prototype: return "NVPTXISD::Prototype";
  case NVPTXISD::MoveParam: return "NVPTXISD::MoveParam";
  case NVPTXISD::MoveRetval: return "NVPTXISD::MoveRetval";
  case NVPTXISD::MoveToRetval: return "NVPTXISD::MoveToRetval";
  case NVPTXISD::StoreRetval: return "NVPTXISD::StoreRetval";
  case NVPTXISD::PseudoUseParam: return "NVPTXISD::PseudoUseParam";
  case NVPTXISD::RETURN: return "NVPTXISD::RETURN";
  case NVPTXISD::CallSeqBegin: return "NVPTXISD::CallSeqBegin";
  case NVPTXISD::CallSeqEnd: return "NVPTXISD::CallSeqEnd";
  case NVPTXISD::LoadV2: return "NVPTXISD::LoadV2";
  case NVPTXISD::LoadV4: return "NVPTXISD::LoadV4";
  case NVPTXISD::LDGV2: return "NVPTXISD::LDGV2";
  case NVPTXISD::LDGV4: return "NVPTXISD::LDGV4";
  case NVPTXISD::LDUV2: return "NVPTXISD::LDUV2";
  case NVPTXISD::LDUV4: return "NVPTXISD::LDUV4";
  case NVPTXISD::StoreV2: return "NVPTXISD::StoreV2";
  case NVPTXISD::StoreV4: return "NVPTXISD::StoreV4";
  }
}

bool NVPTXTargetLowering::shouldSplitVectorElementType(EVT VT) const {
  return VT == MVT::i1;
}

SDValue
NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
}

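// getPrototype - Construct the PTX ".callprototype" string used for indirect
// calls. As an illustrative sketch (assuming a 64-bit target; the exact
// string depends on the argument types, alignment, and SM version), a callee
// of C type "float fn(float *p, int i)" under the ABI would produce:
//   prototype_<n> : .callprototype (.param .b32 _) _ (.param .b64 _, .param .b32 _);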
std::string
NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  unsigned retAlignment) const {
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  std::stringstream O;
  O << "prototype_" << uniqueCallSite << " : .callprototype ";

  if (retTy->getTypeID() == Type::VoidTyID)
    O << "()";
  else {
    O << "(";
    if (isABI) {
      if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
        unsigned size = 0;
        if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
          size = ITy->getBitWidth();
          if (size < 32) size = 32;
        } else {
          assert(retTy->isFloatingPointTy() &&
                 "Floating point type expected here");
          size = retTy->getPrimitiveSizeInBits();
        }

        O << ".param .b" << size << " _";
      } else if (isa<PointerType>(retTy))
        O << ".param .b" << getPointerTy().getSizeInBits() << " _";
      else {
        if ((retTy->getTypeID() == Type::StructTyID) ||
            isa<VectorType>(retTy)) {
          SmallVector<EVT, 16> vtparts;
          ComputeValueVTs(*this, retTy, vtparts);
          unsigned totalsz = 0;
          for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
            unsigned elems = 1;
            EVT elemtype = vtparts[i];
            if (vtparts[i].isVector()) {
              elems = vtparts[i].getVectorNumElements();
              elemtype = vtparts[i].getVectorElementType();
            }
            for (unsigned j = 0, je = elems; j != je; ++j) {
              unsigned sz = elemtype.getSizeInBits();
              if (elemtype.isInteger() && (sz < 8)) sz = 8;
              totalsz += sz/8;
            }
          }
          O << ".param .align " << retAlignment
            << " .b8 _[" << totalsz << "]";
        } else {
          assert(false && "Unknown return type");
        }
      }
    } else {
      SmallVector<EVT, 16> vtparts;
      ComputeValueVTs(*this, retTy, vtparts);
      unsigned idx = 0;
      for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
        unsigned elems = 1;
        EVT elemtype = vtparts[i];
        if (vtparts[i].isVector()) {
          elems = vtparts[i].getVectorNumElements();
          elemtype = vtparts[i].getVectorElementType();
        }

        for (unsigned j = 0, je = elems; j != je; ++j) {
          unsigned sz = elemtype.getSizeInBits();
          if (elemtype.isInteger() && (sz < 32)) sz = 32;
          O << ".reg .b" << sz << " _";
          if (j < je - 1) O << ", ";
          ++idx;
        }
        if (i < e - 1)
          O << ", ";
      }
    }
    O << ") ";
  }
  O << "_ (";

  bool first = true;
  MVT thePointerTy = getPointerTy();

  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    const Type *Ty = Args[i].Ty;
    if (!first) {
      O << ", ";
    }
    first = false;

    if (Outs[i].Flags.isByVal() == false) {
      unsigned sz = 0;
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
        if (sz < 32) sz = 32;
      } else if (isa<PointerType>(Ty))
        sz = thePointerTy.getSizeInBits();
      else
        sz = Ty->getPrimitiveSizeInBits();
      if (isABI)
        O << ".param .b" << sz << " ";
      else
        O << ".reg .b" << sz << " ";
      O << "_";
      continue;
    }
    const PointerType *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy && "Param with byval attribute should be a pointer type");
    Type *ETy = PTy->getElementType();

    if (isABI) {
      unsigned align = Outs[i].Flags.getByValAlign();
      unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
      O << ".param .align " << align << " .b8 ";
      O << "_";
      O << "[" << sz << "]";
      continue;
    } else {
      SmallVector<EVT, 16> vtparts;
      ComputeValueVTs(*this, ETy, vtparts);
      for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
        unsigned elems = 1;
        EVT elemtype = vtparts[i];
        if (vtparts[i].isVector()) {
          elems = vtparts[i].getVectorNumElements();
          elemtype = vtparts[i].getVectorElementType();
        }

        for (unsigned j = 0, je = elems; j != je; ++j) {
          unsigned sz = elemtype.getSizeInBits();
          if (elemtype.isInteger() && (sz < 32)) sz = 32;
          O << ".reg .b" << sz << " ";
          O << "_";
          if (j < je - 1) O << ", ";
        }
        if (i < e - 1)
          O << ", ";
      }
      continue;
    }
  }
  O << ");";
  return O.str();
}

SDValue
NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  DebugLoc &dl = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  ArgListTy &Args = CLI.Args;
  Type *retTy = CLI.RetTy;
  ImmutableCallSite *CS = CLI.CS;

  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  SDValue tempChain = Chain;
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getIntPtrConstant(uniqueCallSite, true));
  SDValue InFlag = Chain.getValue(1);

  assert((Outs.size() == Args.size()) &&
         "Unexpected number of arguments to function call");
  unsigned paramCount = 0;
  // Declare the .params or .regs needed to pass values to the function.
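  // (Illustrative PTX for one ABI scalar argument, assuming an i32 value
  // living in %r1: DeclareScalarParam prints ".param .b32 param0;" and
  // StoreParam prints "st.param.b32 [param0+0], %r1;".)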
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    EVT VT = Outs[i].VT;

    if (Outs[i].Flags.isByVal() == false) {
      // Plain scalar.
      // For ABI, declare .param .b<size> .param<n>;
      // for non-ABI, declare .reg .b<size> .param<n>;
      unsigned isReg = 1;
      if (isABI)
        isReg = 0;
      unsigned sz = VT.getSizeInBits();
      if (VT.isInteger() && (sz < 32)) sz = 32;
      SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue DeclareParamOps[] = { Chain,
                                    DAG.getConstant(paramCount, MVT::i32),
                                    DAG.getConstant(sz, MVT::i32),
                                    DAG.getConstant(isReg, MVT::i32),
                                    InFlag };
      Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
                          DeclareParamOps, 5);
      InFlag = Chain.getValue(1);
      SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
                                 DAG.getConstant(0, MVT::i32), OutVals[i],
                                 InFlag };

      unsigned opcode = NVPTXISD::StoreParam;
      if (isReg)
        opcode = NVPTXISD::MoveToParam;
      else {
        if (Outs[i].Flags.isZExt())
          opcode = NVPTXISD::StoreParamU32;
        else if (Outs[i].Flags.isSExt())
          opcode = NVPTXISD::StoreParamS32;
      }
      Chain = DAG.getNode(opcode, dl, CopyParamVTs, CopyParamOps, 5);

      InFlag = Chain.getValue(1);
      ++paramCount;
      continue;
    }
    // Struct or vector.
    SmallVector<EVT, 16> vtparts;
    const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
    assert(PTy && "Type of a byval parameter should be pointer");
    ComputeValueVTs(*this, PTy->getElementType(), vtparts);

    if (isABI) {
      // declare .param .align 16 .b8 .param<n>[<size>];
      unsigned sz = Outs[i].Flags.getByValSize();
      SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      // The ByValAlign in Outs[i].Flags is always set at this point, so we
      // don't need to worry about natural alignment or not.
      // See TargetLowering::LowerCallTo().
      SDValue DeclareParamOps[] = { Chain,
                                    DAG.getConstant(Outs[i].Flags.getByValAlign(),
                                                    MVT::i32),
                                    DAG.getConstant(paramCount, MVT::i32),
                                    DAG.getConstant(sz, MVT::i32),
                                    InFlag };
      Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
                          DeclareParamOps, 5);
      InFlag = Chain.getValue(1);
      unsigned curOffset = 0;
      for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
        unsigned elems = 1;
        EVT elemtype = vtparts[j];
        if (vtparts[j].isVector()) {
          elems = vtparts[j].getVectorNumElements();
          elemtype = vtparts[j].getVectorElementType();
        }
        for (unsigned k = 0, ke = elems; k != ke; ++k) {
          unsigned sz = elemtype.getSizeInBits();
          if (elemtype.isInteger() && (sz < 8)) sz = 8;
          SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                        OutVals[i],
                                        DAG.getConstant(curOffset,
                                                        getPointerTy()));
          SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
                                       MachinePointerInfo(), false, false,
                                       false, 0);
          SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
          SDValue CopyParamOps[] = { Chain,
                                     DAG.getConstant(paramCount, MVT::i32),
                                     DAG.getConstant(curOffset, MVT::i32),
                                     theVal, InFlag };
          Chain = DAG.getNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
                              CopyParamOps, 5);
          InFlag = Chain.getValue(1);
          curOffset += sz/8;
        }
      }
      ++paramCount;
      continue;
    }
    // Non-ABI: struct or vector.
    // Declare a bunch of .reg .b<size> .param<n>.
    unsigned curOffset = 0;
    for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
      unsigned elems = 1;
      EVT elemtype = vtparts[j];
      if (vtparts[j].isVector()) {
        elems = vtparts[j].getVectorNumElements();
        elemtype = vtparts[j].getVectorElementType();
      }
      for (unsigned k = 0, ke = elems; k != ke; ++k) {
        unsigned sz = elemtype.getSizeInBits();
        if (elemtype.isInteger() && (sz < 32)) sz = 32;
        SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareParamOps[] = { Chain,
                                      DAG.getConstant(paramCount, MVT::i32),
                                      DAG.getConstant(sz, MVT::i32),
                                      DAG.getConstant(1, MVT::i32),
                                      InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
                            DeclareParamOps, 5);
        InFlag = Chain.getValue(1);
        SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
                                      DAG.getConstant(curOffset,
                                                      getPointerTy()));
        SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
                                     MachinePointerInfo(), false, false,
                                     false, 0);
        SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
                                   DAG.getConstant(0, MVT::i32), theVal,
                                   InFlag };
        Chain = DAG.getNode(NVPTXISD::MoveToParam, dl, CopyParamVTs,
                            CopyParamOps, 5);
        InFlag = Chain.getValue(1);
        ++paramCount;
      }
    }
  }

  GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
  unsigned retAlignment = 0;

  // Handle the result values.
  unsigned retCount = 0;
  if (Ins.size() > 0) {
    SmallVector<EVT, 16> resvtparts;
    ComputeValueVTs(*this, retTy, resvtparts);

    // Declare one .param .align 16 .b8 func_retval0[<size>] for ABI, or
    // individual .reg .b<size> func_retval<0..> for non-ABI.
    unsigned resultsz = 0;
    for (unsigned i = 0, e = resvtparts.size(); i != e; ++i) {
      unsigned elems = 1;
      EVT elemtype = resvtparts[i];
      if (resvtparts[i].isVector()) {
        elems = resvtparts[i].getVectorNumElements();
        elemtype = resvtparts[i].getVectorElementType();
      }
      for (unsigned j = 0, je = elems; j != je; ++j) {
        unsigned sz = elemtype.getSizeInBits();
        if (isABI == false) {
          if (elemtype.isInteger() && (sz < 32)) sz = 32;
        } else {
          if (elemtype.isInteger() && (sz < 8)) sz = 8;
        }
        if (isABI == false) {
          SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
          SDValue DeclareRetOps[] = { Chain, DAG.getConstant(2, MVT::i32),
                                      DAG.getConstant(sz, MVT::i32),
                                      DAG.getConstant(retCount, MVT::i32),
                                      InFlag };
          Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
                              DeclareRetOps, 5);
          InFlag = Chain.getValue(1);
          ++retCount;
        }
        resultsz += sz;
      }
    }
    if (isABI) {
      if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
          retTy->isPointerTy()) {
        // A scalar needs to be at least 32 bits wide.
        if (resultsz < 32)
          resultsz = 32;
        SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
                                    DAG.getConstant(resultsz, MVT::i32),
                                    DAG.getConstant(0, MVT::i32), InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
                            DeclareRetOps, 5);
        InFlag = Chain.getValue(1);
      } else {
        if (Func) { // direct call
          if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment))
            retAlignment = getDataLayout()->getABITypeAlignment(retTy);
        } else { // indirect call
          const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction());
          if (!llvm::getAlign(*CallI, 0, retAlignment))
            retAlignment = getDataLayout()->getABITypeAlignment(retTy);
        }
        SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareRetOps[] = { Chain,
                                    DAG.getConstant(retAlignment, MVT::i32),
                                    DAG.getConstant(resultsz/8, MVT::i32),
                                    DAG.getConstant(0, MVT::i32), InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
                            DeclareRetOps, 5);
        InFlag = Chain.getValue(1);
      }
    }
  }

  if (!Func) {
    // This is the indirect function call case: PTX requires a prototype of
    // the form
    //   proto_0 : .callprototype (.param .b32 _) _ (.param .b32 _);
    // to be emitted, and the label has to be used as the last arg of the
    // call instruction. The prototype is embedded in a string and passed as
    // the operand of an INLINEASM SDNode.
    SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    std::string proto_string = getPrototype(retTy, Args, Outs, retAlignment);
    const char *asmstr = nvTM->getManagedStrPool()->
        getManagedString(proto_string.c_str())->c_str();
    SDValue InlineAsmOps[] = { Chain,
                               DAG.getTargetExternalSymbol(asmstr,
                                                           getPointerTy()),
                               DAG.getMDNode(0),
                               DAG.getTargetConstant(0, MVT::i32), InFlag };
    Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5);
    InFlag = Chain.getValue(1);
  }
  // Op to just print "call".
  SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue PrintCallOps[] = { Chain,
                             DAG.getConstant(isABI ? ((Ins.size() == 0) ? 0 : 1)
                                                   : retCount, MVT::i32),
                             InFlag };
  Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
                      dl, PrintCallVTs, PrintCallOps, 3);
  InFlag = Chain.getValue(1);

  // Ops to print out the function name.
  SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallVoidOps[] = { Chain, Callee, InFlag };
  Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps, 3);
  InFlag = Chain.getValue(1);

  // Ops to print out the param list.
  SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallArgBeginOps[] = { Chain, InFlag };
  Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
                      CallArgBeginOps, 2);
  InFlag = Chain.getValue(1);

  for (unsigned i = 0, e = paramCount; i != e; ++i) {
    unsigned opcode;
    if (i == (e - 1))
      opcode = NVPTXISD::LastCallArg;
    else
      opcode = NVPTXISD::CallArg;
    SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
                             DAG.getConstant(i, MVT::i32),
                             InFlag };
    Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
    InFlag = Chain.getValue(1);
  }
  SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallArgEndOps[] = { Chain,
                              DAG.getConstant(Func ? 1 : 0, MVT::i32),
                              InFlag };
  Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps,
                      3);
  InFlag = Chain.getValue(1);

  if (!Func) {
    SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue PrototypeOps[] = { Chain,
                               DAG.getConstant(uniqueCallSite, MVT::i32),
                               InFlag };
    Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
    InFlag = Chain.getValue(1);
  }

  // Generate loads from param memory/moves from registers for the result.
  if (Ins.size() > 0) {
    if (isABI) {
      unsigned resoffset = 0;
      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
        unsigned sz = Ins[i].VT.getSizeInBits();
        if (Ins[i].VT.isInteger() && (sz < 8)) sz = 8;
        std::vector<EVT> LoadRetVTs;
        LoadRetVTs.push_back(Ins[i].VT);
        LoadRetVTs.push_back(MVT::Other);
        LoadRetVTs.push_back(MVT::Glue);
        std::vector<SDValue> LoadRetOps;
        LoadRetOps.push_back(Chain);
        LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
        LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
        LoadRetOps.push_back(InFlag);
        SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, LoadRetVTs,
                                     &LoadRetOps[0], LoadRetOps.size());
        Chain = retval.getValue(1);
        InFlag = retval.getValue(2);
        InVals.push_back(retval);
        resoffset += sz/8;
      }
    } else {
      SmallVector<EVT, 16> resvtparts;
      ComputeValueVTs(*this, retTy, resvtparts);

      assert(Ins.size() == resvtparts.size() &&
             "Unexpected number of return values in non-ABI case");
      unsigned paramNum = 0;
      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
        assert(EVT(Ins[i].VT) == resvtparts[i] &&
               "Unexpected EVT type in non-ABI case");
        unsigned numelems = 1;
        EVT elemtype = Ins[i].VT;
        if (Ins[i].VT.isVector()) {
          numelems = Ins[i].VT.getVectorNumElements();
          elemtype = Ins[i].VT.getVectorElementType();
        }
        std::vector<SDValue> tempRetVals;
        for (unsigned j = 0; j < numelems; ++j) {
          std::vector<EVT> MoveRetVTs;
          MoveRetVTs.push_back(elemtype);
          MoveRetVTs.push_back(MVT::Other);
          MoveRetVTs.push_back(MVT::Glue);
          std::vector<SDValue> MoveRetOps;
          MoveRetOps.push_back(Chain);
          MoveRetOps.push_back(DAG.getConstant(0, MVT::i32));
          MoveRetOps.push_back(DAG.getConstant(paramNum, MVT::i32));
          MoveRetOps.push_back(InFlag);
          SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, MoveRetVTs,
                                       &MoveRetOps[0], MoveRetOps.size());
          Chain = retval.getValue(1);
          InFlag = retval.getValue(2);
          tempRetVals.push_back(retval);
          ++paramNum;
        }
        if (Ins[i].VT.isVector())
          InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, Ins[i].VT,
                                       &tempRetVals[0], tempRetVals.size()));
        else
          InVals.push_back(tempRetVals[0]);
      }
    }
  }
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(uniqueCallSite, true),
                             DAG.getIntPtrConstant(uniqueCallSite + 1, true),
                             InFlag);
  uniqueCallSite++;

  // Set isTailCall to false for now, until we figure out how to express
  // tail call optimization in PTX.
  isTailCall = false;
  return Chain;
}

// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
// (see LegalizeDAG.cpp). This is slow and uses local memory. We use
// extract/insert/build vector just as LegalizeOp() did in LLVM 2.5.
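// As an illustrative sketch: concatenating two v2f32 values becomes four
// EXTRACT_VECTOR_ELT nodes feeding a single BUILD_VECTOR of v4f32, with no
// stack traffic involved.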
SDValue NVPTXTargetLowering::
LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  SmallVector<SDValue, 8> Ops;
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i = 0; i < NumOperands; ++i) {
    SDValue SubOp = Node->getOperand(i);
    EVT VVT = SubOp.getNode()->getValueType(0);
    EVT EltVT = VVT.getVectorElementType();
    unsigned NumSubElem = VVT.getVectorNumElements();
    for (unsigned j = 0; j < NumSubElem; ++j) {
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
                                DAG.getIntPtrConstant(j)));
    }
  }
  return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
                     &Ops[0], Ops.size());
}

SDValue NVPTXTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::RETURNADDR: return SDValue();
  case ISD::FRAMEADDR: return SDValue();
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return Op;
  case ISD::BUILD_VECTOR:
  case ISD::EXTRACT_SUBVECTOR:
    return Op;
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  default:
    llvm_unreachable("Custom lowering not defined for operation");
  }
}

SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::i1)
    return LowerLOADi1(Op, DAG);
  else
    return SDValue();
}

// v = ld i1* addr
//   =>
// v1 = ld i8* addr
// v = trunc v1 to i1
SDValue NVPTXTargetLowering::
LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  LoadSDNode *LD = cast<LoadSDNode>(Node);
  DebugLoc dl = Node->getDebugLoc();
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
  assert(Node->getValueType(0) == MVT::i1 &&
         "Custom lowering for i1 load only");
  SDValue newLD = DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(),
                              LD->getPointerInfo(),
                              LD->isVolatile(), LD->isNonTemporal(),
                              LD->isInvariant(),
                              LD->getAlignment());
  SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
  // The legalizer (the caller) is expecting two values from the legalized
  // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
  // in LegalizeDAG.cpp, which also uses MergeValues.
  SDValue Ops[] = { result, LD->getChain() };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  EVT ValVT = Op.getOperand(1).getValueType();
  if (ValVT == MVT::i1)
    return LowerSTOREi1(Op, DAG);
  else if (ValVT.isVector())
    return LowerSTOREVector(Op, DAG);
  else
    return SDValue();
}

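// LowerSTOREVector - Turn a legal vector store into an NVPTXISD::StoreV2 or
// NVPTXISD::StoreV4 node whose operands are the scalar elements. As an
// illustrative sketch, a store of <4 x float> carries four f32 operands and
// is eventually selected to a single "st.v4.f32 [addr], {e0, e1, e2, e3};".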
SDValue
NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue Val = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();
  EVT ValVT = Val.getValueType();

  if (ValVT.isVector()) {
    // We only handle "native" vector sizes for now, e.g. <4 x double> is not
    // legal. We can (and should) split that into 2 stores of <2 x double> here
    // but I'm leaving that as a TODO for now.
    if (!ValVT.isSimple())
      return SDValue();
    switch (ValVT.getSimpleVT().SimpleTy) {
    default: return SDValue();
    case MVT::v2i8:
    case MVT::v2i16:
    case MVT::v2i32:
    case MVT::v2i64:
    case MVT::v2f32:
    case MVT::v2f64:
    case MVT::v4i8:
    case MVT::v4i16:
    case MVT::v4i32:
    case MVT::v4f32:
      // This is a "native" vector type.
      break;
    }

    unsigned Opcode = 0;
    EVT EltVT = ValVT.getVectorElementType();
    unsigned NumElts = ValVT.getVectorNumElements();

    // Since StoreV2 is a target node, we cannot rely on DAG type
    // legalization. Therefore, we must ensure the type is legal. For i1 and
    // i8, we set the stored type to i16 and propagate the "real" type as the
    // memory type.
    bool NeedExt = false;
    if (EltVT.getSizeInBits() < 16)
      NeedExt = true;

    switch (NumElts) {
    default: return SDValue();
    case 2:
      Opcode = NVPTXISD::StoreV2;
      break;
    case 4: {
      Opcode = NVPTXISD::StoreV4;
      break;
    }
    }

    SmallVector<SDValue, 8> Ops;

    // First is the chain.
    Ops.push_back(N->getOperand(0));

    // Then the split values.
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
                                   DAG.getIntPtrConstant(i));
      if (NeedExt)
        // ANY_EXTEND is correct here since the store will only look at the
        // lower-order bits anyway.
        ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
      Ops.push_back(ExtVal);
    }

    // Then any remaining arguments.
    for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
      Ops.push_back(N->getOperand(i));
    }

    MemSDNode *MemSD = cast<MemSDNode>(N);

    SDValue NewSt = DAG.getMemIntrinsicNode(Opcode, DL,
                                            DAG.getVTList(MVT::Other), &Ops[0],
                                            Ops.size(), MemSD->getMemoryVT(),
                                            MemSD->getMemOperand());

    return NewSt;
  }

  return SDValue();
}

// st i1 v, addr
//   =>
// v1 = zxt v to i8
// st i8, addr
SDValue NVPTXTargetLowering::
LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  StoreSDNode *ST = cast<StoreSDNode>(Node);
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3 = ST->getValue();
  assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Tmp3);
  SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                                ST->getPointerInfo(), isVolatile,
                                isNonTemporal, Alignment);
  return Result;
}

SDValue
NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, int idx,
                                EVT v) const {
  std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
  std::stringstream suffix;
  suffix << idx;
  *name += suffix.str();
  return DAG.getTargetExternalSymbol(name->c_str(), v);
}

SDValue
NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
  return getExtSymb(DAG, ".PARAM", idx, v);
}

SDValue
NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
  return getExtSymb(DAG, ".HLPPARAM", idx);
}

// Check to see if the kernel argument is an image*_t or sampler_t type.
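// (For example, an OpenCL kernel parameter declared as "image2d_t img"
// typically reaches the backend as a pointer to an opaque struct named
// "struct._image2d_t", which is what the list below matches; the exact name
// is produced by the front end.)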
bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
  static const char *const specialTypes[] = {
    "struct._image2d_t",
    "struct._image3d_t",
    "struct._sampler_t"
  };

  const Type *Ty = arg->getType();
  const PointerType *PTy = dyn_cast<PointerType>(Ty);

  if (!PTy)
    return false;

  if (!context)
    return false;

  const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
  const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";

  for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
    if (TypeName == specialTypes[i])
      return true;

  return false;
}

SDValue
NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl, SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const DataLayout *TD = getDataLayout();

  const Function *F = MF.getFunction();
  const AttributeSet &PAL = F->getAttributes();

  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;

  bool isKernel = llvm::isKernelFunction(*F);
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I) {
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  }
  assert(argTypes.size() == Ins.size() &&
         "Ins types and function types did not match");

  int idx = 0;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i, ++idx) {
    Type *Ty = argTypes[i];
    EVT ObjectVT = getValueType(Ty);
    assert(ObjectVT == Ins[i].VT &&
           "Ins type did not match function type");

    // If the kernel argument is an image*_t or sampler_t, convert it to an
    // i32 constant holding the parameter position. This can later be matched
    // in the AsmPrinter to output the correct mangled name.
    if (isImageOrSamplerVal(theArgs[i],
                            (theArgs[i]->getParent() ?
                             theArgs[i]->getParent()->getParent() : 0))) {
      assert(isKernel && "Only kernels can have image/sampler params");
      InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
      continue;
    }

    if (theArgs[i]->use_empty()) {
      // Argument is dead.
      InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT));
      continue;
    }

    // In the following cases, assign a node order of "idx+1" to newly
    // created nodes. The SDNodes for params have to appear in the same
    // order as their order of appearance in the original function.
    // "idx+1" holds that order.
    if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
      // A plain scalar.
      if (isABI || isKernel) {
        // If ABI, load from the param symbol.
        SDValue Arg = getParamSymbol(DAG, idx);
        // Conjure up a value that we can get the address space from.
        // FIXME: Using a constant here is a hack.
        Value *srcValue = Constant::getNullValue(PointerType::get(
            ObjectVT.getTypeForEVT(F->getContext()),
            llvm::ADDRESS_SPACE_PARAM));
        SDValue p = DAG.getLoad(ObjectVT, dl, Root, Arg,
                                MachinePointerInfo(srcValue), false, false,
                                false,
                                TD->getABITypeAlignment(ObjectVT.getTypeForEVT(
                                    F->getContext())));
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx + 1);
        InVals.push_back(p);
      } else {
        // If no ABI, just move the param symbol.
        SDValue Arg = getParamSymbol(DAG, idx, ObjectVT);
        SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx + 1);
        InVals.push_back(p);
      }
      continue;
    }

    // Param has the ByVal attribute.
    if (isABI || isKernel) {
      // Return MoveParam(param symbol).
      // Ideally, the param symbol can be returned directly,
      // but when SDNode builder decides to use it in a CopyToReg(),
      // machine instruction fails because TargetExternalSymbol
      // (not lowered) is target dependent, and CopyToReg assumes
      // the source is lowered.
      SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
      SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
      if (p.getNode())
        DAG.AssignOrdering(p.getNode(), idx + 1);
      if (isKernel)
        InVals.push_back(p);
      else {
        SDValue p2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
            DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32),
            p);
        InVals.push_back(p2);
      }
    } else {
      // Have to move a set of param symbols to registers, store them
      // locally, and return the local pointer in InVals.
      const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]);
      assert(elemPtrType &&
             "Byval parameter should be a pointer type");
      Type *elemType = elemPtrType->getElementType();
      // Compute the constituent parts.
      SmallVector<EVT, 16> vtparts;
      SmallVector<uint64_t, 16> offsets;
      ComputeValueVTs(*this, elemType, vtparts, &offsets, 0);
      unsigned totalsize = 0;
      for (unsigned j = 0, je = vtparts.size(); j != je; ++j)
        totalsize += vtparts[j].getStoreSizeInBits();
      SDValue localcopy = DAG.getFrameIndex(MF.getFrameInfo()->
                                            CreateStackObject(totalsize/8, 16,
                                                              false),
                                            getPointerTy());
      unsigned sizesofar = 0;
      std::vector<SDValue> theChains;
      for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
        unsigned numElems = 1;
        if (vtparts[j].isVector())
          numElems = vtparts[j].getVectorNumElements();
        for (unsigned k = 0, ke = numElems; k != ke; ++k) {
          EVT tmpvt = vtparts[j];
          if (tmpvt.isVector()) tmpvt = tmpvt.getVectorElementType();
          SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt,
                                    getParamSymbol(DAG, idx, tmpvt));
          SDValue addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
                                     DAG.getConstant(sizesofar, getPointerTy()));
          theChains.push_back(DAG.getStore(Chain, dl, arg, addr,
                                           MachinePointerInfo(), false, false,
                                           0));
          sizesofar += tmpvt.getStoreSizeInBits()/8;
          ++idx;
        }
      }
      --idx;
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &theChains[0],
                          theChains.size());
      InVals.push_back(localcopy);
    }
  }

  // Clang will check an explicit vararg list and issue an error if one is
  // present; however, it lets code with an implicit argument list, such as
  // f(), pass. We treat this case as if the argument list were empty.
  //if (F.isVarArg()) {
  // assert(0 && "VarArg not supported yet!");
  //}

  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &OutChains[0], OutChains.size()));

  return Chain;
}

SDValue
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {

  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  unsigned sizesofar = 0;
  unsigned idx = 0;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    SDValue theVal = OutVals[i];
    EVT theValType = theVal.getValueType();
    unsigned numElems = 1;
    if (theValType.isVector())
      numElems = theValType.getVectorNumElements();
    for (unsigned j = 0, je = numElems; j != je; ++j) {
      SDValue tmpval = theVal;
      if (theValType.isVector())
        tmpval = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                             theValType.getVectorElementType(),
                             tmpval, DAG.getIntPtrConstant(j));
      Chain = DAG.getNode(isABI ? NVPTXISD::StoreRetval
                                : NVPTXISD::MoveToRetval,
                          dl, MVT::Other,
                          Chain,
                          DAG.getConstant(isABI ? sizesofar : idx, MVT::i32),
                          tmpval);
      if (theValType.isVector())
        sizesofar += theValType.getVectorElementType().getStoreSizeInBits()/8;
      else
        sizesofar += theValType.getStoreSizeInBits()/8;
      ++idx;
    }
  }

  return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
}

void
NVPTXTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  std::string &Constraint,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {
  if (Constraint.length() > 1)
    return;
  else
    TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// NVPTX supports vectors of legal types of any length in intrinsics, because
// the NVPTX-specific type legalizer will legalize them to a PTX-supported
// length.
bool
NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
  if (isTypeLegal(VT))
    return true;
  if (VT.isVector()) {
    MVT eVT = VT.getVectorElementType();
    if (isTypeLegal(eVT))
      return true;
  }
  return false;
}

// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// TgtMemIntrinsic because we need the information that is only available in
// the "Value" type of the destination pointer. In particular, the address
// space information.
bool
NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                                        unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;

  case Intrinsic::nvvm_atomic_load_add_f32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.vol = 0;
    Info.readMem = true;
    Info.writeMem = true;
    Info.align = 0;
    return true;

  case Intrinsic::nvvm_atomic_load_inc_32:
  case Intrinsic::nvvm_atomic_load_dec_32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.vol = 0;
    Info.readMem = true;
    Info.writeMem = true;
    Info.align = 0;
    return true;

  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
      Info.memVT = MVT::i32;
    else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
      Info.memVT = getPointerTy();
    else
      Info.memVT = MVT::f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.vol = 0;
    Info.readMem = true;
    Info.writeMem = false;
    Info.align = 0;
    return true;
  }
  return false;
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
/// Used to guide target-specific optimizations, like loop strength reduction
/// (LoopStrengthReduce.cpp) and memory optimization for address mode
/// (CodeGenPrepare.cpp).
bool
NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {

  // AddrMode - This represents an addressing mode of:
  //   BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  //
  // The legal address modes are
  // - [avar]
  // - [areg]
  // - [areg+immoff]
  // - [immAddr]
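  //
  // Illustrative PTX (assuming f32 global loads): [avar] would appear as
  // "ld.global.f32 %f0, [foo];" and [areg+immoff] as
  // "ld.global.f32 %f0, [%rd1+4];".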

  if (AM.BaseGV) {
    if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
      return false;
    return true;
  }

  switch (AM.Scale) {
  case 0: // "r", "r+i" or "i" is allowed
    break;
  case 1:
    if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
      return false;
    // Otherwise we have r+i.
    break;
  default:
    // No scale > 1 is allowed.
    return false;
  }
  return true;
}

//===----------------------------------------------------------------------===//
// NVPTX Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
NVPTXTargetLowering::ConstraintType
NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'r':
    case 'h':
    case 'c':
    case 'l':
    case 'f':
    case 'd':
    case '0':
    case 'N':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                  EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'c':
      return std::make_pair(0U, &NVPTX::Int8RegsRegClass);
    case 'h':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'r':
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
    case 'l':
    case 'N':
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
    case 'f':
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
    case 'd':
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
  return 4;
}

/// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
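/// As an illustrative sketch, a <2 x float> load is rebuilt here as an
/// NVPTXISD::LoadV2 node producing two f32 values plus a chain, which is
/// later selected to a single "ld.v2.f32 {%f0, %f1}, [addr];".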
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
                              SmallVectorImpl<SDValue> &Results) {
  EVT ResVT = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();

  assert(ResVT.isVector() && "Vector load must have vector type");

  // We only handle "native" vector sizes for now, e.g. <4 x double> is not
  // legal. We can (and should) split that into 2 loads of <2 x double> here
  // but I'm leaving that as a TODO for now.
  assert(ResVT.isSimple() && "Can only handle simple types");
  switch (ResVT.getSimpleVT().SimpleTy) {
  default: return;
  case MVT::v2i8:
  case MVT::v2i16:
  case MVT::v2i32:
  case MVT::v2i64:
  case MVT::v2f32:
  case MVT::v2f64:
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v4i32:
  case MVT::v4f32:
    // This is a "native" vector type.
    break;
  }

  EVT EltVT = ResVT.getVectorElementType();
  unsigned NumElts = ResVT.getVectorNumElements();

  // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
  // Therefore, we must ensure the type is legal. For i1 and i8, we set the
  // loaded type to i16 and propagate the "real" type as the memory type.
  bool NeedTrunc = false;
  if (EltVT.getSizeInBits() < 16) {
    EltVT = MVT::i16;
    NeedTrunc = true;
  }

  unsigned Opcode = 0;
  SDVTList LdResVTs;

  switch (NumElts) {
  default: return;
  case 2:
    Opcode = NVPTXISD::LoadV2;
    LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
    break;
  case 4: {
    Opcode = NVPTXISD::LoadV4;
    EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    LdResVTs = DAG.getVTList(ListVTs, 5);
    break;
  }
  }

  SmallVector<SDValue, 8> OtherOps;

  // Copy regular operands.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OtherOps.push_back(N->getOperand(i));

  LoadSDNode *LD = cast<LoadSDNode>(N);

  // The select routine does not have access to the LoadSDNode instance, so
  // pass along the extension information.
  OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));

  SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
                                          OtherOps.size(), LD->getMemoryVT(),
                                          LD->getMemOperand());

  SmallVector<SDValue, 4> ScalarRes;

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Res = NewLD.getValue(i);
    if (NeedTrunc)
      Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
    ScalarRes.push_back(Res);
  }

  SDValue LoadChain = NewLD.getValue(NumElts);

  SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0],
                                 NumElts);

  Results.push_back(BuildVec);
  Results.push_back(LoadChain);
}

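// ReplaceINTRINSIC_W_CHAIN - Custom-legalize the ldg/ldu intrinsics. For a
// vector result the node is rebuilt as LDGV2/LDGV4 (or LDUV2/LDUV4); for the
// scalar i8 case the result type is widened to i16 while the memory type is
// kept at i8. (Illustrative summary of the cases handled below.)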
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
                                     SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Intrin = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();

  // Get the intrinsic ID.
  unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
  switch (IntrinNo) {
  default: return;
  case Intrinsic::nvvm_ldg_global_i:
  case Intrinsic::nvvm_ldg_global_f:
  case Intrinsic::nvvm_ldg_global_p:
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    EVT ResVT = N->getValueType(0);

    if (ResVT.isVector()) {
      // Vector LDG/LDU.

      unsigned NumElts = ResVT.getVectorNumElements();
      EVT EltVT = ResVT.getVectorElementType();

      // Since LDU/LDG are target nodes, we cannot rely on DAG type
      // legalization. Therefore, we must ensure the type is legal. For i1
      // and i8, we set the loaded type to i16 and propagate the "real" type
      // as the memory type.
      bool NeedTrunc = false;
      if (EltVT.getSizeInBits() < 16) {
        EltVT = MVT::i16;
        NeedTrunc = true;
      }

      unsigned Opcode = 0;
      SDVTList LdResVTs;

      switch (NumElts) {
      default: return;
      case 2:
        switch (IntrinNo) {
        default: return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV2;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV2;
          break;
        }
        LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
        break;
      case 4: {
        switch (IntrinNo) {
        default: return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV4;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV4;
          break;
        }
        EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
        LdResVTs = DAG.getVTList(ListVTs, 5);
        break;
      }
      }

      SmallVector<SDValue, 8> OtherOps;

      // Copy regular operands.

      OtherOps.push_back(Chain); // Chain
      // Skip operand 1 (intrinsic ID).
      // Others...
      for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
        OtherOps.push_back(N->getOperand(i));

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs,
                                              &OtherOps[0], OtherOps.size(),
                                              MemSD->getMemoryVT(),
                                              MemSD->getMemOperand());

      SmallVector<SDValue, 4> ScalarRes;

      for (unsigned i = 0; i < NumElts; ++i) {
        SDValue Res = NewLD.getValue(i);
        if (NeedTrunc)
          Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(),
                            Res);
        ScalarRes.push_back(Res);
      }

      SDValue LoadChain = NewLD.getValue(NumElts);

      SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT,
                                     &ScalarRes[0], NumElts);

      Results.push_back(BuildVec);
      Results.push_back(LoadChain);
    } else {
      // i8 LDG/LDU
      assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
             "Custom handling of non-i8 ldu/ldg?");

      // Just copy all operands as-is.
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        Ops.push_back(N->getOperand(i));

      // Force the output type to i16.
      SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      // We make sure the memory type is i8, which will be used during isel
      // to select the proper instruction.
      SDValue NewLD = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL,
                                              LdResVTs, &Ops[0],
                                              Ops.size(), MVT::i8,
                                              MemSD->getMemOperand());

      Results.push_back(NewLD.getValue(0));
      Results.push_back(NewLD.getValue(1));
    }
  }
  }
}

void NVPTXTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default: report_fatal_error("Unhandled custom legalization");
  case ISD::LOAD:
    ReplaceLoadVector(N, DAG, Results);
    return;
  case ISD::INTRINSIC_W_CHAIN:
    ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
    return;
  }
}