//===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "NVPTXTargetTransformInfo.h"
#include "NVPTXUtilities.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "NVPTXtti"

// Whether the given intrinsic reads threadIdx.x/y/z.
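// For example, the CUDA expression threadIdx.x is lowered by the front end to
// a call to the corresponding intrinsic (a sketch; the exact IR depends on
// the front end and headers in use):
//
//   %tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()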
static bool readsThreadIndex(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::nvvm_read_ptx_sreg_tid_x:
  case Intrinsic::nvvm_read_ptx_sreg_tid_y:
  case Intrinsic::nvvm_read_ptx_sreg_tid_z:
    return true;
  }
}

static bool readsLaneId(const IntrinsicInst *II) {
  return II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_laneid;
}

// Whether the given intrinsic is an atomic instruction in PTX.
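// For example (a sketch, assuming the usual pointer-overloaded name
// mangling), the f32 atomic add intrinsic corresponds to the PTX instruction
// atom.add.f32:
//
//   %old = call float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %p, float 1.0)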
static bool isNVVMAtomic(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::nvvm_atomic_load_add_f32:
  case Intrinsic::nvvm_atomic_load_inc_32:
  case Intrinsic::nvvm_atomic_load_dec_32:

  case Intrinsic::nvvm_atomic_add_gen_f_cta:
  case Intrinsic::nvvm_atomic_add_gen_f_sys:
  case Intrinsic::nvvm_atomic_add_gen_i_cta:
  case Intrinsic::nvvm_atomic_add_gen_i_sys:
  case Intrinsic::nvvm_atomic_and_gen_i_cta:
  case Intrinsic::nvvm_atomic_and_gen_i_sys:
  case Intrinsic::nvvm_atomic_cas_gen_i_cta:
  case Intrinsic::nvvm_atomic_cas_gen_i_sys:
  case Intrinsic::nvvm_atomic_dec_gen_i_cta:
  case Intrinsic::nvvm_atomic_dec_gen_i_sys:
  case Intrinsic::nvvm_atomic_inc_gen_i_cta:
  case Intrinsic::nvvm_atomic_inc_gen_i_sys:
  case Intrinsic::nvvm_atomic_max_gen_i_cta:
  case Intrinsic::nvvm_atomic_max_gen_i_sys:
  case Intrinsic::nvvm_atomic_min_gen_i_cta:
  case Intrinsic::nvvm_atomic_min_gen_i_sys:
  case Intrinsic::nvvm_atomic_or_gen_i_cta:
  case Intrinsic::nvvm_atomic_or_gen_i_sys:
  case Intrinsic::nvvm_atomic_exch_gen_i_cta:
  case Intrinsic::nvvm_atomic_exch_gen_i_sys:
  case Intrinsic::nvvm_atomic_xor_gen_i_cta:
  case Intrinsic::nvvm_atomic_xor_gen_i_sys:
    return true;
  }
}

bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
  // Without inter-procedural analysis, we conservatively assume that arguments
  // to __device__ functions are divergent.
  if (const Argument *Arg = dyn_cast<Argument>(V))
    return !isKernelFunction(*Arg->getParent());

  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Without pointer analysis, we conservatively assume values loaded from
    // the generic or local address space are divergent.
    if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
      unsigned AS = LI->getPointerAddressSpace();
      return AS == ADDRESS_SPACE_GENERIC || AS == ADDRESS_SPACE_LOCAL;
    }
    // Atomic instructions may cause divergence. Atomic instructions are
    // executed sequentially across all threads in a warp, so an earlier
    // executed thread may see different memory inputs than a later executed
    // thread. For example, suppose *a = 0 initially; then
    //
    //   atom.global.add.s32 d, [a], 1
    //
    // returns 0 for the first thread that enters the critical region and 1
    // for the second thread.
    if (I->isAtomic())
      return true;
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      // Instructions that read threadIdx are obviously divergent.
      if (readsThreadIndex(II) || readsLaneId(II))
        return true;
      // Handle the NVPTX atomic intrinsics that cannot be represented as an
      // atomic IR instruction.
      if (isNVVMAtomic(II))
        return true;
    }
    // Conservatively consider the return value of function calls as divergent.
    // We could analyze callees with bodies more precisely using
    // inter-procedural analysis.
    if (isa<CallInst>(I))
      return true;
  }

  return false;
}
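
// A minimal usage sketch (hypothetical caller): divergence analysis reaches
// this hook through the TargetTransformInfo wrapper, e.g.
//
//   const TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//   if (TTI.isSourceOfDivergence(&I))
//     ...; // I's value may differ across the threads of a warp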

int NVPTXTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // The machine code (SASS) simulates an i64 with two i32 registers.
    // Therefore, we estimate that arithmetic operations on i64 are twice as
    // expensive as those on types that fit into one machine register.
    if (LT.second.SimpleTy == MVT::i64)
      return 2 * LT.first;
    // Delegate other cases to the basic TTI.
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }
}
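
// A worked example of the heuristic above (a sketch): for a scalar i64 add,
// type legalization yields LT == {1, MVT::i64}, so the estimated cost is
// 2 * 1 = 2, whereas an i32 add keeps LT.second == MVT::i32 and falls back to
// the base implementation's estimate.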

void NVPTXTTIImpl::getUnrollingPreferences(Loop *L,
                                           TTI::UnrollingPreferences &UP) {
  BaseT::getUnrollingPreferences(L, UP);

  // Enable partial unrolling and runtime unrolling, but reduce the threshold.
  // This partially unrolls small loops, which are often unrolled by the
  // PTX-to-SASS compiler anyway, and unrolling earlier can be beneficial.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.Threshold / 4;
}
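
// For scale (an assumption about the surrounding defaults, not taken from
// this file): with a base UP.Threshold of 150, PartialThreshold becomes
// 150 / 4 = 37, so only quite small loop bodies are partially or runtime
// unrolled here.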