//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = UINT_MAX;
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    for (const Instruction &I : *BB) {
      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
        continue;

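      // GetUnderlyingObject looks through casts and nested GEPs, so an
      // alloca that reaches this GEP only indirectly is still detected here.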
      const Value *Ptr = GEP->getPointerOperand();
      const AllocaInst *Alloca =
          dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
      if (Alloca) {
        // We want to do whatever we can to limit the number of alloca
        // instructions that make it through to the code generator. Allocas
        // require us to use indirect addressing, which is slow and prone to
        // compiler bugs. If this loop does an address calculation on an
        // alloca ptr, then we want to use a higher than normal loop unroll
        // threshold. This will give SROA a better chance to eliminate these
        // allocas.
        //
        // Don't use the maximum allowed value here as it will make some
        // programs way too big.
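        //
        // For example (a hypothetical kernel sketch), fully unrolling
        //
        //   float tmp[4];
        //   for (int i = 0; i < 4; ++i)
        //     tmp[i] = f(src[i]); // GEP on an alloca pointer
        //
        // turns the variable-index GEPs into constant-index ones, giving
        // SROA a chance to promote tmp to registers and remove the alloca.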
        UP.Threshold = 800;
      }
    }
  }
}

unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) {
  if (Vec)
    return 0;

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) {
  return Vector ? 0 : 32;
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Semi-arbitrary large amount.
  return 64;
}

int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, but do have legal
  // vector types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

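  // The cost helpers used below (getFullRateInstrCost,
  // getQuarterRateInstrCost, get64BitInstrCost) encode the relative issue
  // rates of the VALU instruction classes; see their definitions in the
  // header.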
  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: {
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    if (SLT == MVT::i64) {
      // 64-bit and, or, xor, add and sub are typically split into 2 VALU
      // instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  }
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
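      // Approximation: an i64 multiply lowers to roughly four 32-bit
      // multiplies (quarter rate) plus four full-rate ops to combine the
      // partial products, hence 4 * quarter + (2 * 2) * full below.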
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;

  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it accounts for
    // most of the cost, but the current lowering is also not entirely
    // correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();

      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    // Assuming no fp32 denormals lowering.
    if (SLT == MVT::f32 || SLT == MVT::f16) {
      assert(!ST->hasFP32Denormals() && "will change when supported");
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
      return LT.first * NElts * Cost;
    }

    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
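  //
  // The constant 10 below is a heuristic: control flow is far more expensive
  // than a single ALU operation on the GPU, so branches and returns are
  // priced well above the base cost to discourage creating extra control
  // flow.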
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
    // Extracts are just reads of a subregister, so they are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
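    // (An Index of ~0u is the sentinel TTI passes when the element index is
    // not a compile-time constant.)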
    return Index == ~0u ? 2 : 0;
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isIntrinsicSourceOfDivergence(const TargetIntrinsicInfo *TII,
                                          const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::not_intrinsic:
    // This means we have an intrinsic that isn't defined in
    // IntrinsicsAMDGPU.td, so break out to the name-based target intrinsic
    // lookup below.
    break;

  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::r600_read_tidig_x:
  case Intrinsic::r600_read_tidig_y:
  case Intrinsic::r600_read_tidig_z:
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_cmpswap:
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_ps_live:
    return true;
  }

  StringRef Name = I->getCalledFunction()->getName();
  switch (TII->lookupName((const char *)Name.bytes_begin(), Name.size())) {
  default:
    return false;
  case AMDGPUIntrinsic::SI_tid:
  case AMDGPUIntrinsic::SI_fs_interp:
  case AMDGPUIntrinsic::SI_fs_constant:
    return true;
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  if (!AMDGPU::isShader(F->getCallingConv()))
    return true;

  // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
  if (F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::InReg) ||
      F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::ByVal))
    return true;

  // Everything else is in VGPRs.
  return false;
}

///
/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because each work
  // item has its own copy of private memory: threads can execute the load
  // instruction with the same inputs and get different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
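  //
  // For example, two lanes executing the same scratch load with the same IR
  // pointer value each read their own stack slot and may observe different
  // values.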
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // its original value, so the values returned differ across threads.
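  //
  // For example, if every lane performs an atomicrmw add of 1 on the same
  // address, the returned "old" values are 0, 1, 2, ... across the lanes.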
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    return isIntrinsicSourceOfDivergence(TM.getIntrinsicInfo(), Intrinsic);
  }

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}