//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

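// Check whether the branch condition \p Cond depends, directly or through a
// short chain of operands, on a PHI node that is not defined in any subloop
// of \p L. Such conditions may simplify once the loop body is unrolled, so
// getUnrollingPreferences() below rewards them with a small threshold bonus.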
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
            return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

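// Tune loop unrolling for AMDGPU. The base threshold is boosted, and it is
// raised further for loops whose GEPs address private (scratch) or local
// (LDS) memory with loop-dependent indices: fully unrolling such loops gives
// SROA a chance to remove allocas and lets DS accesses at different offsets
// combine.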
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  AMDGPUAS ASST = ST->getAMDGPUAS();
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
          return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == ASST.PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == ASST.LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == ASST.PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == ASST.LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not based on a variable; most likely we will be unable to
        // combine it.
        // Do not unroll too-deep inner loops for local memory, to give a
        // chance to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
              return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

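// Report the number of 32-bit registers the hardware actually provides.
// getNumberOfRegisters() below intentionally reports only a fraction of this
// so the vectorizer does not try to occupy the whole register file.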
unsigned AMDGPUTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned AMDGPUTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

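// Clamp the vectorization factor so that a vectorized access stays within a
// 128-bit register chunk; for loads this only applies when the element type
// is narrower than 32 bits.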
unsigned AMDGPUTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                            unsigned ChainSizeInBytes,
                                            VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

unsigned AMDGPUTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                             unsigned ChainSizeInBytes,
                                             VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

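// Maximum width, in bits, of a vectorized memory access for the given address
// space.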
unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  AMDGPUAS AS = ST->getAMDGPUAS();
  if (AddrSpace == AS.GLOBAL_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS_32BIT) {
    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return 128;
    return 512;
  }

  if (AddrSpace == AS.FLAT_ADDRESS)
    return 128;

  if (AddrSpace == AS.LOCAL_ADDRESS ||
      AddrSpace == AS.REGION_ADDRESS)
    return ST->useDS128() ? 128 : 64;

  if (AddrSpace == AS.PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
      (AddrSpace == AS.PARAM_D_ADDRESS ||
       AddrSpace == AS.PARAM_I_ADDRESS ||
       (AddrSpace >= AS.CONSTANT_BUFFER_0 &&
        AddrSpace <= AS.CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool AMDGPUTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == ST->getAMDGPUAS().PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool AMDGPUTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool AMDGPUTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

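// Describe the memory behavior of the AMDGPU DS atomic intrinsics so that
// target-independent analyses can treat them like ordinary atomic
// read-modify-write operations.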
bool AMDGPUTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                       MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

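// Cost model for arithmetic instructions. Costs are expressed in multiples of
// the full-rate VALU instruction cost and scaled by the number of pieces the
// legalized vector type is split into.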
int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, but the legal types, we
  // need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !ST->hasFP32Denormals()) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!ST->hasFP32Denormals()) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                              bool IsPairwise) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int AMDGPUTTIImpl::getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                          bool IsPairwise,
                                          bool IsUnsigned) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost();
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

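// Return true if argument \p A is passed in an SGPR and is therefore uniform:
// kernel arguments always are, while graphics shader arguments are only when
// marked 'inreg' or 'byval'.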
static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
    // Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == ST->getAMDGPUAS().PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}

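// Results of readfirstlane/readlane are scalar values and therefore known to
// be uniform across the wavefront.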
bool AMDGPUTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  return false;
}

unsigned AMDGPUTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                       Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access either the low or
      // the high half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

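// Inlining is compatible only when every subtarget feature required by the
// callee (ignoring the features on InlineFeatureIgnoreList) is also enabled
// in the caller.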
bool AMDGPUTTIImpl::areInlineCompatible(const Function *Caller,
                                        const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
    TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
    TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return ((RealCallerBits & RealCalleeBits) == RealCalleeBits);
}