//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
             return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers:
  // (256 - 16) available VGPRs * 4 bytes each.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  AMDGPUAS ASST = ST->getAMDGPUAS();
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
             return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and the registers used for the PHI.
      // Add a small bonus for each such "if" statement.
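      // A rough sketch of the kind of loop this bonus is aimed at (the names
      // here are purely illustrative):
      //
      //   for (int i = 0; i < n; ++i) {   // i becomes a PHI of this loop
      //     if (a[i] > x) { ... }         // condition depends on that PHI
      //   }
      //
      // Once the loop is fully unrolled, both the branch and the PHI can
      // often be folded away.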
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                         << " for loop:\n" << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == ASST.PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == ASST.LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == ASST.PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == ASST.LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing whose
        // base is not a variable (a global or an argument); most likely we
        // will be unable to combine it.
        // Do not unroll too-deep inner loops for local memory, to give an
        // outer loop a chance to be unrolled for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. Allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      DEBUG(dbgs() << "Set unroll threshold " << Threshold << " for loop:\n"
                   << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

unsigned AMDGPUTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
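  // For example, on SI this works out to 256 >> 3 = 32 registers reported to
  // the vectorizer rather than the full 256 hardware VGPRs.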
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned AMDGPUTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned AMDGPUTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                            unsigned ChainSizeInBytes,
                                            VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

unsigned AMDGPUTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                             unsigned ChainSizeInBytes,
                                             VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  AMDGPUAS AS = ST->getAMDGPUAS();
  if (AddrSpace == AS.GLOBAL_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS_32BIT) {
    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return 128;
    return 512;
  }

  if (AddrSpace == AS.FLAT_ADDRESS)
    return 128;
  if (AddrSpace == AS.LOCAL_ADDRESS ||
      AddrSpace == AS.REGION_ADDRESS)
    return 64;
  if (AddrSpace == AS.PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
      (AddrSpace == AS.PARAM_D_ADDRESS ||
       AddrSpace == AS.PARAM_I_ADDRESS ||
       (AddrSpace >= AS.CONSTANT_BUFFER_0 &&
        AddrSpace <= AS.CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool AMDGPUTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == ST->getAMDGPUAS().PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool AMDGPUTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool AMDGPUTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

bool AMDGPUTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                       MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
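    // These intrinsics are assumed to share the operand layout relied on
    // below (a sketch for reference, not the authoritative signature):
    //   ArgOperand(0): pointer operand
    //   ArgOperand(2): atomic ordering (constant)
    //   ArgOperand(4): volatile flag (constant)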
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, but we do have legal
  // vector types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
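      // The cost below models the usual i64 multiply expansion as roughly
      // four 32-bit multiplies (quarter rate) plus four full-rate add/carry
      // style instructions; treat it as a sketch, not an exact instruction
      // count.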
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
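      // An fdiv with a 1.0 numerator is assumed to be selectable as a plain
      // reciprocal (e.g. v_rcp_f32), hence the quarter-rate cost below.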
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !ST->hasFP32Denormals()) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!ST->hasFP32Denormals()) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
    // Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == ST->getAMDGPUAS().PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}

bool AMDGPUTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  return false;
}

unsigned AMDGPUTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                       Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access the low or high half
      // of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

bool AMDGPUTTIImpl::areInlineCompatible(const Function *Caller,
                                        const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
    TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
    TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
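  // Inlining is considered compatible when every callee feature outside the
  // ignore list is also present in the caller, i.e. the callee's feature set
  // is a subset of the caller's.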
  return ((RealCallerBits & RealCalleeBits) == RealCalleeBits);
}