Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 1 | //===- AMDGPUTargetTransformInfo.h - AMDGPU specific TTI --------*- C++ -*-===// |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 2 | // |
Chandler Carruth | 2946cd7 | 2019-01-19 08:50:56 +0000 | [diff] [blame] | 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 8 | // |
/// \file
/// This file describes a TargetTransformInfo::Concept conforming object
/// specific to the AMDGPU target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target independent and default TTI implementations handle the
/// rest.
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 14 | // |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 15 | //===----------------------------------------------------------------------===// |
| 16 | |
Matt Arsenault | 6b6a2c3 | 2016-03-11 08:00:27 +0000 | [diff] [blame] | 17 | #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |
| 18 | #define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 19 | |
| 20 | #include "AMDGPU.h" |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 21 | #include "AMDGPUSubtarget.h" |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 22 | #include "AMDGPUTargetMachine.h" |
Tom Stellard | 44b30b4 | 2018-05-22 02:03:23 +0000 | [diff] [blame] | 23 | #include "MCTargetDesc/AMDGPUMCTargetDesc.h" |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 24 | #include "Utils/AMDGPUBaseInfo.h" |
| 25 | #include "llvm/ADT/ArrayRef.h" |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 26 | #include "llvm/Analysis/TargetTransformInfo.h" |
| 27 | #include "llvm/CodeGen/BasicTTIImpl.h" |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 28 | #include "llvm/IR/Function.h" |
| 29 | #include "llvm/MC/SubtargetFeature.h" |
| 30 | #include "llvm/Support/MathExtras.h" |
| 31 | #include <cassert> |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 32 | |
| 33 | namespace llvm { |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 34 | |
Matt Arsenault | 9651813 | 2016-03-25 01:00:32 +0000 | [diff] [blame] | 35 | class AMDGPUTargetLowering; |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 36 | class Loop; |
| 37 | class ScalarEvolution; |
| 38 | class Type; |
| 39 | class Value; |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 40 | |
/// Target-transform-info shared by all AMDGPU subtargets (both the GCN and
/// R600 TTI objects below hold one of these as \c CommonTTI and delegate the
/// target-independent pieces to it).
class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
  using BaseT = BasicTTIImplBase<AMDGPUTTIImpl>;
  using TTI = TargetTransformInfo;

  // BasicTTIImplBase uses CRTP and calls back into the private helpers.
  friend BaseT;

  Triple TargetTriple;

public:
  explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      TargetTriple(TM->getTargetTriple()) {}

  /// Tune loop-unrolling heuristics for \p L; defined out of line in the
  /// implementation file.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);
};
| 57 | |
/// Target-transform-info implementation for the GCN subtargets. Answers cost
/// and legality queries using GCNSubtarget details; anything common to all
/// AMDGPU targets is forwarded to \c CommonTTI.
class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
  using BaseT = BasicTTIImplBase<GCNTTIImpl>;
  using TTI = TargetTransformInfo;

  // BasicTTIImplBase uses CRTP and calls back into the private accessors.
  friend BaseT;

  const GCNSubtarget *ST;
  const AMDGPUTargetLowering *TLI;
  // Delegate for preferences shared with the R600 implementation.
  AMDGPUTTIImpl CommonTTI;
  // True when the function's calling convention is a graphics-shader one;
  // used to disable flat-address-space queries (see getFlatAddressSpace).
  bool IsGraphicsShader;

  // Subtarget features that should NOT block inlining between a caller and
  // callee whose feature strings differ (consumed by areInlineCompatible).
  const FeatureBitset InlineFeatureIgnoreList = {
    // Codegen control options which don't matter.
    AMDGPU::FeatureEnableLoadStoreOpt,
    AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding,
    AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca,
    AMDGPU::FeatureUnalignedBufferAccess,
    AMDGPU::FeatureUnalignedScratchAccess,

    AMDGPU::FeatureAutoWaitcntBeforeBarrier,

    // Property of the kernel/environment which can't actually differ.
    AMDGPU::FeatureSGPRInitBug,
    AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,
    AMDGPU::FeatureCodeObjectV3,

    // Perf-tuning features
    AMDGPU::FeatureFastFMAF32,
    AMDGPU::HalfRate64Ops
  };

  const GCNSubtarget *getST() const { return ST; }
  const AMDGPUTargetLowering *getTLI() const { return TLI; }

  // Relative instruction costs expressed in multiples of TCC_Basic,
  // classified by the issue rate of the instruction on the hardware.
  static inline int getFullRateInstrCost() {
    return TargetTransformInfo::TCC_Basic;
  }

  static inline int getHalfRateInstrCost() {
    return 2 * TargetTransformInfo::TCC_Basic;
  }

  // TODO: The size is usually 8 bytes, but takes 4x as many cycles. Maybe
  // should be 2 or 4.
  static inline int getQuarterRateInstrCost() {
    return 3 * TargetTransformInfo::TCC_Basic;
  }

  // On some parts, normal fp64 operations are half rate, and others
  // quarter. This also applies to some integer operations.
  inline int get64BitInstrCost() const {
    return ST->hasHalfRate64Ops() ?
      getHalfRateInstrCost() : getQuarterRateInstrCost();
  }

public:
  explicit GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(static_cast<const GCNSubtarget*>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()),
      CommonTTI(TM, F),
      IsGraphicsShader(AMDGPU::isShader(F.getCallingConv())) {}

  // GCN control flow may be divergent across lanes of a wave.
  bool hasBranchDivergence() { return true; }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
    assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
    return TTI::PSK_FastHardware;
  }

  // Register-file and vectorization-width queries; definitions live in the
  // implementation file.
  unsigned getHardwareNumberOfRegisters(bool Vector) const;
  unsigned getNumberOfRegisters(bool Vector) const;
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getMinVectorRegisterBitWidth() const;
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  // Load/store-vectorizer legality queries, per address space.
  bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                  unsigned Alignment,
                                  unsigned AddrSpace) const;
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const;
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const;

  unsigned getMaxInterleaveFactor(unsigned VF);

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  // Cost-model hooks; definitions live in the implementation file.
  int getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
    TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
    TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
    TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
    ArrayRef<const Value *> Args = ArrayRef<const Value *>());

  unsigned getCFInstrCost(unsigned Opcode);

  int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);

  // Divergence analysis hooks.
  bool isSourceOfDivergence(const Value *V) const;
  bool isAlwaysUniform(const Value *V) const;

  unsigned getFlatAddressSpace() const {
    // Don't bother running InferAddressSpaces pass on graphics shaders which
    // don't use flat addressing.
    if (IsGraphicsShader)
      return -1;
    return ST->hasFlatAddressSpace() ?
           AMDGPUAS::FLAT_ADDRESS : AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  }

  unsigned getVectorSplitCost() { return 0; }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp);

  // Whether \p Callee may be inlined into \p Caller given their subtarget
  // features (features in InlineFeatureIgnoreList are disregarded).
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  unsigned getInliningThresholdMultiplier() { return 9; }

  // Reduction cost hooks; definitions live in the implementation file.
  int getArithmeticReductionCost(unsigned Opcode,
                                 Type *Ty,
                                 bool IsPairwise);
  int getMinMaxReductionCost(Type *Ty, Type *CondTy,
                             bool IsPairwiseForm,
                             bool IsUnsigned);
};
| 200 | |
/// Target-transform-info implementation for the R600 subtargets. Mirrors a
/// subset of the GCN queries using R600Subtarget details; shared preferences
/// are forwarded to \c CommonTTI.
class R600TTIImpl final : public BasicTTIImplBase<R600TTIImpl> {
  using BaseT = BasicTTIImplBase<R600TTIImpl>;
  using TTI = TargetTransformInfo;

  // BasicTTIImplBase uses CRTP and calls back into the private accessors.
  friend BaseT;

  const R600Subtarget *ST;
  const AMDGPUTargetLowering *TLI;
  // Delegate for preferences shared with the GCN implementation.
  AMDGPUTTIImpl CommonTTI;

public:
  explicit R600TTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(static_cast<const R600Subtarget*>(TM->getSubtargetImpl(F))),
      TLI(ST->getTargetLowering()),
      CommonTTI(TM, F) {}

  const R600Subtarget *getST() const { return ST; }
  const AMDGPUTargetLowering *getTLI() const { return TLI; }

  // All declarations below are defined out of line in the implementation
  // file.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);
  unsigned getHardwareNumberOfRegisters(bool Vec) const;
  unsigned getNumberOfRegisters(bool Vec) const;
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getMinVectorRegisterBitWidth() const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
  // Load/store-vectorizer legality queries, per address space.
  bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes, unsigned Alignment,
                                  unsigned AddrSpace) const;
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const;
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const;
  unsigned getMaxInterleaveFactor(unsigned VF);
  unsigned getCFInstrCost(unsigned Opcode);
  int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);
};
| 240 | |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 241 | } // end namespace llvm |
| 242 | |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 243 | #endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |