Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 1 | //===- AMDGPUTargetTransformInfo.h - AMDGPU specific TTI --------*- C++ -*-===// |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 9 | // |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 10 | /// \file |
/// This file declares a TargetTransformInfo::Concept conforming object specific to the
| 12 | /// AMDGPU target machine. It uses the target's detailed information to |
| 13 | /// provide more precise answers to certain TTI queries, while letting the |
| 14 | /// target independent and default TTI implementations handle the rest. |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 15 | // |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 16 | //===----------------------------------------------------------------------===// |
| 17 | |
Matt Arsenault | 6b6a2c3 | 2016-03-11 08:00:27 +0000 | [diff] [blame] | 18 | #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |
| 19 | #define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 20 | |
| 21 | #include "AMDGPU.h" |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 22 | #include "AMDGPUSubtarget.h" |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 23 | #include "AMDGPUTargetMachine.h" |
Tom Stellard | 44b30b4 | 2018-05-22 02:03:23 +0000 | [diff] [blame^] | 24 | #include "MCTargetDesc/AMDGPUMCTargetDesc.h" |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 25 | #include "Utils/AMDGPUBaseInfo.h" |
| 26 | #include "llvm/ADT/ArrayRef.h" |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 27 | #include "llvm/Analysis/TargetTransformInfo.h" |
| 28 | #include "llvm/CodeGen/BasicTTIImpl.h" |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 29 | #include "llvm/IR/Function.h" |
| 30 | #include "llvm/MC/SubtargetFeature.h" |
| 31 | #include "llvm/Support/MathExtras.h" |
| 32 | #include <cassert> |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 33 | |
| 34 | namespace llvm { |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 35 | |
Matt Arsenault | 9651813 | 2016-03-25 01:00:32 +0000 | [diff] [blame] | 36 | class AMDGPUTargetLowering; |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 37 | class Loop; |
| 38 | class ScalarEvolution; |
| 39 | class Type; |
| 40 | class Value; |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 41 | |
/// AMDGPU-specific implementation of the TargetTransformInfo query interface.
///
/// Inherits default answers from BasicTTIImplBase (CRTP) and overrides the
/// queries where the AMDGPU subtarget can give a more precise answer. Most
/// overrides are declared here and defined out of line in the corresponding
/// .cpp file; only a few trivial hooks are defined inline below.
class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
  using BaseT = BasicTTIImplBase<AMDGPUTTIImpl>;
  using TTI = TargetTransformInfo;

  // BasicTTIImplBase calls back into our private getST()/getTLI() accessors.
  friend BaseT;

  // Subtarget and lowering info for the function this TTI was built for
  // (both obtained from the target machine in the constructor; not owned).
  const AMDGPUSubtarget *ST;
  const AMDGPUTargetLowering *TLI;
  // True when the function's calling convention is a graphics-shader one
  // (per AMDGPU::isShader); used to skip flat-address-space handling below.
  bool IsGraphicsShader;

  // Subtarget features that should not block inlining when caller and callee
  // were compiled with different feature sets — presumably consulted by
  // areInlineCompatible(), whose definition is out of line (TODO confirm).
  const FeatureBitset InlineFeatureIgnoreList = {
    // Codegen control options which don't matter.
    AMDGPU::FeatureEnableLoadStoreOpt,
    AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding,
    AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca,
    AMDGPU::FeatureUnalignedBufferAccess,
    AMDGPU::FeatureUnalignedScratchAccess,

    AMDGPU::FeatureAutoWaitcntBeforeBarrier,
    AMDGPU::FeatureDebuggerEmitPrologue,
    AMDGPU::FeatureDebuggerInsertNops,
    AMDGPU::FeatureDebuggerReserveRegs,

    // Property of the kernel/environment which can't actually differ.
    AMDGPU::FeatureSGPRInitBug,
    AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,

    // Perf-tuning features
    AMDGPU::FeatureFastFMAF32,
    AMDGPU::HalfRate64Ops
  };

  const AMDGPUSubtarget *getST() const { return ST; }
  const AMDGPUTargetLowering *getTLI() const { return TLI; }

  // Relative instruction costs, expressed in multiples of TCC_Basic.
  // "Rate" refers to issue throughput: a full-rate op costs 1x TCC_Basic.
  static inline int getFullRateInstrCost() {
    return TargetTransformInfo::TCC_Basic;
  }

  static inline int getHalfRateInstrCost() {
    return 2 * TargetTransformInfo::TCC_Basic;
  }

  // TODO: The size is usually 8 bytes, but takes 4x as many cycles. Maybe
  // should be 2 or 4.
  static inline int getQuarterRateInstrCost() {
    return 3 * TargetTransformInfo::TCC_Basic;
  }

  // On some parts, normal fp64 operations are half rate, and others
  // quarter. This also applies to some integer operations.
  inline int get64BitInstrCost() const {
    return ST->hasHalfRate64Ops() ?
      getHalfRateInstrCost() : getQuarterRateInstrCost();
  }

public:
  /// Build a TTI for function \p F: the subtarget is resolved per-function
  /// from \p TM, and the graphics-shader flag is derived from F's calling
  /// convention.
  explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(TM->getSubtargetImpl(F)),
      TLI(ST->getTargetLowering()),
      IsGraphicsShader(AMDGPU::isShader(F.getCallingConv())) {}

  /// Branches may be divergent on AMDGPU, so clients must consult
  /// divergence analysis (see also isSourceOfDivergence below).
  bool hasBranchDivergence() { return true; }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  /// Population count is fast in hardware for any power-of-2 type width.
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
    assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
    return TTI::PSK_FastHardware;
  }

  // Register-file and vectorization queries; definitions are out of line.
  unsigned getHardwareNumberOfRegisters(bool Vector) const;
  unsigned getNumberOfRegisters(bool Vector) const;
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getMinVectorRegisterBitWidth() const;
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  // Load/store vectorizer legality hooks, per address space and alignment.
  bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                  unsigned Alignment,
                                  unsigned AddrSpace) const;
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const;
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const;

  unsigned getMaxInterleaveFactor(unsigned VF);

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  // Cost-model overrides; definitions are out of line.
  int getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
    TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
    TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
    TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
    ArrayRef<const Value *> Args = ArrayRef<const Value *>());

  unsigned getCFInstrCost(unsigned Opcode);

  int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);
  bool isSourceOfDivergence(const Value *V) const;
  bool isAlwaysUniform(const Value *V) const;

  /// Address space used for flat pointers, for InferAddressSpaces.
  /// NOTE(review): returns -1 converted to unsigned as a "no flat address
  /// space" sentinel — callers are expected to treat that value as invalid.
  unsigned getFlatAddressSpace() const {
    // Don't bother running InferAddressSpaces pass on graphics shaders which
    // don't use flat addressing.
    if (IsGraphicsShader)
      return -1;
    return ST->hasFlatAddressSpace() ?
      ST->getAMDGPUAS().FLAT_ADDRESS : ST->getAMDGPUAS().UNKNOWN_ADDRESS_SPACE;
  }

  /// Splitting vectors is considered free on this target.
  unsigned getVectorSplitCost() { return 0; }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp);

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// Scale the generic inlining threshold up 9x — inlining is presumably
  /// considered far more profitable on this target (TODO: confirm rationale
  /// with the out-of-line cost model / commit history).
  unsigned getInliningThresholdMultiplier() { return 9; }

  // Reduction cost overrides; definitions are out of line.
  int getArithmeticReductionCost(unsigned Opcode,
                                 Type *Ty,
                                 bool IsPairwise);
  int getMinMaxReductionCost(Type *Ty, Type *CondTy,
                             bool IsPairwiseForm,
                             bool IsUnsigned);
};
| 184 | |
| 185 | } // end namespace llvm |
| 186 | |
Eugene Zelenko | d16eff8 | 2017-08-08 23:53:55 +0000 | [diff] [blame] | 187 | #endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |