Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 1 | //===-- AMDGPUTargetTransformInfo.h - AMDGPU specific TTI -------*- C++ -*-===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | /// \file |
| 10 | /// This file provides a TargetTransformInfo::Concept conforming object specific to the |
| 11 | /// AMDGPU target machine. It uses the target's detailed information to |
| 12 | /// provide more precise answers to certain TTI queries, while letting the |
| 13 | /// target independent and default TTI implementations handle the rest. |
| 14 | /// |
| 15 | //===----------------------------------------------------------------------===// |
| 16 | |
Matt Arsenault | 6b6a2c3 | 2016-03-11 08:00:27 +0000 | [diff] [blame] | 17 | #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |
| 18 | #define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 19 | |
| 20 | #include "AMDGPU.h" |
| 21 | #include "AMDGPUTargetMachine.h" |
| 22 | #include "llvm/Analysis/TargetTransformInfo.h" |
| 23 | #include "llvm/CodeGen/BasicTTIImpl.h" |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 24 | |
| 25 | namespace llvm { |
Matt Arsenault | 9651813 | 2016-03-25 01:00:32 +0000 | [diff] [blame] | 26 | class AMDGPUTargetLowering; |
Chandler Carruth | 93dcdc4 | 2015-01-31 11:17:59 +0000 | [diff] [blame] | 27 | |
/// TTI implementation for the AMDGPU target.
///
/// Answers cost-model, divergence, and vectorization-legality queries using
/// the subtarget's detailed information; BasicTTIImplBase supplies default
/// answers for everything not overridden here. Out-of-line members are
/// defined in the corresponding .cpp file.
class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
  typedef BasicTTIImplBase<AMDGPUTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const AMDGPUSubtarget *ST;
  const AMDGPUTargetLowering *TLI;
  // True when the function being queried uses a graphics-shader calling
  // convention (set from AMDGPU::isShader in the constructor). Used to skip
  // flat addressing, which graphics shaders don't use.
  bool IsGraphicsShader;

  // Subtarget features ignored when comparing caller and callee subtargets —
  // presumably consulted by areInlineCompatible (declared below); the
  // out-of-line definition is not visible here.
  const FeatureBitset InlineFeatureIgnoreList = {
    // Codegen control options which don't matter.
    AMDGPU::FeatureEnableLoadStoreOpt,
    AMDGPU::FeatureEnableSIScheduler,
    AMDGPU::FeatureEnableUnsafeDSOffsetFolding,
    AMDGPU::FeatureFlatForGlobal,
    AMDGPU::FeaturePromoteAlloca,
    AMDGPU::FeatureUnalignedBufferAccess,
    AMDGPU::FeatureUnalignedScratchAccess,

    AMDGPU::FeatureAutoWaitcntBeforeBarrier,
    AMDGPU::FeatureDebuggerEmitPrologue,
    AMDGPU::FeatureDebuggerInsertNops,
    AMDGPU::FeatureDebuggerReserveRegs,

    // Property of the kernel/environment which can't actually differ.
    AMDGPU::FeatureSGPRInitBug,
    AMDGPU::FeatureXNACK,
    AMDGPU::FeatureTrapHandler,

    // Perf-tuning features
    AMDGPU::FeatureFastFMAF32,
    AMDGPU::HalfRate64Ops
  };

  const AMDGPUSubtarget *getST() const { return ST; }
  const AMDGPUTargetLowering *getTLI() const { return TLI; }

  // Cost (in TCC_Basic units) of a full-rate instruction.
  static inline int getFullRateInstrCost() {
    return TargetTransformInfo::TCC_Basic;
  }

  // Cost of a half-rate instruction: twice the basic cost.
  static inline int getHalfRateInstrCost() {
    return 2 * TargetTransformInfo::TCC_Basic;
  }

  // TODO: The size is usually 8 bytes, but takes 4x as many cycles. Maybe
  // should be 2 or 4.
  static inline int getQuarterRateInstrCost() {
    return 3 * TargetTransformInfo::TCC_Basic;
  }

  // On some parts, normal fp64 operations are half rate, and others
  // quarter. This also applies to some integer operations.
  inline int get64BitInstrCost() const {
    return ST->hasHalfRate64Ops() ?
      getHalfRateInstrCost() : getQuarterRateInstrCost();
  }

public:
  /// Construct a TTI object for function \p F, pulling the subtarget and
  /// lowering info that match \p F's attributes from the target machine.
  explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
    : BaseT(TM, F.getParent()->getDataLayout()),
      ST(TM->getSubtargetImpl(F)),
      TLI(ST->getTargetLowering()),
      IsGraphicsShader(AMDGPU::isShader(F.getCallingConv())) {}

  // Branches on this target can be divergent across lanes, so passes that
  // care about divergence (e.g. structurizers) must run the analysis.
  bool hasBranchDivergence() { return true; }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  // popcount is considered fast in hardware for any power-of-2 type width.
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
    assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
    return TTI::PSK_FastHardware;
  }

  unsigned getHardwareNumberOfRegisters(bool Vector) const;
  unsigned getNumberOfRegisters(bool Vector) const;
  unsigned getRegisterBitWidth(bool Vector) const ;
  unsigned getMinVectorRegisterBitWidth() const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  // Legality queries for the load/store vectorizer; answers depend on the
  // address space being accessed (definitions are out of line).
  bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                  unsigned Alignment,
                                  unsigned AddrSpace) const;
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const;
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const;

  unsigned getMaxInterleaveFactor(unsigned VF);

  int getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
    TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
    TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
    TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
    ArrayRef<const Value *> Args = ArrayRef<const Value *>());

  unsigned getCFInstrCost(unsigned Opcode);

  int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);
  bool isSourceOfDivergence(const Value *V) const;
  bool isAlwaysUniform(const Value *V) const;

  // Returns the flat address space number when the subtarget has flat
  // addressing, otherwise UNKNOWN_ADDRESS_SPACE; -1 (wrapped to unsigned)
  // for graphics shaders to disable InferAddressSpaces entirely.
  unsigned getFlatAddressSpace() const {
    // Don't bother running InferAddressSpaces pass on graphics shaders which
    // don't use flat addressing.
    if (IsGraphicsShader)
      return -1;
    return ST->hasFlatAddressSpace() ?
      ST->getAMDGPUAS().FLAT_ADDRESS : ST->getAMDGPUAS().UNKNOWN_ADDRESS_SPACE;
  }

  // Splitting illegal vectors is free on this target.
  unsigned getVectorSplitCost() { return 0; }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp);

  // Whether \p Callee may be inlined into \p Caller given their subtarget
  // feature sets (definition is out of line).
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
};
| 154 | |
| 155 | } // end namespace llvm |
| 156 | |
| 157 | #endif |