//===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Implements the AMDGPU specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUSubtarget.h"
#include "AMDGPU.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "AMDGPUGenSubtargetInfo.inc"
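// The tablegen-generated *GenSubtargetInfo.inc headers #undef these macros at
// the end of the section they guard, so redefining them before including the
// R600 variant below is intentional, not a copy-paste error.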
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "R600GenSubtargetInfo.inc"

AMDGPUSubtarget::~AMDGPUSubtarget() = default;

R600Subtarget &
R600Subtarget::initializeSubtargetDependencies(const Triple &TT,
                                               StringRef GPU, StringRef FS) {
  SmallString<256> FullFS("+promote-alloca,+dx10-clamp,");
  FullFS += FS;
  ParseSubtargetFeatures(GPU, FullFS);

  // FIXME: I don't think Evergreen has any useful support for denormals, but
  // this should be checked. Should we issue a warning somewhere if someone
  // tries to enable these?
  if (getGeneration() <= R600Subtarget::NORTHERN_ISLANDS) {
    FP32Denormals = false;
  }

  HasMulU24 = getGeneration() >= EVERGREEN;
  HasMulI24 = hasCaymanISA();

  return *this;
}

AMDGPUSubtarget &
AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
                                                 StringRef GPU, StringRef FS) {
  // Determine default and user-specified characteristics.
  //
  // On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
  // enabled, but some instructions do not respect them and they run at the
  // double precision rate, so don't enable them by default.
  //
  // We want to be able to turn these off, but making this a subtarget feature
  // for SI has the unhelpful behavior that it unsets everything else if you
  // disable it.

  SmallString<256> FullFS("+promote-alloca,+dx10-clamp,+load-store-opt,");

  if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
    FullFS += "+flat-address-space,+flat-for-global,+unaligned-buffer-access,+trap-handler,";

  // FIXME: I don't think Evergreen has any useful support for denormals, but
  // this should be checked. Should we issue a warning somewhere if someone
  // tries to enable these?
  if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    FullFS += "+fp64-fp16-denormals,";
  } else {
    FullFS += "-fp32-denormals,";
  }

  FullFS += FS;

  ParseSubtargetFeatures(GPU, FullFS);

  // We don't support FP64 for EG/NI at the moment.
  assert(!hasFP64() || (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS));

  // Unless +flat-for-global or -flat-for-global is explicitly given, turn on
  // FlatForGlobal for all OSes on VI and newer hardware to avoid assertion
  // failures due to missing ADDR64 variants of MUBUF instructions.
  if (!hasAddr64() && !FS.contains("flat-for-global")) {
    FlatForGlobal = true;
  }

  // Set defaults if needed.
  if (MaxPrivateElementSize == 0)
    MaxPrivateElementSize = 4;

  if (LDSBankCount == 0)
    LDSBankCount = 32;

  if (TT.getArch() == Triple::amdgcn) {
    if (LocalMemorySize == 0)
      LocalMemorySize = 32768;

    // Do something sensible for an unspecified target.
    if (!HasMovrel && !HasVGPRIndexMode)
      HasMovrel = true;
  }

  HasFminFmaxLegacy = getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS;

  return *this;
}

AMDGPUCommonSubtarget::AMDGPUCommonSubtarget(const Triple &TT,
                                             const FeatureBitset &FeatureBits) :
  TargetTriple(TT),
  SubtargetFeatureBits(FeatureBits),
  Has16BitInsts(false),
  HasMadMixInsts(false),
  FP32Denormals(false),
  FPExceptions(false),
  HasSDWA(false),
  HasVOP3PInsts(false),
  HasMulI24(true),
  HasMulU24(true),
  HasFminFmaxLegacy(true),
  EnablePromoteAlloca(false),
  LocalMemorySize(0),
  WavefrontSize(0) {}

AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                                 const TargetMachine &TM) :
  AMDGPUGenSubtargetInfo(TT, GPU, FS),
  AMDGPUCommonSubtarget(TT, getFeatureBits()),
  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
  TargetTriple(TT),
  Gen(SOUTHERN_ISLANDS),
  IsaVersion(ISAVersion0_0_0),
  LDSBankCount(0),
  MaxPrivateElementSize(0),

  FastFMAF32(false),
  HalfRate64Ops(false),

  FP64FP16Denormals(false),
  DX10Clamp(false),
  FlatForGlobal(false),
  AutoWaitcntBeforeBarrier(false),
  CodeObjectV3(false),
  UnalignedScratchAccess(false),
  UnalignedBufferAccess(false),

  HasApertureRegs(false),
  EnableXNACK(false),
  TrapHandler(false),
  DebuggerInsertNops(false),
  DebuggerEmitPrologue(false),

  EnableHugePrivateBuffer(false),
  EnableVGPRSpilling(false),
  EnableLoadStoreOpt(false),
  EnableUnsafeDSOffsetFolding(false),
  EnableSIScheduler(false),
  EnableDS128(false),
  DumpCode(false),

  FP64(false),
  GCN3Encoding(false),
  CIInsts(false),
  GFX9Insts(false),
  SGPRInitBug(false),
  HasSMemRealTime(false),
  HasIntClamp(false),
  HasFmaMixInsts(false),
  HasMovrel(false),
  HasVGPRIndexMode(false),
  HasScalarStores(false),
  HasScalarAtomics(false),
  HasInv2PiInlineImm(false),
  HasSDWAOmod(false),
  HasSDWAScalar(false),
  HasSDWASdst(false),
  HasSDWAMac(false),
  HasSDWAOutModsVOPC(false),
  HasDPP(false),
  HasDLInsts(false),
  D16PreservesUnusedBits(false),
  FlatAddressSpace(false),
  FlatInstOffsets(false),
  FlatGlobalInsts(false),
  FlatScratchInsts(false),
  AddNoCarryInsts(false),
  HasUnpackedD16VMem(false),

  ScalarizeGlobal(false),

  FeatureDisable(false) {
  AS = AMDGPU::getAMDGPUAS(TT);
  initializeSubtargetDependencies(TT, GPU, FS);
}

unsigned AMDGPUCommonSubtarget::getMaxLocalMemSizeWithWaveCount(
    unsigned NWaves, const Function &F) const {
  if (NWaves == 1)
    return getLocalMemorySize();
  unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
  unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
  unsigned MaxWaves = getMaxWavesPerEU();
  return getLocalMemorySize() * MaxWaves / WorkGroupsPerCu / NWaves;
}
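
// Worked example for getMaxLocalMemSizeWithWaveCount() above, with
// illustrative numbers rather than any particular GPU: if
// getLocalMemorySize() = 65536, getMaxWavesPerEU() = 10, and the flat work
// group size yields WorkGroupsPerCu = 4, then a kernel that must still run
// NWaves = 5 waves may use at most 65536 * 10 / 4 / 5 = 32768 bytes of LDS.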

unsigned AMDGPUCommonSubtarget::getOccupancyWithLocalMemSize(uint32_t Bytes,
    const Function &F) const {
  unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
  unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
  unsigned MaxWaves = getMaxWavesPerEU();
  unsigned Limit = getLocalMemorySize() * MaxWaves / WorkGroupsPerCu;
  unsigned NumWaves = Limit / (Bytes ? Bytes : 1u);
  NumWaves = std::min(NumWaves, MaxWaves);
  NumWaves = std::max(NumWaves, 1u);
  return NumWaves;
}
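
// Inverse of the computation above, with the same illustrative numbers:
// Limit = 65536 * 10 / 4 = 163840, so a kernel using Bytes = 32768 of LDS
// gets NumWaves = min(163840 / 32768, 10) = 5, and a kernel using no LDS is
// clamped to the full 10 waves.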

unsigned
AMDGPUCommonSubtarget::getOccupancyWithLocalMemSize(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction());
}

std::pair<unsigned, unsigned>
AMDGPUCommonSubtarget::getDefaultFlatWorkGroupSize(CallingConv::ID CC) const {
  switch (CC) {
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return std::make_pair(getWavefrontSize() * 2, getWavefrontSize() * 4);
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
    return std::make_pair(1, getWavefrontSize());
  default:
    return std::make_pair(1, 16 * getWavefrontSize());
  }
}
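
// With a 64-wide wavefront this defaults kernels and compute shaders to a
// flat work group size range of (128, 256), the graphics shader stages
// (VS/LS/HS/ES/GS/PS) to (1, 64), and everything else to (1, 1024).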

std::pair<unsigned, unsigned> AMDGPUCommonSubtarget::getFlatWorkGroupSizes(
  const Function &F) const {
  // FIXME: Should this default to a maximum of 1024 for non-kernel functions?
  // Default minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Default =
    getDefaultFlatWorkGroupSize(F.getCallingConv());

  // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
  // starts using "amdgpu-flat-work-group-size" attribute.
  Default.second = AMDGPU::getIntegerAttribute(
    F, "amdgpu-max-work-group-size", Default.second);
  Default.first = std::min(Default.first, Default.second);

  // Requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
    F, "amdgpu-flat-work-group-size", Default);

  // Make sure the requested minimum does not exceed the requested maximum.
  if (Requested.first > Requested.second)
    return Default;

  // Make sure requested values do not violate the subtarget's specifications.
  if (Requested.first < getMinFlatWorkGroupSize())
    return Default;
  if (Requested.second > getMaxFlatWorkGroupSize())
    return Default;

  return Requested;
}
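
// A minimal IR-level sketch of the attribute this helper consumes (kernel
// name and range are hypothetical):
//
//   define amdgpu_kernel void @example() #0 { ... }
//   attributes #0 = { "amdgpu-flat-work-group-size"="64,256" }
//
// getIntegerPairAttribute() splits the "min,max" string, so this kernel
// requests a flat work group size range of (64, 256).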

std::pair<unsigned, unsigned> AMDGPUCommonSubtarget::getWavesPerEU(
  const Function &F) const {
  // Default minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Default(1, getMaxWavesPerEU());

  // Default/requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> FlatWorkGroupSizes = getFlatWorkGroupSizes(F);

  // If minimum/maximum flat work group sizes were explicitly requested using
  // the "amdgpu-flat-work-group-size" attribute, then set the default
  // minimum/maximum number of waves per execution unit to the values implied
  // by the requested minimum/maximum flat work group sizes.
  unsigned MinImpliedByFlatWorkGroupSize =
    getMaxWavesPerEU(FlatWorkGroupSizes.second);
  bool RequestedFlatWorkGroupSize = false;

  // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
  // starts using "amdgpu-flat-work-group-size" attribute.
  if (F.hasFnAttribute("amdgpu-max-work-group-size") ||
      F.hasFnAttribute("amdgpu-flat-work-group-size")) {
    Default.first = MinImpliedByFlatWorkGroupSize;
    RequestedFlatWorkGroupSize = true;
  }

  // Requested minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
    F, "amdgpu-waves-per-eu", Default, true);

  // Make sure the requested minimum does not exceed the requested maximum.
  if (Requested.second && Requested.first > Requested.second)
    return Default;

  // Make sure requested values do not violate the subtarget's specifications.
  if (Requested.first < getMinWavesPerEU() ||
      Requested.first > getMaxWavesPerEU())
    return Default;
  if (Requested.second > getMaxWavesPerEU())
    return Default;

  // Make sure requested values are compatible with values implied by the
  // requested minimum/maximum flat work group sizes.
  if (RequestedFlatWorkGroupSize &&
      Requested.first < MinImpliedByFlatWorkGroupSize)
    return Default;

  return Requested;
}
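
// Like the flat work group size, the waves-per-EU range is read from a
// "min,max" attribute string; a hypothetical kernel carrying
// "amdgpu-waves-per-eu"="2,4" asks codegen to budget resources (registers,
// LDS) so that between 2 and 4 waves can be resident per execution unit.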

bool AMDGPUCommonSubtarget::makeLIDRangeMetadata(Instruction *I) const {
  Function *Kernel = I->getParent()->getParent();
  unsigned MinSize = 0;
  unsigned MaxSize = getFlatWorkGroupSizes(*Kernel).second;
  bool IdQuery = false;

  // If reqd_work_group_size is present it narrows the value down.
  if (auto *CI = dyn_cast<CallInst>(I)) {
    const Function *F = CI->getCalledFunction();
    if (F) {
      unsigned Dim = UINT_MAX;
      switch (F->getIntrinsicID()) {
      case Intrinsic::amdgcn_workitem_id_x:
      case Intrinsic::r600_read_tidig_x:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_x:
        Dim = 0;
        break;
      case Intrinsic::amdgcn_workitem_id_y:
      case Intrinsic::r600_read_tidig_y:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_y:
        Dim = 1;
        break;
      case Intrinsic::amdgcn_workitem_id_z:
      case Intrinsic::r600_read_tidig_z:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_z:
        Dim = 2;
        break;
      default:
        break;
      }
      if (Dim <= 3) {
        if (auto Node = Kernel->getMetadata("reqd_work_group_size"))
          if (Node->getNumOperands() == 3)
            MinSize = MaxSize = mdconst::extract<ConstantInt>(
                                  Node->getOperand(Dim))->getZExtValue();
      }
    }
  }

  if (!MaxSize)
    return false;

  // Range metadata is [Lo, Hi). For an ID query we need to pass the max size
  // as Hi; for a size query we need to pass Hi + 1.
  if (IdQuery)
    MinSize = 0;
  else
    ++MaxSize;

  MDBuilder MDB(I->getContext());
  MDNode *MaxWorkGroupSizeRange = MDB.createRange(APInt(32, MinSize),
                                                  APInt(32, MaxSize));
  I->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
  return true;
}
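
// For example, in a hypothetical kernel whose maximum flat work group size is
// 256, an ID query ends up annotated as
//
//   %id = call i32 @llvm.amdgcn.workitem.id.x(), !range !0
//   !0 = !{i32 0, i32 256}
//
// i.e. the half-open range [0, 256), while a size query on the same kernel
// would get !{i32 0, i32 257} so that the value 256 itself remains allowed.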

R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
                             const TargetMachine &TM) :
  R600GenSubtargetInfo(TT, GPU, FS),
  AMDGPUCommonSubtarget(TT, getFeatureBits()),
  InstrInfo(*this),
  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
  FMA(false),
  CaymanISA(false),
  CFALUBug(false),
  DX10Clamp(false),
  HasVertexCache(false),
  R600ALUInst(false),
  FP64(false),
  TexVTXClauseSize(0),
  Gen(R600),
  TLInfo(TM, initializeSubtargetDependencies(TT, GPU, FS)),
  InstrItins(getInstrItineraryForCPU(GPU)),
  AS(AMDGPU::getAMDGPUAS(TT)) {}

SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                         const GCNTargetMachine &TM)
  : AMDGPUSubtarget(TT, GPU, FS, TM), InstrInfo(*this),
    FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
    TLInfo(TM, *this) {
  CallLoweringInfo.reset(new AMDGPUCallLowering(*getTargetLowering()));
  Legalizer.reset(new AMDGPULegalizerInfo(*this, TM));

  RegBankInfo.reset(new AMDGPURegisterBankInfo(*getRegisterInfo()));
  InstSelector.reset(new AMDGPUInstructionSelector(
      *this, *static_cast<AMDGPURegisterBankInfo *>(RegBankInfo.get()), TM));
}

void SISubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                      unsigned NumRegionInstrs) const {
  // Track register pressure so the scheduler can try to decrease
  // pressure once register usage is above the threshold defined by
  // SIRegisterInfo::getRegPressureSetLimit().
  Policy.ShouldTrackPressure = true;

  // Enabling both top down and bottom up scheduling seems to give us less
  // register spills than just using one of these approaches on its own.
  Policy.OnlyTopDown = false;
  Policy.OnlyBottomUp = false;

  // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
  if (!enableSIScheduler())
    Policy.ShouldTrackLaneMasks = true;
}

bool SISubtarget::isVGPRSpillingEnabled(const Function &F) const {
  return EnableVGPRSpilling || !AMDGPU::isShader(F.getCallingConv());
}

uint64_t SISubtarget::getExplicitKernArgSize(const Function &F) const {
  assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL);

  const DataLayout &DL = F.getParent()->getDataLayout();
  uint64_t ExplicitArgBytes = 0;
  for (const Argument &Arg : F.args()) {
    Type *ArgTy = Arg.getType();

    unsigned Align = DL.getABITypeAlignment(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);
    ExplicitArgBytes = alignTo(ExplicitArgBytes, Align) + AllocSize;
  }

  return ExplicitArgBytes;
}
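
// Worked example: for a hypothetical kernel taking (float addrspace(1)*, i32)
// with 64-bit global pointers, the pointer contributes alignTo(0, 8) + 8 = 8
// bytes and the i32 contributes alignTo(8, 4) + 4 = 12, so
// getExplicitKernArgSize() returns 12.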

unsigned SISubtarget::getKernArgSegmentSize(const Function &F,
                                            int64_t ExplicitArgBytes) const {
  if (ExplicitArgBytes == -1)
    ExplicitArgBytes = getExplicitKernArgSize(F);

  unsigned ExplicitOffset = getExplicitKernelArgOffset(F);

  uint64_t TotalSize = ExplicitOffset + ExplicitArgBytes;
  unsigned ImplicitBytes = getImplicitArgNumBytes(F);
  if (ImplicitBytes != 0) {
    unsigned Alignment = getAlignmentForImplicitArgPtr();
    TotalSize = alignTo(ExplicitArgBytes, Alignment) + ImplicitBytes;
  }

  // Being able to dereference past the end is useful for emitting scalar
  // loads.
  return alignTo(TotalSize, 4);
}
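
// Continuing the example above: with 12 explicit bytes, an explicit offset of
// 0, and (illustratively) 48 implicit bytes at 8-byte alignment, the segment
// size is alignTo(alignTo(12, 8) + 48, 4) = 64 bytes.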

unsigned SISubtarget::getOccupancyWithNumSGPRs(unsigned SGPRs) const {
  if (getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
    if (SGPRs <= 80)
      return 10;
    if (SGPRs <= 88)
      return 9;
    if (SGPRs <= 100)
      return 8;
    return 7;
  }
  if (SGPRs <= 48)
    return 10;
  if (SGPRs <= 56)
    return 9;
  if (SGPRs <= 64)
    return 8;
  if (SGPRs <= 72)
    return 7;
  if (SGPRs <= 80)
    return 6;
  return 5;
}
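
// For example, a function using 96 SGPRs is limited to 8 waves per EU on VI
// and newer, but to 5 waves on SI/CI where the cutoffs are tighter.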

unsigned SISubtarget::getOccupancyWithNumVGPRs(unsigned VGPRs) const {
  if (VGPRs <= 24)
    return 10;
  if (VGPRs <= 28)
    return 9;
  if (VGPRs <= 32)
    return 8;
  if (VGPRs <= 36)
    return 7;
  if (VGPRs <= 40)
    return 6;
  if (VGPRs <= 48)
    return 5;
  if (VGPRs <= 64)
    return 4;
  if (VGPRs <= 84)
    return 3;
  if (VGPRs <= 128)
    return 2;
  return 1;
}
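
// Similarly, a function using 48 VGPRs can reach at most 5 waves per EU,
// while one using 129 or more VGPRs drops to a single wave.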

unsigned SISubtarget::getReservedNumSGPRs(const MachineFunction &MF) const {
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  if (MFI.hasFlatScratchInit()) {
    if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return 6; // FLAT_SCRATCH, XNACK, VCC (in that order).
    if (getGeneration() == AMDGPUSubtarget::SEA_ISLANDS)
      return 4; // FLAT_SCRATCH, VCC (in that order).
  }

  if (isXNACKEnabled())
    return 4; // XNACK, VCC (in that order).
  return 2; // VCC.
}

unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute the maximum number of SGPRs this function can use, given the
  // default/requested minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, false);
  unsigned MaxAddressableNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, true);

  // Check if the maximum number of SGPRs was explicitly requested using the
  // "amdgpu-num-sgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-sgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-sgpr", MaxNumSGPRs);

    // Make sure the requested value does not violate the subtarget's
    // specifications.
    if (Requested && (Requested <= getReservedNumSGPRs(MF)))
      Requested = 0;

    // If more SGPRs are required to support the input user/system SGPRs,
    // increase to accommodate them.
    //
    // FIXME: This really ends up using the requested number of SGPRs + number
    // of reserved special registers in total. Theoretically you could re-use
    // the last input registers for these special registers, but this would
    // require a lot of complexity to deal with the weird aliasing.
    unsigned InputNumSGPRs = MFI.getNumPreloadedSGPRs();
    if (Requested && Requested < InputNumSGPRs)
      Requested = InputNumSGPRs;

    // Make sure the requested value is compatible with the values implied by
    // the default/requested minimum/maximum number of waves per execution
    // unit.
    if (Requested && Requested > getMaxNumSGPRs(WavesPerEU.first, false))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumSGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumSGPRs = Requested;
  }

  if (hasSGPRInitBug())
    MaxNumSGPRs = AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  return std::min(MaxNumSGPRs - getReservedNumSGPRs(MF),
                  MaxAddressableNumSGPRs);
}
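
// "amdgpu-num-sgpr" takes a single integer; a hypothetical function annotated
// with "amdgpu-num-sgpr"="32" caps itself at 32 SGPRs, provided that value
// survives the reserved-register and waves-per-EU checks above.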

unsigned SISubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute the maximum number of VGPRs this function can use, given the
  // default/requested minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumVGPRs = getMaxNumVGPRs(WavesPerEU.first);

  // Check if the maximum number of VGPRs was explicitly requested using the
  // "amdgpu-num-vgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-vgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-vgpr", MaxNumVGPRs);

    // Make sure the requested value is compatible with the values implied by
    // the default/requested minimum/maximum number of waves per execution
    // unit.
    if (Requested && Requested > getMaxNumVGPRs(WavesPerEU.first))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumVGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumVGPRs = Requested;
  }

  return MaxNumVGPRs;
}

namespace {
struct MemOpClusterMutation : ScheduleDAGMutation {
  const SIInstrInfo *TII;

  MemOpClusterMutation(const SIInstrInfo *tii) : TII(tii) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override {
    ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

    SUnit *SUa = nullptr;
    // Search for two consecutive memory operations and link them
    // to prevent the scheduler from moving them apart.
    // During DAG pre-processing the SUnits are still in the original
    // order of the instructions before scheduling.
    for (SUnit &SU : DAG->SUnits) {
      MachineInstr &MI2 = *SU.getInstr();
      if (!MI2.mayLoad() && !MI2.mayStore()) {
        SUa = nullptr;
        continue;
      }
      if (!SUa) {
        SUa = &SU;
        continue;
      }

      MachineInstr &MI1 = *SUa->getInstr();
      if ((TII->isVMEM(MI1) && TII->isVMEM(MI2)) ||
          (TII->isFLAT(MI1) && TII->isFLAT(MI2)) ||
          (TII->isSMRD(MI1) && TII->isSMRD(MI2)) ||
          (TII->isDS(MI1) && TII->isDS(MI2))) {
        SU.addPredBarrier(SUa);

        for (const SDep &SI : SU.Preds) {
          if (SI.getSUnit() != SUa)
            SUa->addPred(SDep(SI.getSUnit(), SDep::Artificial));
        }

        if (&SU != &DAG->ExitSU) {
          for (const SDep &SI : SUa->Succs) {
            if (SI.getSUnit() != &SU)
              SI.getSUnit()->addPred(SDep(&SU, SDep::Artificial));
          }
        }
      }

      SUa = &SU;
    }
  }
};
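
// Illustration (hypothetical instruction sequence): given
//
//   DS_READ_B32  ...
//   DS_READ_B32  ...
//   V_ADD_F32    ...
//
// the two DS operations are linked with artificial edges so the scheduler
// keeps them adjacent, while the non-memory VALU instruction resets the
// chain (SUa is cleared).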
} // namespace

void SISubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(llvm::make_unique<MemOpClusterMutation>(&InstrInfo));
}

const AMDGPUCommonSubtarget &
AMDGPUCommonSubtarget::get(const MachineFunction &MF) {
  if (MF.getTarget().getTargetTriple().getArch() == Triple::amdgcn)
    return static_cast<const AMDGPUCommonSubtarget &>(
        MF.getSubtarget<AMDGPUSubtarget>());
  return static_cast<const AMDGPUCommonSubtarget &>(
      MF.getSubtarget<R600Subtarget>());
}

const AMDGPUCommonSubtarget &
AMDGPUCommonSubtarget::get(const TargetMachine &TM, const Function &F) {
  if (TM.getTargetTriple().getArch() == Triple::amdgcn)
    return static_cast<const AMDGPUCommonSubtarget &>(
        TM.getSubtarget<AMDGPUSubtarget>(F));
  return static_cast<const AMDGPUCommonSubtarget &>(
      TM.getSubtarget<R600Subtarget>(F));
}