//===- AMDGPUPerfHintAnalysis.cpp - analysis of functions memory traffic --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Analyzes if a function is potentially memory bound and if a kernel
/// may benefit from limiting the number of waves to reduce cache thrashing.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-perf-hint"

static cl::opt<unsigned>
    MemBoundThresh("amdgpu-membound-threshold", cl::init(50), cl::Hidden,
                   cl::desc("Function mem bound threshold in %"));

static cl::opt<unsigned>
    LimitWaveThresh("amdgpu-limit-wave-threshold", cl::init(50), cl::Hidden,
                    cl::desc("Kernel limit wave threshold in %"));

static cl::opt<unsigned>
    IAWeight("amdgpu-indirect-access-weight", cl::init(1000), cl::Hidden,
             cl::desc("Indirect access memory instruction weight"));

static cl::opt<unsigned>
    LSWeight("amdgpu-large-stride-weight", cl::init(1000), cl::Hidden,
             cl::desc("Large stride memory access weight"));

static cl::opt<unsigned>
    LargeStrideThresh("amdgpu-large-stride-threshold", cl::init(64), cl::Hidden,
                      cl::desc("Large stride memory access threshold"));

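// Note: these options are cl::Hidden, so they do not show up in -help, but
// they can still be set on the llc command line when tuning the heuristics,
// e.g. (illustrative values only):
//   llc -mtriple=amdgcn -amdgpu-membound-threshold=60 foo.ll
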
STATISTIC(NumMemBound, "Number of functions marked as memory bound");
STATISTIC(NumLimitWave, "Number of functions marked as needing limit wave");

char llvm::AMDGPUPerfHintAnalysis::ID = 0;
char &llvm::AMDGPUPerfHintAnalysisID = AMDGPUPerfHintAnalysis::ID;

INITIALIZE_PASS(AMDGPUPerfHintAnalysis, DEBUG_TYPE,
                "Analysis of whether a function is memory bound", true, true)

namespace {

struct AMDGPUPerfHint {
  friend AMDGPUPerfHintAnalysis;

public:
  AMDGPUPerfHint(AMDGPUPerfHintAnalysis::FuncInfoMap &FIM_,
                 const TargetLowering *TLI_)
      : FIM(FIM_), DL(nullptr), TLI(TLI_) {}

  void runOnFunction(Function &F);

private:
  struct MemAccessInfo {
    const Value *V;
    const Value *Base;
    int64_t Offset;
    MemAccessInfo() : V(nullptr), Base(nullptr), Offset(0) {}
    bool isLargeStride(MemAccessInfo &Reference) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    Printable print() const {
      return Printable([this](raw_ostream &OS) {
        OS << "Value: " << *V << '\n'
           << "Base: " << *Base << " Offset: " << Offset << '\n';
      });
    }
#endif
  };

  MemAccessInfo makeMemAccessInfo(Instruction *) const;

  MemAccessInfo LastAccess; // Last memory access info

  AMDGPUPerfHintAnalysis::FuncInfoMap &FIM;

  const DataLayout *DL;

  AMDGPUAS AS;

  const TargetLowering *TLI;

  AMDGPUPerfHintAnalysis::FuncInfoMap::iterator visit(const Function &F);
  static bool isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &F);
  static bool needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &F);

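  /// Check if an instruction's memory access is indirect, i.e. the address it
  /// accesses is itself loaded from global, local or constant memory (a
  /// pointer chase), conceptually:
  ///   p = q[i]; // q resides in global/local/constant memory
  ///   x = *p;   // indirect access
  /// The implementation also traces the address back through GEPs, casts,
  /// binary operators, selects, extractelements and PHIs.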
  bool isIndirectAccess(const Instruction *Inst) const;

  /// Check if the instruction is a large-stride memory access.
  /// The purpose is to identify memory access patterns like:
  ///   x = a[i];
  ///   y = a[i+1000];
  ///   z = a[i+2000];
  /// In the above example, the second and third accesses will be marked as
  /// large-stride memory accesses.
  bool isLargeStride(const Instruction *Inst);

  bool isGlobalAddr(const Value *V) const;
  bool isLocalAddr(const Value *V) const;
  bool isConstantAddr(const Value *V) const;
};

static const Value *getMemoryInstrPtr(const Instruction *Inst) {
  if (auto LI = dyn_cast<LoadInst>(Inst)) {
    return LI->getPointerOperand();
  }
  if (auto SI = dyn_cast<StoreInst>(Inst)) {
    return SI->getPointerOperand();
  }
  if (auto AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    return AI->getPointerOperand();
  }
  if (auto AI = dyn_cast<AtomicRMWInst>(Inst)) {
    return AI->getPointerOperand();
  }
  if (auto MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    return MI->getRawDest();
  }

  return nullptr;
}

bool AMDGPUPerfHint::isIndirectAccess(const Instruction *Inst) const {
  LLVM_DEBUG(dbgs() << "[isIndirectAccess] " << *Inst << '\n');
  SmallSet<const Value *, 32> WorkSet;
  SmallSet<const Value *, 32> Visited;
  if (const Value *MO = getMemoryInstrPtr(Inst)) {
    if (isGlobalAddr(MO))
      WorkSet.insert(MO);
  }

  while (!WorkSet.empty()) {
    const Value *V = *WorkSet.begin();
    WorkSet.erase(*WorkSet.begin());
    if (!Visited.insert(V).second)
      continue;
    LLVM_DEBUG(dbgs() << " check: " << *V << '\n');

    if (auto LD = dyn_cast<LoadInst>(V)) {
      auto M = LD->getPointerOperand();
      if (isGlobalAddr(M) || isLocalAddr(M) || isConstantAddr(M)) {
        LLVM_DEBUG(dbgs() << " is IA\n");
        return true;
      }
      continue;
    }

    if (auto GEP = dyn_cast<GetElementPtrInst>(V)) {
      auto P = GEP->getPointerOperand();
      WorkSet.insert(P);
      for (unsigned I = 1, E = GEP->getNumIndices() + 1; I != E; ++I)
        WorkSet.insert(GEP->getOperand(I));
      continue;
    }

    if (auto U = dyn_cast<UnaryInstruction>(V)) {
      WorkSet.insert(U->getOperand(0));
      continue;
    }

    if (auto BO = dyn_cast<BinaryOperator>(V)) {
      WorkSet.insert(BO->getOperand(0));
      WorkSet.insert(BO->getOperand(1));
      continue;
    }

    if (auto S = dyn_cast<SelectInst>(V)) {
      WorkSet.insert(S->getFalseValue());
      WorkSet.insert(S->getTrueValue());
      continue;
    }

    if (auto E = dyn_cast<ExtractElementInst>(V)) {
      WorkSet.insert(E->getVectorOperand());
      continue;
    }

    if (auto Phi = dyn_cast<PHINode>(V)) {
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        WorkSet.insert(Phi->getIncomingValue(I));
      continue;
    }

    LLVM_DEBUG(dbgs() << " dropped\n");
  }

  LLVM_DEBUG(dbgs() << " is not IA\n");
  return false;
}

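// Compute instruction counts for F and memoize them in FIM. Counts from
// defined callees are folded into the caller, so a kernel's totals cover its
// known call tree; immediate recursion is skipped rather than followed.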
AMDGPUPerfHintAnalysis::FuncInfoMap::iterator
AMDGPUPerfHint::visit(const Function &F) {
  auto FIP = FIM.insert(std::make_pair(&F, AMDGPUPerfHintAnalysis::FuncInfo()));
  if (!FIP.second)
    return FIP.first;

  AMDGPUPerfHintAnalysis::FuncInfo &FI = FIP.first->second;

  LLVM_DEBUG(dbgs() << "[AMDGPUPerfHint] process " << F.getName() << '\n');

  for (auto &B : F) {
    LastAccess = MemAccessInfo();
    for (auto &I : B) {
      if (getMemoryInstrPtr(&I)) {
        if (isIndirectAccess(&I))
          ++FI.IAMInstCount;
        if (isLargeStride(&I))
          ++FI.LSMInstCount;
        ++FI.MemInstCount;
        ++FI.InstCount;
        continue;
      }
      CallSite CS(const_cast<Instruction *>(&I));
      if (CS) {
        Function *Callee = CS.getCalledFunction();
        if (!Callee || Callee->isDeclaration()) {
          ++FI.InstCount;
          continue;
        }
        if (&F == Callee) // Handle immediate recursion
          continue;

        auto Loc = visit(*Callee);

        assert(Loc != FIM.end() && "No func info");
        FI.MemInstCount += Loc->second.MemInstCount;
        FI.InstCount += Loc->second.InstCount;
        FI.IAMInstCount += Loc->second.IAMInstCount;
        FI.LSMInstCount += Loc->second.LSMInstCount;
      } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        TargetLoweringBase::AddrMode AM;
        auto *Ptr = GetPointerBaseWithConstantOffset(GEP, AM.BaseOffs, *DL);
        AM.BaseGV = dyn_cast_or_null<GlobalValue>(const_cast<Value *>(Ptr));
        AM.HasBaseReg = !AM.BaseGV;
        if (TLI->isLegalAddressingMode(*DL, AM, GEP->getResultElementType(),
                                       GEP->getPointerAddressSpace()))
          // Offset will likely be folded into load or store
          continue;
        ++FI.InstCount;
      } else {
        ++FI.InstCount;
      }
    }
  }

  return FIP.first;
}

void AMDGPUPerfHint::runOnFunction(Function &F) {
  if (FIM.find(&F) != FIM.end())
    return;

  const Module &M = *F.getParent();
  DL = &M.getDataLayout();
  AS = AMDGPU::getAMDGPUAS(M);

  auto Loc = visit(F);

  assert(Loc != FIM.end() && "No func info");
  LLVM_DEBUG(dbgs() << F.getName() << " MemInst: " << Loc->second.MemInstCount
                    << '\n'
                    << " IAMInst: " << Loc->second.IAMInstCount << '\n'
                    << " LSMInst: " << Loc->second.LSMInstCount << '\n'
                    << " TotalInst: " << Loc->second.InstCount << '\n');

  auto &FI = Loc->second;

  if (isMemBound(FI)) {
    LLVM_DEBUG(dbgs() << F.getName() << " is memory bound\n");
    NumMemBound++;
  }

  if (AMDGPU::isEntryFunctionCC(F.getCallingConv()) && needLimitWave(FI)) {
    LLVM_DEBUG(dbgs() << F.getName() << " needs limit wave\n");
    NumLimitWave++;
  }
}

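// A function is memory bound when the percentage of memory instructions
// exceeds MemBoundThresh. For example, with the default threshold of 50, a
// function with 60 memory instructions out of 100 total (60%) is classified
// as memory bound.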
bool AMDGPUPerfHint::isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return FI.MemInstCount * 100 / FI.InstCount > MemBoundThresh;
}

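// Weighted variant of the same ratio: indirect and large-stride accesses are
// weighted by IAWeight and LSWeight. With the defaults (both 1000, threshold
// 50), a single indirect access in a 1000-instruction kernel already yields
// (1 + 1 * 1000) * 100 / 1000 = 100, crossing the threshold on its own.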
bool AMDGPUPerfHint::needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return ((FI.MemInstCount + FI.IAMInstCount * IAWeight +
           FI.LSMInstCount * LSWeight) *
          100 / FI.InstCount) > LimitWaveThresh;
}

bool AMDGPUPerfHint::isGlobalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    // Flat likely points to global too.
    return As == AS.GLOBAL_ADDRESS || As == AS.FLAT_ADDRESS;
  }
  return false;
}

bool AMDGPUPerfHint::isLocalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType()))
    return PT->getAddressSpace() == AS.LOCAL_ADDRESS;
  return false;
}

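// An access is large-stride when its constant byte offset from the base
// pointer differs from the previous access to the same base by more than
// LargeStrideThresh. LastAccess is reset per basic block in visit(), so only
// accesses within the same block are compared.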
bool AMDGPUPerfHint::isLargeStride(const Instruction *Inst) {
  LLVM_DEBUG(dbgs() << "[isLargeStride] " << *Inst << '\n');

  MemAccessInfo MAI = makeMemAccessInfo(const_cast<Instruction *>(Inst));
  bool IsLargeStride = MAI.isLargeStride(LastAccess);
  if (MAI.Base)
    LastAccess = std::move(MAI);

  return IsLargeStride;
}

AMDGPUPerfHint::MemAccessInfo
AMDGPUPerfHint::makeMemAccessInfo(Instruction *Inst) const {
  MemAccessInfo MAI;
  const Value *MO = getMemoryInstrPtr(Inst);

  LLVM_DEBUG(dbgs() << "[isLargeStride] MO: " << *MO << '\n');
  // Do not treat local-addr memory access as large stride.
  if (isLocalAddr(MO))
    return MAI;

  MAI.V = MO;
  MAI.Base = GetPointerBaseWithConstantOffset(MO, MAI.Offset, *DL);
  return MAI;
}

bool AMDGPUPerfHint::isConstantAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    return As == AS.CONSTANT_ADDRESS || As == AS.CONSTANT_ADDRESS_32BIT;
  }
  return false;
}

bool AMDGPUPerfHint::MemAccessInfo::isLargeStride(
    MemAccessInfo &Reference) const {

  if (!Base || !Reference.Base || Base != Reference.Base)
    return false;

  uint64_t Diff = Offset > Reference.Offset ? Offset - Reference.Offset
                                            : Reference.Offset - Offset;
  bool Result = Diff > LargeStrideThresh;
  LLVM_DEBUG(dbgs() << "[isLargeStride compare]\n"
                    << print() << "<=>\n"
                    << Reference.print() << "Result:" << Result << '\n');
  return Result;
}
} // namespace

bool AMDGPUPerfHintAnalysis::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const TargetMachine &TM = TPC->getTM<TargetMachine>();
  const TargetSubtargetInfo *ST = TM.getSubtargetImpl(F);

  AMDGPUPerfHint Analyzer(FIM, ST->getTargetLowering());
  Analyzer.runOnFunction(F);
  return false;
}

bool AMDGPUPerfHintAnalysis::isMemoryBound(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::isMemBound(FI->second);
}

bool AMDGPUPerfHintAnalysis::needsWaveLimiter(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::needLimitWave(FI->second);
}
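
// Usage sketch (hypothetical client pass, not part of this file): a pass that
// declares AU.addRequired<AMDGPUPerfHintAnalysis>() can query the memoized
// results, e.g.:
//   auto &PHA = getAnalysis<AMDGPUPerfHintAnalysis>();
//   if (PHA.needsWaveLimiter(&F))
//     ; // restrict waves per EU to reduce cache thrashing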