//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "AMDGPUtti"
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeAMDGPUTTIPass(PassRegistry &);
}

namespace {

class AMDGPUTTI LLVM_FINAL : public ImmutablePass, public TargetTransformInfo {
  const AMDGPUTargetMachine *TM;
  const AMDGPUSubtarget *ST;
  const AMDGPUTargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
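  // The default constructor is required by the pass registration machinery
  // but must never be used directly; real instances are created through
  // createAMDGPUTargetTransformInfoPass() below.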
  AMDGPUTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  AMDGPUTTI(const AMDGPUTargetMachine *TM)
      : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeAMDGPUTTIPass(*PassRegistry::getPassRegistry());
  }

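  // Push/pop this implementation on the TTI analysis-group stack so queries
  // reach the target-specific overrides below before falling back to the
  // generic TTI implementation.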
  virtual void initializePass() LLVM_OVERRIDE { pushTTIStack(this); }

  virtual void finalizePass() { popTTIStack(); }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const LLVM_OVERRIDE {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) LLVM_OVERRIDE {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo *)this;
    return this;
  }

  virtual bool hasBranchDivergence() const LLVM_OVERRIDE;

  virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;

  /// @}
};

} // end anonymous namespace

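// Register AMDGPUTTI as an implementation of the TargetTransformInfo analysis
// group under the "AMDGPUtti" name.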
INITIALIZE_AG_PASS(AMDGPUTTI, TargetTransformInfo, "AMDGPUtti",
                   "AMDGPU Target Transform Info", true, true, false)
char AMDGPUTTI::ID = 0;

ImmutablePass *
llvm::createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM) {
  return new AMDGPUTTI(TM);
}

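// AMDGPU executes work-items in lockstep wavefronts, so a branch condition may
// differ between lanes of the same wave. Reporting branch divergence lets
// divergence-aware passes avoid transforms that are harmful on SIMT hardware.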
bool AMDGPUTTI::hasBranchDivergence() const { return true; }

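// If the loop computes addresses into private (alloca) storage, raise the
// unroll threshold so the unroller is free to fully unroll it; the rationale
// is spelled out in the inline comment below.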
void AMDGPUTTI::getUnrollingPreferences(Loop *L,
                                        UnrollingPreferences &UP) const {
  for (Loop::block_iterator BI = L->block_begin(), BE = L->block_end();
       BI != BE; ++BI) {
    BasicBlock *BB = *BI;
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I);
      if (!GEP)
        continue;
      const Value *Ptr = GEP->getPointerOperand();
      const AllocaInst *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr));
      if (Alloca) {
        // We want to do whatever we can to limit the number of alloca
        // instructions that make it through to the code generator. Allocas
        // require us to use indirect addressing, which is slow and prone to
        // compiler bugs. If this loop does an address calculation on an
        // alloca ptr, then we want to unconditionally unroll the loop. In
        // most cases, this will make it possible for SROA to eliminate these
        // allocas.
        UP.Threshold = UINT_MAX;
      }
    }
  }
}