//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "basictti"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>

using namespace llvm;

namespace {

class BasicTTI : public ImmutablePass, public TargetTransformInfo {
  const TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  BasicTTI() : ImmutablePass(ID), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetLowering *TLI) : ImmutablePass(ID), TLI(TLI) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{

  virtual bool isLegalAddImmediate(int64_t imm) const;
  virtual bool isLegalICmpImmediate(int64_t imm) const;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
  virtual bool isTypeLegal(Type *Ty) const;
  virtual unsigned getJumpBufAlignment() const;
  virtual unsigned getJumpBufSize() const;
  virtual bool shouldBuildLookupTables() const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const;
  virtual unsigned getCFInstrCost(unsigned Opcode) const;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const;
  virtual unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                         ArrayRef<Type*> Tys) const;
  virtual unsigned getNumberOfParts(Type *Tp) const;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true, false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetLowering *TLI) {
  return new BasicTTI(TLI);
}

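// Rough usage sketch (assumed here, not defined in this file): a target's
// TargetMachine typically registers this pass among its analysis passes, e.g.
//
//   void MyTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
//     PM.add(createBasicTargetTransformInfoPass(getTargetLowering()));
//   }
//
// where MyTargetMachine is an illustrative placeholder.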

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return TLI->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return TLI->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return TLI->isLegalAddressingMode(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TLI->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = TLI->getValueType(Ty);
  return TLI->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return TLI->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return TLI->getJumpBufSize();
}

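// Building a lookup table only pays off if the target can actually lower the
// resulting jump table or indirect branch.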
bool BasicTTI::shouldBuildLookupTables() const {
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

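// The basic implementation conservatively reports a single register; targets
// with real (vector) register files override this.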
unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
  // Check if any of the operands are vector operands.
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split into multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return 1;
}

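// The basic implementation assumes every shuffle costs a single instruction.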
unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  return 1;
}

unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    // Just check the op cost. If the operation is legal then assume it costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts and truncates between types that are legalized to the same
      // type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;

      // Assume that ZExt is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that SExt is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or
    // if the vectors are legalized to different types, estimate the
    // scalarization costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions. This
  // is where we handle bitcasts between vectors and scalars. We need to assume
  // that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
    return (Src->isVectorTy()? getScalarizationOverhead(Src, false, true):0) +
           (Dst->isVectorTy()? getScalarizationOverhead(Dst, true, false):0);

  llvm_unreachable("Unhandled cast");
}

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the operation is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}

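// Assume element insertion/extraction can be done with a single instruction;
// targets override this as needed.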
unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  return 1;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);

  // Assume that loads and stores of legal types cost 1 per legalized part.
  return LT.first;
}

unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  // Assume that we need to scalarize this intrinsic.
  unsigned ScalarizationCost = 0;
  unsigned ScalarCalls = 1;
  if (RetTy->isVectorTy()) {
    ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
    ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
  }
  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
    if (Tys[i]->isVectorTy()) {
      ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
      // Use the operand's element count here, not the return type's.
      ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
    }
  }
  return ScalarCalls + ScalarizationCost;
}

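// Report how many parts type legalization splits this type into.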
unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  return LT.first;
}