//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
  assert(VecTy->isVectorTy());
  // Avoid types like <2 x i32*>.
  if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
    return false;
  EVT VecVT = EVT::getEVT(VecTy);
  if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
    return false;
  if (ST.isHVXVectorType(VecVT.getSimpleVT()))
    return true;
  auto Action = TLI.getPreferredVectorAction(VecVT.getSimpleVT());
  return Action == TargetLoweringBase::TypeWidenVector;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (Ty->isVectorTy())
    return Ty->getVectorNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->empty() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    UP.PeelCount = 2;
  }
}

bool HexagonTTIImpl::shouldFavorPostInc() const {
  return true;
}

/// --- Vector TTI begin ---

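// Hexagon has 32 general-purpose registers; HVX vector registers (32 of them)
// are only reported when the auto-HVX vectorizer is enabled.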
unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 0;
}

unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
  return Vector ? getMinVectorRegisterBitWidth() : 32;
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength()*8 : 0;
}

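// The minimum vectorization factor for elements of a given width is the
// number of such elements that fill one HVX vector register.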
unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
  return (8 * ST.getVectorLength()) / ElemWidth;
}

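// Scalarization, call, and generic intrinsic costs are left to the default
// TTI implementation.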
unsigned HexagonTTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
      bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, Insert, Extract);
}

unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
      ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}

unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
      ArrayRef<Type*> Tys) {
  return BaseT::getCallInstrCost(F, RetTy, Tys);
}

unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Value*> Args, FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}

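// bswap gets a small fixed cost on top of the type legalization cost; all
// other intrinsics fall back to the default model.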
unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
      ArrayRef<Type*> Tys, FastMathFlags FMF,
      unsigned ScalarizationCostPassed) {
  if (ID == Intrinsic::bswap) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, RetTy);
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

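// Address computation is treated as free; the assumption is that Hexagon's
// addressing modes absorb it.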
unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
      ScalarEvolution *SE, const SCEV *S) {
  return 0;
}

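// Vector loads are costed by how many register-wide (HVX) or aligned scalar
// chunks are needed to cover the type, with extra penalties for misalignment
// and floating-point elements. Stores use the default cost.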
unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
      unsigned Alignment, unsigned AddressSpace, const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getBitWidth();
    if (useHVX() && isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      Alignment = std::min(Alignment, RegWidth/8);
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      unsigned AlignWidth = 8 * std::max(1u, Alignment);
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3*NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost = VecTy->getElementType()->isFloatingPointTy() ? FloatFactor
                                                                 : 1;
    Alignment = std::min(Alignment, 8u);
    unsigned AlignWidth = 8 * std::max(1u, Alignment);
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == 4 || Alignment == 8)
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    unsigned LogA = Log2_32(Alignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
}

unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode,
      Type *Src, unsigned Alignment, unsigned AddressSpace) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}

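// Shuffles are given a nominal unit cost.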
unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
      int Index, Type *SubTp) {
  return 1;
}

unsigned HexagonTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
      Value *Ptr, bool VariableMask, unsigned Alignment) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
}

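// An unmasked interleaved access that uses every member of the group costs
// the same as a plain wide memory operation on the whole vector type.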
unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode,
      Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      unsigned Alignment, unsigned AddressSpace, bool UseMaskForCond,
      bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, nullptr);
}

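// Vector FCmp pays the floating-point penalty: FloatFactor per element on
// top of the type legalization cost.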
unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
      Type *CondTy, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

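// Vector floating-point arithmetic is likewise penalized by FloatFactor per
// element; integer vector arithmetic uses the default cost.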
228unsigned HexagonTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
229 TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
230 TTI::OperandValueProperties Opd1PropInfo,
231 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value*> Args) {
Krzysztof Parzyszekbea23d02018-06-12 15:12:50 +0000232 if (Ty->isVectorTy()) {
233 std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
234 if (LT.second.isFloatingPoint())
235 return LT.first + FloatFactor * getTypeNumElements(Ty);
236 }
Krzysztof Parzyszek4bdf1aa2018-04-13 20:46:50 +0000237 return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
238 Opd1PropInfo, Opd2PropInfo, Args);
239}
240
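// Casts to or from floating point pay FloatFactor for each FP element on
// either side; all other casts are assumed to cost a single instruction.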
unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
      Type *SrcTy, const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
    return std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
  }
  return 1;
}

unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
      unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

int HexagonTTIImpl::getUserCost(const User *U,
                                ArrayRef<const Value *> Operands) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
Sumanth Gundapanenid2dd79b2017-06-30 20:54:24 +0000311}