//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
  return (CallerBits & CalleeBits) == CalleeBits;
}
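
// Illustrative example (not exhaustive): a caller compiled with "+neon,+crc"
// may inline a callee built with just "+neon", since the callee's features
// are a subset; a callee requiring "+crypto" would be rejected, as the
// inlined body could use instructions the caller is not guaranteed to
// support.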

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}
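
// Worked example (illustrative): for Val = 0x12345678, which is not a
// logical immediate, countLeadingZeros gives LZ = 35, so the cost is
// (64 - 35 + 15) / 16 = 2, matching the two-instruction sequence
//
//   movz w0, #0x5678
//   movk w0, #0x1234, lsl #16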

/// \brief Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
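
// Worked example (illustrative): an i128 constant is sign-extended and
// costed in two 64-bit chunks. For the value 0x12345678, the low chunk costs
// 2 moves (see above) and the high chunk is 0 (cost 0), giving a total of
// max(1, 2) = 2; a constant of 0 still reports 1, since at least one
// instruction is needed to materialize it.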

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
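
// Worked example (illustrative): for 'add i64 %x, 305419896' (0x12345678),
// the immediate is operand 1, so ImmIdx matches: its materialization cost of
// 2 exceeds NumConstants * TCC_Basic = 1, and 2 is returned, letting
// constant hoisting consider the constant. By contrast, the shift amount of
// 'shl i64 %x, 3' is always TCC_Free.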

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
  return TTI::PSK_Software;
}

bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
                                           ArrayRef<const Value *> Args) {

  // A helper that returns a vector type from the given type. The number of
  // elements in type Ty determines the vector width.
  auto toVectorTy = [&](Type *ArgTy) {
    return VectorType::get(ArgTy->getScalarType(),
                           DstTy->getVectorNumElements());
  };

  // Exit early if DstTy is not a vector type whose elements are at least
  // 16 bits wide.
  if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
    return false;

  // Determine if the operation has a widening variant. We consider both the
  // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
  // instructions.
  //
  // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
  //       verify that their extending operands are eliminated during code
  //       generation.
  switch (Opcode) {
  case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
  case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
    break;
  default:
    return false;
  }

  // To be a widening instruction (either the "wide" or "long" version), the
  // second operand must be a sign- or zero-extend having a single user. We
  // only consider extends having a single user because they may otherwise not
  // be eliminated.
  if (Args.size() != 2 ||
      (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
      !Args[1]->hasOneUse())
    return false;
  auto *Extend = cast<CastInst>(Args[1]);

  // Legalize the destination type and ensure it can be used in a widening
  // operation.
  auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
  unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
  if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
    return false;

  // Legalize the source type and ensure it can be used in a widening
  // operation.
  Type *SrcTy = toVectorTy(Extend->getSrcTy());
  auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
  unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
    return false;

  // Get the total number of vector elements in the legalized types.
  unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorNumElements();
  unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorNumElements();

  // Return true if the legalized types have the same number of vector elements
  // and the destination element type size is twice that of the source type.
  return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
}
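
// Illustrative IR this predicate accepts (assuming 128-bit NEON vectors):
// the second operand of the sub is a single-use sign-extend, the legalized
// source and destination have the same element count, and the destination
// elements (i32) are twice the width of the source elements (i16), so the
// pair can be emitted as a single "wide" instruction:
//
//   %e = sext <4 x i16> %b to <4 x i32>
//   %r = sub <4 x i32> %a, %e          ; ssubw v0.4s, v1.4s, v2.4h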

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // If the cast is observable, and it is used by a widening instruction (e.g.,
  // uaddl, saddw, etc.), it may be free.
  if (I && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
    if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
      // If the cast is the second operand, it is free. We will generate either
      // a "wide" or "long" version of the widening instruction.
      if (I == SingleUser->getOperand(1))
        return 0;
      // If the cast is not the second operand, it will be free if it looks the
      // same as the second operand. In this case, we will generate a "long"
      // version of the widening instruction.
      if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
        if (I->getOpcode() == Cast->getOpcode() &&
            cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
          return 0;
    }
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src);
}
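
// Worked example (illustrative): sign-extending an extracted lane is modeled
// as free beyond the extract itself, because SMOV widens as part of the
// move, e.g.
//
//   smov w0, v0.h[3]   ; lane 3 of a v8i16, sign-extended to 32 bits
//
// Zero-extends are likewise free via UMOV, except when an i8/i16 lane is
// widened all the way to i64, where the default extend cost is added.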

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other inserts/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  int Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;
  }
}
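
// Illustrative expansion behind the SDIV special case above, assuming a
// signed divide of x0 by 8: the ADD + CMP + SELECT + SRA sequence whose four
// per-instruction costs are summed:
//
//   add  x8, x0, #7          ; bias negative dividends
//   cmp  x0, #0
//   csel x8, x8, x0, lt
//   asr  x0, x8, #3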

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                              const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower some vector selects well when they are wider than the
  // register width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  auto LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && Alignment < 16) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that has shown
    // in practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
      Ty->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}
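
// Worked example (illustrative): on a subtarget where misaligned 128-bit
// stores are slow, storing a <2 x i64> with 8-byte alignment legalizes to
// one v2i64 (LT.first == 1), so the returned cost is 1 * 2 * 6 = 12,
// discouraging vectorization unless enough surrounding work is vectorized.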

int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
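
// Worked example (illustrative): an interleaved load of <8 x i32> with
// Factor == 2 yields SubVecTy == <4 x i32>, a legal 128-bit type, so the
// cost is 2 * 1 = 2, matching a single LD2 of two vector registers:
//
//   ld2 { v0.4s, v1.4s }, [x0]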

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
               << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " << UP.MaxCount
                 << '\n');
  }
}
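
// Worked example (illustrative): a loop body containing 3 strided loads gets
// UP.MaxCount = 1 << Log2_32(7 / 3) = 2, so unrolling by 2 yields 6 strided
// loads, still within the MaxStridedLoads budget of 7.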

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP);

  // An inner loop is more likely to be hot, and the runtime check can be
  // promoted out of the loop by the LICM pass, so its overhead is lower; try
  // a larger threshold to unroll more inner loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type and used in memory accesses. If it is
/// used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign-extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered as "complex" if it has more than 2
      // operands. We will promote a SExt used in such complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}
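
// Illustrative IR for the "complex" GEP case above: the sext feeds a
// getelementptr with more than 2 operands, so promotion is allowed without a
// common header:
//
//   %idx = sext i32 %i to i64
//   %p = getelementptr [16 x i32], [16 x i32]* %a, i64 0, i64 %idx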

unsigned AArch64TTIImpl::getCacheLineSize() {
  return ST->getCacheLineSize();
}

unsigned AArch64TTIImpl::getPrefetchDistance() {
  return ST->getPrefetchDistance();
}

unsigned AArch64TTIImpl::getMinPrefetchStride() {
  return ST->getMinPrefetchStride();
}

unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
  return ST->getMaxPrefetchIterationsAhead();
}

bool AArch64TTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                           TTI::ReductionFlags Flags) const {
  assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type");
  unsigned ScalarBits = Ty->getScalarSizeInBits();
  switch (Opcode) {
  case Instruction::FAdd:
  case Instruction::FMul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Mul:
    return false;
  case Instruction::Add:
    return ScalarBits * Ty->getVectorNumElements() >= 128;
  case Instruction::ICmp:
    return (ScalarBits < 64) &&
           (ScalarBits * Ty->getVectorNumElements() >= 128);
  case Instruction::FCmp:
    return Flags.NoNaN;
  default:
    llvm_unreachable("Unhandled reduction opcode");
  }
  return false;
}
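
// Worked example (illustrative): an integer add reduction over <4 x i32> has
// ScalarBits * NumElements == 128, so the intrinsic form is preferred and
// can lower to a single across-lanes reduction:
//
//   addv s0, v0.4s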