//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

static cl::opt<bool> EnableMaskedLoadStores(
    "enable-arm-maskedldst", cl::Hidden, cl::init(false),
    cl::desc("Enable the generation of masked loads and stores"));

static cl::opt<bool> DisableLowOverheadLoops(
    "disable-arm-loloops", cl::Hidden, cl::init(false),
    cl::desc("Disable the generation of low-overhead loops"));

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the whitelist must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
                    (CalleeBits & ~InlineFeatureWhitelist);
  // For features in the whitelist, the callee's features must be a subset of
  // the callers'.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
                     (CalleeBits & InlineFeatureWhitelist);
  return MatchExact && MatchSubset;
}

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // In Thumb1, any i8 immediate has a cost of 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions, so we return a cost of zero for them and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;

  return getIntImmCost(Imm, Ty);
}

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND, MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // The extend of a load is free
  if (I && isa<LoadInst>(I->getOperand(0))) {
    static const TypeConversionCostTblEntry LoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
    };
    if (const auto *Entry = ConvertCostTableLookup(
            LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
      return Entry->Cost;

    static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVELoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return Entry->Cost;
    }
  }

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant, sexts
  // are linearised so take more.
  static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
  };

  if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
    if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost * ST->getMVEVectorCostFactor();
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

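  // Casts not covered by the tables above fall back to the generic cost,
  // scaled by the MVE vector cost factor when costing a vector cast on an
  // MVE target.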
  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
                        Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
                                 Opcode == Instruction::ExtractElement)) {
    // We say MVE moves cost at least the MVEVectorCostFactor, even though
    // they are scalar instructions. This helps prevent mixing scalar and
    // vector code, to avoid vectorising where we end up just scalarising the
    // result anyway.
    return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
                    ST->getMVEVectorCostFactor()) *
           ValTy->getVectorNumElements() / 2;
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (ST->hasNEON()) {
    if (Ty->isVectorTy() && SE &&
        !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
      return NumVectorInstToHideOverhead;

    // In many cases the address computation is not merged into the instruction
    // addressing mode.
    return 1;
  }
  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
  if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
    return false;

  if (auto *VecTy = dyn_cast<VectorType>(DataTy)) {
    // Don't support v2i1 yet.
    if (VecTy->getNumElements() == 2)
      return false;

    // We don't support extending fp types.
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
    if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
      return false;
  }

  unsigned EltWidth = DataTy->getScalarSizeInBits();
  return (EltWidth == 32 && (!Alignment || Alignment >= 4)) ||
         (EltWidth == 16 && (!Alignment || Alignment >= 2)) ||
         (EltWidth == 8);
}

int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
  assert(MI && "MemcpyInst expected");
  ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  const unsigned LibCallCost = 4;

  // If 'size' is not a constant, a library call will be generated.
  if (!C)
    return LibCallCost;

  const unsigned Size = C->getValue().getZExtValue();
  const unsigned DstAlign = MI->getDestAlignment();
  const unsigned SrcAlign = MI->getSourceAlignment();
  const Function *F = I->getParent()->getParent();
  const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
  std::vector<EVT> MemOps;

  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, Size, DstAlign, SrcAlign, false /*IsMemset*/,
          false /*ZeroMemset*/, false /*MemcpyStrSrc*/, false /*AllowOverlap*/,
          MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
          F->getAttributes()))
    return MemOps.size() * 2;

  // If we can't find an optimal memop lowering, return the default cost
  return LibCallCost;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  if (ST->hasNEON()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry NEONDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Reverse) {
      static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffle cost one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Select) {
      static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.

          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
  }
  if (ST->hasMVEIntegerOps()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry MVEDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
                                              LT.second))
        return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
    }
  }
  int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON()) {
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

    int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                             Opd1PropInfo, Opd2PropInfo);

    // This is somewhat of a hack. The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so for
    // the vectorized code. Because we have support for v2i64 but not i64 those
    // sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make them
    // seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;

  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocation plus the cost of
    // inserting and extracting the values.
    return BaseT::getScalarizationOverhead(Ty, Args) + Num * Cost;
  }

  return BaseCost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                const Instruction *I) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (ST->hasNEON() && Src->isVectorTy() &&
      (Alignment && *Alignment != Align(16)) &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}

bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions and assume
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO Handle scalar operations properly.
    return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
  case Intrinsic::masked_store:
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter:
    return !ST->hasMVEIntegerOps();
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
    return false;
  }

  return BaseT::isLoweredToCall(F);
}

bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops)
    return false;

  if (!SE.hasLoopInvariantBackedgeTakenCount(L))
    return false;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return false;

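  // The trip count is the backedge-taken count plus one.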
  const SCEV *TripCountSCEV =
    SE.getAddExpr(BackedgeTakenCount,
                  SE.getOne(BackedgeTakenCount->getType()));

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32)
    return false;

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.
  auto MaybeCall = [this](Instruction &I) {
    const ARMTargetLowering *TLI = getTLI();
    unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
    EVT VT = TLI->getValueType(DL, I.getType(), true);
    if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
      return true;

    // Check if an intrinsic will be lowered to a call and assume that any
    // other CallInst will generate a bl.
    if (auto *Call = dyn_cast<CallInst>(&I)) {
      if (isa<IntrinsicInst>(Call)) {
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
      return true;
    }

    // FPv5 provides conversions between integer, double-precision,
    // single-precision, and half-precision formats.
    switch (I.getOpcode()) {
    default:
      break;
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return !ST->hasFPARMv8Base();
    }

    // FIXME: Unfortunately the approach of checking the Operation Action does
    // not catch all cases of Legalization that use library calls. Our
    // Legalization step categorizes some transformations into library calls as
    // Custom, Expand or even Legal when doing type legalization. So for now
    // we have to special case for instance the SDIV of 64bit integers and the
    // use of floating point emulation.
    if (VT.isInteger() && VT.getSizeInBits() >= 64) {
      switch (ISD) {
      default:
        break;
      case ISD::SDIV:
      case ISD::UDIV:
      case ISD::SREM:
      case ISD::UREM:
      case ISD::SDIVREM:
      case ISD::UDIVREM:
        return true;
      }
    }

    // Assume all other non-float operations are supported.
    if (!VT.isFloatingPoint())
      return false;

    // We'll need a library call to handle most floats when using soft floats.
    if (TLI->useSoftFloat()) {
      switch (I.getOpcode()) {
      default:
        return true;
      case Instruction::Alloca:
      case Instruction::Load:
      case Instruction::Store:
      case Instruction::Select:
      case Instruction::PHI:
        return false;
      }
    }

    // We'll need a libcall to perform double precision operations on a single
    // precision only FPU.
    if (I.getType()->isDoubleTy() && !ST->hasFP64())
      return true;

    // Likewise for half precision arithmetic.
    if (I.getType()->isHalfTy() && !ST->hasFullFP16())
      return true;

    return false;
  };

  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::set_loop_iterations:
      case Intrinsic::test_set_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there are any that we know will turn into
  // a call or if this loop is already a low-overhead loop.
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (MaybeCall(I) || IsHardwareLoopIntrinsic(I))
          return false;
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = true;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow another exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
      // Don't unroll vectorised loops. MVE does not benefit from them as much
      // as scalar code.
      if (I.getType()->isVectorTy())
        return;

      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

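  // Enable partial and runtime unrolling for the loops that pass the checks
  // above; small loop bodies are additionally forced to unroll below.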
  UP.Partial = true;
  UP.Runtime = true;
  UP.UpperBound = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Forcing the unrolling of small loops can be very useful because of the
  // branch-taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}

bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                       TTI::ReductionFlags Flags) const {
  assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type");
  unsigned ScalarBits = Ty->getScalarSizeInBits();
  if (!ST->hasMVEIntegerOps())
    return false;

  switch (Opcode) {
  case Instruction::FAdd:
  case Instruction::FMul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Mul:
  case Instruction::FCmp:
    return false;
  case Instruction::ICmp:
  case Instruction::Add:
    return ScalarBits < 64 && ScalarBits * Ty->getVectorNumElements() == 128;
  default:
    llvm_unreachable("Unhandled reduction opcode");
  }
  return false;
}