//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

static cl::opt<bool> DisableLowOverheadLoops(
  "disable-arm-loloops", cl::Hidden, cl::init(false),
  cl::desc("Disable the generation of low-overhead loops"));

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the whitelist must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
                    (CalleeBits & ~InlineFeatureWhitelist);
  // For features in the whitelist, the callee's features must be a subset of
  // the caller's.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
                     (CalleeBits & InlineFeatureWhitelist);
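  // For example, a caller compiled with extra whitelisted features can still
  // inline a callee built without them (MatchSubset holds), but any mismatch
  // in a non-whitelisted feature blocks inlining (MatchExact fails).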
  return MatchExact && MatchSubset;
}

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1: any i8 immediate costs 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
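  // e.g. 0x1F00 (0x1F << 8) is a Thumb immediate shifted value, so it costs
  // 2 (a move plus a shift), while an arbitrary pattern such as 0x12345678
  // falls through to the constant-pool load below.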
  // Load from constant pool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions, so we return a zero cost for them and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;
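  // e.g. 'udiv i32 %x, 10' is expanded to a multiply by a magic constant
  // plus shifts, so the divisor is never materialized as an immediate.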

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
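    // e.g. 'and i32 %x, 255' lowers to a single uxtb, so the mask is free.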
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));
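  // e.g. an add of the constant -8 is emitted as 'sub r0, r0, #8'.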

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;
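  // e.g. 'xor i32 %x, -1' becomes 'mvn r0, r0', so the -1 itself is free.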

  return getIntImmCost(Imm, Ty);
}

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;
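    // e.g. extracting an i32 lane needs a vmov from a NEON register into a
    // core register, a cross-register-bank transfer that can stall the
    // pipeline on some cores.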

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
  assert(MI && "MemcpyInst expected");
  ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  const unsigned LibCallCost = 4;

  // If 'size' is not a constant, a library call will be generated.
  if (!C)
    return LibCallCost;

  const unsigned Size = C->getValue().getZExtValue();
  const unsigned DstAlign = MI->getDestAlignment();
  const unsigned SrcAlign = MI->getSourceAlignment();
  const Function *F = I->getParent()->getParent();
  const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
  std::vector<EVT> MemOps;

  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2
  // to get the cost for this memcpy.
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, Size, DstAlign, SrcAlign, false /*IsMemset*/,
          false /*ZeroMemset*/, false /*MemcpyStrSrc*/, false /*AllowOverlap*/,
          MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
          F->getAttributes()))
    return MemOps.size() * 2;
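  // For example, a 16-byte memcpy with word-aligned source and destination is
  // typically lowered to four i32 loads plus four i32 stores
  // (MemOps.size() == 4), giving a cost of 8.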

  // If we can't find an optimal memop lowering, return the default cost.
  return LibCallCost;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  if (Kind == TTI::SK_Broadcast) {
    static const CostTblEntry NEONDupTbl[] = {
        // VDUP handles these cases.
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // Reverse shuffle costs one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Select) {
    static const CostTblEntry NEONSelShuffleTbl[] = {
        // Select shuffle cost table for ARM. Cost is the number of
        // instructions required to create the shuffled vector.

        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very
    // expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON())
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem that we are facing is that SROA
  // creates a sequence of shift, and, or instructions to construct values.
  // These sequences are recognized by the ISel and have zero-cost. Not so for
  // the vectorized code. Because we have support for v2i64 but not i64, those
  // sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
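    // e.g. de-interleaving a <8 x i32> load with Factor == 2 maps onto a
    // single 128-bit vld2, which this models as Factor (2) times one access.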
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}

bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions and assume
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO: Handle scalar operations properly.
    return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
  case Intrinsic::masked_store:
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter:
    return !ST->hasMVEIntegerOps();
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
    return false;
  }

  return BaseT::isLoweredToCall(F);
}

bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops)
    return false;

  if (!SE.hasLoopInvariantBackedgeTakenCount(L))
    return false;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return false;

  const SCEV *TripCountSCEV =
    SE.getAddExpr(BackedgeTakenCount,
                  SE.getOne(BackedgeTakenCount->getType()));
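  // i.e. the trip count is the backedge-taken count plus one.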

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32)
    return false;

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.
  auto MaybeCall = [this](Instruction &I) {
    const ARMTargetLowering *TLI = getTLI();
    unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
    EVT VT = TLI->getValueType(DL, I.getType(), true);
    if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
      return true;

    // Check if an intrinsic will be lowered to a call and assume that any
    // other CallInst will generate a bl.
    if (auto *Call = dyn_cast<CallInst>(&I)) {
      if (isa<IntrinsicInst>(Call)) {
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
      return true;
    }

    // FPv5 provides conversions between integer, double-precision,
    // single-precision, and half-precision formats.
    switch (I.getOpcode()) {
    default:
      break;
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return !ST->hasFPARMv8Base();
    }

    // FIXME: Unfortunately the approach of checking the Operation Action does
    // not catch all cases of Legalization that use library calls. Our
    // Legalization step categorizes some transformations into library calls
    // as Custom, Expand or even Legal when doing type legalization. So for
    // now we have to special-case, for instance, the SDIV of 64-bit integers
    // and the use of floating-point emulation.
    if (VT.isInteger() && VT.getSizeInBits() >= 64) {
      switch (ISD) {
      default:
        break;
      case ISD::SDIV:
      case ISD::UDIV:
      case ISD::SREM:
      case ISD::UREM:
      case ISD::SDIVREM:
      case ISD::UDIVREM:
        return true;
      }
    }

    // Assume all other non-float operations are supported.
    if (!VT.isFloatingPoint())
      return false;

    // We'll need a library call to handle most floats when using soft-float.
    if (TLI->useSoftFloat()) {
      switch (I.getOpcode()) {
      default:
        return true;
      case Instruction::Alloca:
      case Instruction::Load:
      case Instruction::Store:
      case Instruction::Select:
      case Instruction::PHI:
        return false;
      }
    }

    // We'll need a libcall to perform double-precision operations on a
    // single-precision-only FPU.
    if (I.getType()->isDoubleTy() && !ST->hasFP64())
      return true;

    // Likewise for half-precision arithmetic.
    if (I.getType()->isHalfTy() && !ST->hasFullFP16())
      return true;

    return false;
  };

  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::set_loop_iterations:
      case Intrinsic::test_set_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there are any that we know will turn
  // into a call or if this loop is already a low-overhead loop.
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (MaybeCall(I) || IsHardwareLoopIntrinsic(I))
          return false;
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = true;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Currently, only enable these preferences for M-class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow one exit other than the latch. This acts as an early exit, as
  // it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls, as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UpperBound = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Force-unrolling small loops can be very useful because of the
  // branch-taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}