//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

static cl::opt<bool> DisableLowOverheadLoops(
    "disable-arm-loloops", cl::Hidden, cl::init(false),
    cl::desc("Disable the generation of low-overhead loops"));

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the whitelist must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
                    (CalleeBits & ~InlineFeatureWhitelist);
  // For features in the whitelist, the callee's features must be a subset of
  // the caller's.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
                     (CalleeBits & InlineFeatureWhitelist);
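  // Illustrative example: with an empty whitelist this degenerates to
  // requiring identical feature sets; a whitelisted feature may be present
  // in the caller and absent from the callee, but not the other way round.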
  return MatchExact && MatchSubset;
}

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
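  // A couple of illustrative ARM-mode costs: #0x00AB0000 is an 8-bit value
  // rotated by an even amount (a valid SOImm), so it costs 1; #0x12345678
  // is not, so on v6T2 it needs a movw/movt pair and costs 2.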
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1: any i8 immediate costs 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of Thumb1
// instructions, so we return a cost of zero for them and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
    // Conversion to BIC is free, and means we can use ~Imm instead.
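    // e.g. 'and r0, r1, #0xFFFFFF00' can be emitted as 'bic r0, r1, #0xFF',
    // so we cost the cheaper of materialising Imm or ~Imm.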
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1 << 12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1 << 8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;

  return getIntImmCost(Imm, Ty);
}

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() &&
      (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // The extend of a load is free.
  if (I && isa<LoadInst>(I->getOperand(0))) {
    static const TypeConversionCostTblEntry LoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
    };
    if (const auto *Entry = ConvertCostTableLookup(
            LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
      return Entry->Cost;

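    // e.g. a sign-extending i16 load is a single LDRSH, so the extend itself
    // costs nothing. MVE can likewise fold the extend of a vector load into
    // the load for the types below.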
    static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVELoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return Entry->Cost;
    }
  }

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
  // are linearised so they cost more.
  static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
  };

  if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
    if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost * ST->getMVEVectorCostFactor();
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

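  // Vector costs below are scaled by the MVE cost factor, likely reflecting
  // that a 128-bit MVE operation executes over multiple beats on these cores
  // (an assumption; the factor itself comes from the subtarget).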
  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
                        Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
                                 Opcode == Instruction::ExtractElement)) {
    // We say MVE moves cost at least the MVEVectorCostFactor, even though
    // they are scalar instructions. This helps prevent mixing scalar and
    // vector, to prevent vectorising where we end up just scalarising the
    // result anyway.
    return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
                    ST->getMVEVectorCostFactor()) *
           ValTy->getVectorNumElements() / 2;
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (ST->hasNEON()) {
    if (Ty->isVectorTy() && SE &&
        !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
      return NumVectorInstToHideOverhead;

    // In many cases the address computation is not merged into the
    // instruction addressing mode.
    return 1;
  }
  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
  assert(MI && "MemcpyInst expected");
  ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  const unsigned LibCallCost = 4;

  // If 'size' is not a constant, a library call will be generated.
  if (!C)
    return LibCallCost;

  const unsigned Size = C->getValue().getZExtValue();
  const unsigned DstAlign = MI->getDestAlignment();
  const unsigned SrcAlign = MI->getSourceAlignment();
  const Function *F = I->getParent()->getParent();
  const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
  std::vector<EVT> MemOps;

  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, Size, DstAlign, SrcAlign, false /*IsMemset*/,
          false /*ZeroMemset*/, false /*MemcpyStrSrc*/, false /*AllowOverlap*/,
          MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
          F->getAttributes()))
    return MemOps.size() * 2;
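  // e.g. a 16-byte, word-aligned memcpy might lower to four i32 memops,
  // i.e. four loads plus four stores, for a cost of 8 (illustrative; the
  // actual types are chosen by findOptimalMemOpLowering).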

  // If we can't find an optimal memop lowering, return the default cost.
  return LibCallCost;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  if (ST->hasNEON()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry NEONDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Reverse) {
      static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffle costs one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Select) {
      static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.

          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
  }
  if (ST->hasMVEIntegerOps()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry MVEDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
                                              LT.second))
        return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
    }
  }
  int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very
    // expensive.
    // Double registers types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON()) {
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

    int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                             Opd1PropInfo, Opd2PropInfo);

    // This is somewhat of a hack. The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so
    // for the vectorized code. Because we have support for v2i64 but not i64
    // those sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make
    // them seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;

  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result
  // is also multiplied by the MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return BaseT::getScalarizationOverhead(Ty, Args) + Num * Cost;
  }

  return BaseCost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (ST->hasNEON() && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
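    // e.g. a Factor == 2 load of <8 x i32> uses SubVecTy <4 x i32>, which is
    // a legal 128-bit vector needing one vld2, so the returned cost is
    // 2 (Factor) * 1 = 2 (illustrative).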
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}

bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions; assume that
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO Handle scalar operations properly.
    return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
  case Intrinsic::masked_store:
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter:
    return !ST->hasMVEIntegerOps();
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
    return false;
  }

  return BaseT::isLoweredToCall(F);
}


bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops)
    return false;

  if (!SE.hasLoopInvariantBackedgeTakenCount(L))
    return false;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return false;

  const SCEV *TripCountSCEV =
    SE.getAddExpr(BackedgeTakenCount,
                  SE.getOne(BackedgeTakenCount->getType()));
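  // (The trip count is the backedge-taken count plus one: a loop whose body
  // runs four times has a backedge-taken count of three.)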

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32)
    return false;

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.
  auto MaybeCall = [this](Instruction &I) {
    const ARMTargetLowering *TLI = getTLI();
    unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
    EVT VT = TLI->getValueType(DL, I.getType(), true);
    if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
      return true;

    // Check if an intrinsic will be lowered to a call and assume that any
    // other CallInst will generate a bl.
    if (auto *Call = dyn_cast<CallInst>(&I)) {
      if (isa<IntrinsicInst>(Call)) {
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
      return true;
    }

    // FPv5 provides conversions between integer, double-precision,
    // single-precision, and half-precision formats.
    switch (I.getOpcode()) {
    default:
      break;
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return !ST->hasFPARMv8Base();
    }

    // FIXME: Unfortunately the approach of checking the Operation Action does
    // not catch all cases of Legalization that use library calls. Our
    // Legalization step categorizes some transformations into library calls
    // as Custom, Expand or even Legal when doing type legalization. So for
    // now we have to special-case, for instance, the SDIV of 64-bit integers
    // and the use of floating point emulation.
    if (VT.isInteger() && VT.getSizeInBits() >= 64) {
      switch (ISD) {
      default:
        break;
      case ISD::SDIV:
      case ISD::UDIV:
      case ISD::SREM:
      case ISD::UREM:
      case ISD::SDIVREM:
      case ISD::UDIVREM:
        return true;
      }
    }

    // Assume all other non-float operations are supported.
    if (!VT.isFloatingPoint())
      return false;

    // We'll need a library call to handle most floats when using soft floats.
    if (TLI->useSoftFloat()) {
      switch (I.getOpcode()) {
      default:
        return true;
      case Instruction::Alloca:
      case Instruction::Load:
      case Instruction::Store:
      case Instruction::Select:
      case Instruction::PHI:
        return false;
      }
    }

    // We'll need a libcall to perform double precision operations on a single
    // precision only FPU.
    if (I.getType()->isDoubleTy() && !ST->hasFP64())
      return true;

    // Likewise for half precision arithmetic.
    if (I.getType()->isHalfTy() && !ST->hasFullFP16())
      return true;

    return false;
  };

  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::set_loop_iterations:
      case Intrinsic::test_set_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

Sam Parker | 757ac02 | 2019-06-12 12:00:42 +0000 | [diff] [blame] | 943 | // Scan the instructions to see if there are any that we know will turn
Sam Parker | 9d28473 | 2019-06-13 08:28:46 +0000 | [diff] [blame] | 944 | // into a call, or if this loop is already a low-overhead loop.
| 945 | auto ScanLoop = [&](Loop *L) { |
| 946 | for (auto *BB : L->getBlocks()) { |
| 947 | for (auto &I : *BB) { |
| 948 | if (MaybeCall(I) || IsHardwareLoopIntrinsic(I)) |
| 949 | return false; |
| 950 | } |
| 951 | } |
| 952 | return true; |
| 953 | }; |
| 954 | |
| 955 | // Visit inner loops. |
| 956 | for (auto *Inner : *L)
| 957 | if (!ScanLoop(Inner)) |
| 958 | return false; |
| 959 | |
| 960 | if (!ScanLoop(L)) |
| 961 | return false; |
Sam Parker | 757ac02 | 2019-06-12 12:00:42 +0000 | [diff] [blame] | 962 | |
| 963 | // TODO: Check whether the trip count calculation is expensive. If L is the |
| 964 | // inner loop but we know it has a low trip count, calculating that trip |
| 965 | // count (in the parent loop) may be detrimental. |
| 966 | |
| 967 | LLVMContext &C = L->getHeader()->getContext(); |
| 968 | HWLoopInfo.CounterInReg = true; |
Sam Parker | 9d28473 | 2019-06-13 08:28:46 +0000 | [diff] [blame] | 969 | HWLoopInfo.IsNestingLegal = false; |
Sam Parker | 9872269 | 2019-07-01 08:21:28 +0000 | [diff] [blame] | 970 | HWLoopInfo.PerformEntryTest = true; |
Sam Parker | 757ac02 | 2019-06-12 12:00:42 +0000 | [diff] [blame] | 971 | HWLoopInfo.CountType = Type::getInt32Ty(C); |
| 972 | HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1); |
| 973 | return true; |
| 974 | } |
| 975 | |
Sam Parker | 19a08e4 | 2017-07-25 08:51:30 +0000 | [diff] [blame] | 976 | void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, |
| 977 | TTI::UnrollingPreferences &UP) { |
| 978 | // Currently, only enable these preferences for M-class cores.
Sam Parker | 84fd0c3 | 2017-08-16 07:42:44 +0000 | [diff] [blame] | 979 | if (!ST->isMClass()) |
Sam Parker | 19a08e4 | 2017-07-25 08:51:30 +0000 | [diff] [blame] | 980 | return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP); |
| 981 | |
| 982 | // Disable loop unrolling for Oz and Os. |
| 983 | UP.OptSizeThreshold = 0; |
| 984 | UP.PartialOptSizeThreshold = 0; |
Evandro Menezes | 85bd397 | 2019-04-04 22:40:06 +0000 | [diff] [blame] | 985 | if (L->getHeader()->getParent()->hasOptSize()) |
Sam Parker | 487ab86 | 2017-10-23 08:05:14 +0000 | [diff] [blame] | 986 | return; |
| 987 | |
| 988 | // Only enable on Thumb-2 targets. |
| 989 | if (!ST->isThumb2()) |
| 990 | return; |
| 991 | |
| 992 | SmallVector<BasicBlock*, 4> ExitingBlocks; |
| 993 | L->getExitingBlocks(ExitingBlocks); |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 994 | LLVM_DEBUG(dbgs() << "Loop has:\n" |
| 995 | << "Blocks: " << L->getNumBlocks() << "\n" |
| 996 | << "Exit blocks: " << ExitingBlocks.size() << "\n"); |
Sam Parker | 487ab86 | 2017-10-23 08:05:14 +0000 | [diff] [blame] | 997 | |
| 998 | // Allow at most one exit other than the latch. This acts as an early exit
| 999 | // as it mirrors the profitability calculation of the runtime unroller.
| 1000 | if (ExitingBlocks.size() > 2) |
| 1001 | return; |
| 1002 | |
| 1003 | // Limit the CFG of the loop body for targets with a branch predictor. |
| 1004 | // Allowing 4 blocks permits if-then-else diamonds in the body. |
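| | // (e.g. a header, an if.then and an if.else block, plus the latch).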
| 1005 | if (ST->hasBranchPredictor() && L->getNumBlocks() > 4) |
Sam Parker | 84fd0c3 | 2017-08-16 07:42:44 +0000 | [diff] [blame] | 1006 | return; |
Sam Parker | 19a08e4 | 2017-07-25 08:51:30 +0000 | [diff] [blame] | 1007 | |
| 1008 | // Scan the loop: don't unroll loops with calls as this could prevent |
| 1009 | // inlining. |
Sam Parker | 84fd0c3 | 2017-08-16 07:42:44 +0000 | [diff] [blame] | 1010 | unsigned Cost = 0; |
Sam Parker | 487ab86 | 2017-10-23 08:05:14 +0000 | [diff] [blame] | 1011 | for (auto *BB : L->getBlocks()) { |
| 1012 | for (auto &I : *BB) { |
| 1013 | if (isa<CallInst>(I) || isa<InvokeInst>(I)) { |
| 1014 | ImmutableCallSite CS(&I); |
| 1015 | if (const Function *F = CS.getCalledFunction()) { |
| 1016 | if (!isLoweredToCall(F)) |
| 1017 | continue; |
| 1018 | } |
| 1019 | return; |
Sam Parker | 19a08e4 | 2017-07-25 08:51:30 +0000 | [diff] [blame] | 1020 | } |
David Green | 11c4602 | 2019-08-11 08:53:18 +0000 | [diff] [blame] | 1021 | // Don't unroll vectorised loops. MVE does not benefit from unrolling as
| 1022 | // much as scalar code does.
| 1023 | if (I.getType()->isVectorTy()) |
| 1024 | return; |
| 1025 | |
Sam Parker | 487ab86 | 2017-10-23 08:05:14 +0000 | [diff] [blame] | 1026 | SmallVector<const Value*, 4> Operands(I.value_op_begin(), |
| 1027 | I.value_op_end()); |
| 1028 | Cost += getUserCost(&I, Operands); |
Sam Parker | 19a08e4 | 2017-07-25 08:51:30 +0000 | [diff] [blame] | 1029 | } |
| 1030 | } |
| 1031 | |
Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1032 | LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n"); |
Sam Parker | 487ab86 | 2017-10-23 08:05:14 +0000 | [diff] [blame] | 1033 | |
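| | // Enable partial, runtime and upper-bound unrolling, defaulting to a
| | // runtime count of four, and allow unroll-and-jam for small inner loops.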
Sam Parker | 19a08e4 | 2017-07-25 08:51:30 +0000 | [diff] [blame] | 1034 | UP.Partial = true; |
| 1035 | UP.Runtime = true; |
David Green | d847aa5 | 2019-06-10 10:22:14 +0000 | [diff] [blame] | 1036 | UP.UpperBound = true; |
Sam Parker | 84fd0c3 | 2017-08-16 07:42:44 +0000 | [diff] [blame] | 1037 | UP.UnrollRemainder = true; |
| 1038 | UP.DefaultUnrollRuntimeCount = 4; |
David Green | 963401d | 2018-07-01 12:47:30 +0000 | [diff] [blame] | 1039 | UP.UnrollAndJam = true; |
| 1040 | UP.UnrollAndJamInnerLoopThreshold = 60; |
Sam Parker | 84fd0c3 | 2017-08-16 07:42:44 +0000 | [diff] [blame] | 1041 | |
| 1042 | // Forcing the unrolling of small loops can be very useful because of the
| 1043 | // branch-taken cost of the backedge.
| 1044 | if (Cost < 12) |
| 1045 | UP.Force = true; |
Sam Parker | 19a08e4 | 2017-07-25 08:51:30 +0000 | [diff] [blame] | 1046 | } |
Sam Tebbs | f312c1e | 2019-08-19 09:38:28 +0000 | [diff] [blame] | 1047 | |
| 1048 | bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty, |
| 1049 | TTI::ReductionFlags Flags) const { |
| 1050 | assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type"); |
| 1051 | unsigned ScalarBits = Ty->getScalarSizeInBits(); |
| 1052 | if (!ST->hasMVEIntegerOps()) |
| 1053 | return false; |
| 1054 | |
| 1055 | switch (Opcode) { |
| 1056 | case Instruction::FAdd: |
| 1057 | case Instruction::FMul: |
| 1058 | case Instruction::And: |
| 1059 | case Instruction::Or: |
| 1060 | case Instruction::Xor: |
| 1061 | case Instruction::Mul: |
Sam Tebbs | f312c1e | 2019-08-19 09:38:28 +0000 | [diff] [blame] | 1062 | case Instruction::FCmp: |
| 1063 | return false; |
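| | // Integer add and min/max reductions can use MVE's VADDV, VMINV and VMAXV
| | // across-vector instructions, but (presumably) only pay off when the
| | // vector exactly fills a 128-bit register with sub-64-bit elements.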
Sam Tebbs | 1572b68 | 2019-09-13 09:11:46 +0000 | [diff] [blame^] | 1064 | case Instruction::ICmp: |
Sam Tebbs | f312c1e | 2019-08-19 09:38:28 +0000 | [diff] [blame] | 1065 | case Instruction::Add: |
Sam Tebbs | 1572b68 | 2019-09-13 09:11:46 +0000 | [diff] [blame^] | 1066 | return ScalarBits < 64 && ScalarBits * Ty->getVectorNumElements() == 128; |
Sam Tebbs | f312c1e | 2019-08-19 09:38:28 +0000 | [diff] [blame] | 1067 | default: |
| 1068 | llvm_unreachable("Unhandled reduction opcode"); |
| 1069 | } |
| 1070 | return false; |
| 1071 | } |