//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

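// Report what kind of popcount support the subtarget provides for a type of
// the given width: fast or slow hardware popcount when popcntd/popcntw are
// available, and a software fallback otherwise.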
TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

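// Estimate the cost of materializing an integer constant: one instruction for
// a 16-bit signed immediate (li) or a value with the low 16 bits clear (lis),
// two instructions (lis+ori) for other 32-bit values, and up to four
// instructions for anything wider.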
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

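// Immediates of most intrinsics are free. For the overflow and
// stackmap/patchpoint intrinsics, only the operand forms listed below can be
// encoded directly; anything else pays the normal materialization cost
// computed above.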
int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

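// Determine, per instruction opcode, which immediate operand forms the PPC
// ISA can encode directly: 16-bit signed immediates for most D-form
// arithmetic, shifted 16-bit immediates for addis/oris/xoris, run-of-ones
// masks for the rotate-and-mask instructions, 16-bit unsigned immediates for
// unsigned compares, and zero for compares and selects. Such immediates are
// free; others cost a materialization sequence.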
int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

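// Conservatively determine whether this block contains anything that could
// interfere with a CTR-based hardware loop: calls that may survive to codegen
// (library calls and call-like intrinsics), inline asm that clobbers the ctr
// register, operations that are legalized into runtime calls, and indirect
// branches or large switches, since indirect jumps use the counter register.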
bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
                             TargetLibraryInfo *LibInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value*)> memAddrUsesCTR =
    [&memAddrUsesCTR, &TM](const Value *MemAddr) -> bool {
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

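  // An integer type is "large" if it is wider than the native GPR width;
  // operations on such types may be legalized into runtime calls.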
  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to set_loop_iterations or loop_decrement,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
// let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_fp128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

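// Decide whether this loop should become a CTR-based hardware loop: reject
// loops too short to amortize the mtctr latency, loops that might clobber or
// force a spill of the counter register, and loops whose profile shows a
// frequently taken early exit.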
bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small, short loops to CTR loops: the mtctr latency isn't
  // worth it when the trip count is small and the body is short.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if ((TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using the coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites, provided their callers do not call any other non-coldcc
// functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

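// Allow memcmp calls to be expanded into an inline sequence of loads and
// compares, using the widest legal load sizes (8, 4, 2, then 1 bytes) up to
// the target's per-call load limit.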
PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

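// With VSX, the 32 FPRs and 32 VRs overlay a unified file of 64 VSX
// registers, so vector and floating-point queries are answered in terms of
// VSXRC; without VSX, each of the GPR/FPR/VR classes has 32 registers.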
unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC);
    return ClassID == GPRRC ? 32 : 64;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && Ty->getScalarType()->isFloatTy())
    return ST->hasVSX() ? VSXRC : FPRRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC:       return "PPC::GPRRC";
  case FPRRC:       return "PPC::FPRRC";
  case VRRC:        return "PPC::VRRC";
  case VSXRC:       return "PPC::VSXRC";
  }
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code with respect to scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fall back to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

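// Cost of inserting one element into, or extracting one element from, a
// vector. With direct moves or P9 Altivec this is a GPR<->VSR transfer;
// without them it requires a store and reload, incurring a load-hit-store
// stall.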
int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert.  Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract.  Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld).  Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay.  This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark.  It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store.  Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case. There are also similar cases to consider.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

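// Returns true if the loop's latch compare can be folded into a CTR-based
// hardware-loop decrement-and-branch, reporting the exit branch through *BI.
// Nested loops are checked first; if an inner loop can already save its
// compare, the search stops for the enclosing loop.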
bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}