//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
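
// For example (an illustrative note, not from the original source): with
// ST->hasPOPCNT() a scalar ctpop intrinsic lowers to a single POPCNT
// instruction, hence the "fast hardware" answer; without it the intrinsic is
// expanded into a bit-twiddling software sequence.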

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}
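
// These counts mirror the architectural register files (illustrative note):
// x86-64 exposes 16 general purpose and 16 XMM/YMM registers (32 ZMM
// registers with AVX-512), while 32-bit mode only exposes 8 of each.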

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a constant power of two is normally
    // expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    unsigned Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
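
  // A sketch of the expansion costed above (illustrative only): an sdiv of
  // v4i32 by a uniform constant 4 becomes roughly
  //   %sgn  = ashr <4 x i32> %x, <i32 31, ...>   ; SRA: broadcast sign bit
  //   %bias = lshr <4 x i32> %sgn, <i32 30, ...> ; SRL: rounding bias
  //   %tmp  = add  <4 x i32> %x, %bias           ; ADD: fix negative lanes
  //   %res  = ashr <4 x i32> %tmp, <i32 2, ...>  ; SRA: the actual divide
  // which matches the 2 * AShr + LShr + Add accounting.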

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we declare them as
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },

    { ISD::SHL,  MVT::v32i8,      11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,      11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,      24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,     10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v4i64,    4*10 }, // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,   32*20 },
    { ISD::SDIV, MVT::v16i16,  16*20 },
    { ISD::SDIV, MVT::v8i32,    8*20 },
    { ISD::SDIV, MVT::v4i64,    4*20 },
    { ISD::UDIV, MVT::v32i8,   32*20 },
    { ISD::UDIV, MVT::v16i16,  16*20 },
    { ISD::UDIV, MVT::v8i32,    8*20 },
    { ISD::UDIV, MVT::v4i64,    4*20 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }
  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered into a
      // vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted into a
      // vector multiply; the new multiply is eventually lowered into a
      // sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }
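
  // An illustrative identity behind the conversion above (not from the
  // original source):
  //   shl <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  // is equivalent to
  //   mul <4 x i32> %x, <i32 2, i32 4, i32 8, i32 16>
  // so shifts by non-uniform constants can be costed as multiplies.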

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // In some cases, where the shift amount is a scalar, we would be able to
    // generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it
    // invisible to ISel. The cost model must return worst case assumptions
    // because it is used for vectorization and we don't want to make
    // vectorized code worse than scalar code.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,   4 },
    { ISD::MUL,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v8i32,    4 },
    { ISD::ADD,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v4i64,    4 },
    { ISD::ADD,     MVT::v4i64,    4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,     MVT::v4i64,    18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,     MVT::v2i64,    9 },
    { ISD::MUL,     MVT::v4i64,    9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                    Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    unsigned Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //  2x  vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //  2x vextractf128 , 4x vpshufb , 2x vpor ,  1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
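
// Illustrative masks for the two shuffle kinds costed above, for a 4-element
// vector (example only, not from the original source):
//   SK_Reverse:   <3, 2, 1, 0>  -- reverses the element order.
//   SK_Alternate: <0, 5, 2, 7>  -- picks even lanes from the first operand
//                                  and odd lanes from the second.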

unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                        Type *CondTy) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   1 },
    { ISD::SETCC,   MVT::v4f32,   1 },
    { ISD::SETCC,   MVT::v2i64,   1 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f64,   1 },
    { ISD::SETCC,   MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,   MVT::v4i64,   4 },
    { ISD::SETCC,   MVT::v8i32,   4 },
    { ISD::SETCC,   MVT::v16i16,  4 },
    { ISD::SETCC,   MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,   MVT::v4i64,   1 },
    { ISD::SETCC,   MVT::v8i32,   1 },
    { ISD::SETCC,   MVT::v16i16,  1 },
    { ISD::SETCC,   MVT::v32i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC,   MVT::v8i64,   1 },
    { ISD::SETCC,   MVT::v16i32,  1 },
    { ISD::SETCC,   MVT::v8f64,   1 },
    { ISD::SETCC,   MVT::v16f32,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
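
// For instance (illustrative, not from the original source): on a target
// where v8i32 is split into two v4i32 registers, an extract from lane 6 is
// normalized above to lane 6 % 4 == 2 of one of the halves.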

unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
                                              bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                     unsigned Alignment,
                                     unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
                                             Alignment, AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
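
// For example (illustrative): a v8f32 access legalizes to a single 256-bit
// YMM load/store; on an AVX1-only core such as Sandybridge the cost above is
// doubled to 2 because the 256-bit access is internally split into two
// 128-bit halves.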

unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate the scalar cost, take the regular cost without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization
    unsigned MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    unsigned ScalarCompareCost =
        getCmpSelInstrCost(Instruction::ICmp,
                           Type::getInt8Ty(getGlobalContext()), NULL);
    unsigned BranchCost = getCFInstrCost(Instruction::Br);
    unsigned MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    unsigned ValueSplitCost =
        getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
                                 Opcode == Instruction::Store);
    unsigned MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  unsigned Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                      bool IsPairwise) {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },  // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },  // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },  // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 },  // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}
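
// Shapes of the two reduction flavors costed above, sketched for a 4-lane
// add reduction (illustrative only, not from the original source):
//   Pairwise:     (a0+a1, a2+a3), then (a01+a23) -- adjacent lanes are
//                 combined at each step, as an HADD-style lowering would do.
//   Non-pairwise: add the high half onto the low half and repeat:
//                 (a0+a2, a1+a3), then (a02+a13).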

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
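
// For example (illustrative, not from the original source):
//   getIntImmCost(0)           -> TCC_Free      (e.g. xor reg, reg)
//   getIntImmCost(42)          -> TCC_Basic     (fits a sign-extended imm32)
//   getIntImmCost(0x123456789) -> 2 * TCC_Basic (needs a full 64-bit movabs)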

unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}
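
// A sketch of the chunking above (illustrative only): an i128 constant whose
// low 64 bits are 1 and whose high 64 bits are 0 splits into chunks costing
// TCC_Basic + TCC_Free, so the whole immediate is reported as cost 1.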

unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<unsigned>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
  int DataWidth = DataTy->getPrimitiveSizeInBits();

  // TODO: AVX512 allows gather/scatter, which works with strided and random
  // accesses as well.
  if ((DataWidth < 32) || (Consecutive == 0))
    return false;
  if (ST->hasAVX512() || ST->hasAVX2())
    return true;
  return false;
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
  return isLegalMaskedLoad(DataType, Consecutive);
}

bool X86TTIImpl::hasCompatibleFunctionAttributes(const Function *Caller,
                                                 const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}
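
// For example (illustrative): a callee built with "+sse4.2" may be inlined
// into a caller built with "+avx2,+sse4.2", because the callee's feature bits
// are a subset of the caller's; the reverse direction is rejected, since the
// inlined body could then execute instructions the caller never guaranteed.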