//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86tti"
#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
using namespace llvm;
25
26// Declare the pass initialization routine locally as target-specific passes
27// don't havve a target-wide initialization entry point, and so we rely on the
28// pass constructor initialization.
29namespace llvm {
30void initializeX86TTIPass(PassRegistry &);
31}
32
namespace {

class X86TTI : public ImmutablePass, public TargetTransformInfo {
  const X86TargetMachine *TM;
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
    : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
      TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  virtual PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const;
  virtual unsigned getRegisterBitWidth(bool Vector) const;
  virtual unsigned getMaximumUnrollFactor() const;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}
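
// A hedged sketch (not part of this file): the target machine is expected to
// register this analysis itself, roughly along the lines of
//
//   void X86TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
//     PM.add(createX86TargetTransformInfoPass(this));
//   }
//
// so that TTI queries made by optimization passes resolve to this pass.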


//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasSSE41().
  return ST->hasSSE41() ? PSK_FastHardware : PSK_Software;
}

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}
139
Nadav Rotemb1791a72013-01-09 22:29:00 +0000140unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
141 if (Vector) {
142 if (ST->hasAVX()) return 256;
143 if (ST->hasSSE1()) return 128;
144 return 0;
145 }
146
147 if (ST->is64Bit())
148 return 64;
149 return 32;
150
151}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandy Bridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them as
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },

    { ISD::SHL, MVT::v32i8, 42 },     // cmpeqb sequence.
    { ISD::SHL, MVT::v16i16, 16*10 }, // Scalarized.

    { ISD::SRL, MVT::v32i8, 32*10 },  // Scalarized.
    { ISD::SRL, MVT::v16i16, 8*10 },  // Scalarized.

    { ISD::SRA, MVT::v32i8, 32*10 },  // Scalarized.
    { ISD::SRA, MVT::v16i16, 16*10 }, // Scalarized.
    { ISD::SRA, MVT::v4i64, 4*10 },   // Scalarized.
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }
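
  // Worked example (illustrative): a 'shl <8 x i32>' on an AVX2 target
  // legalizes to LT = {1, MVT::v8i32}, the lookup above hits
  // { ISD::SHL, MVT::v8i32, 1 }, and the reported cost is 1 * 1 = 1.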

  static const CostTblEntry<MVT> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // In some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it
    // invisible to ISel. The cost model must return worst case assumptions
    // because it is used for vectorization and we don't want to make
    // vectorized code worse than scalar code.
    { ISD::SHL, MVT::v16i8, 30 },   // cmpeqb sequence.
    { ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized.
    { ISD::SHL, MVT::v4i32, 2*5 },  // We optimized this using mul.
    { ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized.

    { ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRL, MVT::v8i16, 8*10 },  // Scalarized.
    { ISD::SRL, MVT::v4i32, 4*10 },  // Scalarized.
    { ISD::SRL, MVT::v2i64, 2*10 },  // Scalarized.

    { ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRA, MVT::v8i16, 8*10 },  // Scalarized.
    { ISD::SRA, MVT::v4i32, 4*10 },  // Scalarized.
    { ISD::SRA, MVT::v2i64, 2*10 },  // Scalarized.
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup<MVT>(SSE2CostTable, array_lengthof(SSE2CostTable),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
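
  // Worked example (illustrative): an 'ashr <4 x i64>' on a plain SSE2 target
  // is split into two v2i64 halves, so LT = {2, MVT::v2i64}; the entry
  // { ISD::SRA, MVT::v2i64, 2*10 } above then yields a cost of 2 * 20 = 40.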

  static const CostTblEntry<MVT> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v8i32, 4 },
    { ISD::SUB, MVT::v8i32, 4 },
    { ISD::ADD, MVT::v8i32, 4 },
    { ISD::SUB, MVT::v4i64, 4 },
    { ISD::ADD, MVT::v4i64, 4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies (3), shifts (4) and adds (2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL, MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    int Idx = CostTableLookup<MVT>(AVX1CostTable, array_lengthof(AVX1CostTable),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup<MVT>(CustomLowered, array_lengthof(CustomLowered),
                                 ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on SSE2/SSE3: lower a v4i32 mul as
  // 2x shuffle, 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return 6;

  // Fall back to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse shuffles.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  unsigned Cost = 1;
  if (LT.second.getSizeInBits() > 128)
    Cost = 3; // Extract + insert + copy.
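
  // Worked example (illustrative): reversing a <4 x i64> on AVX keeps the
  // full 256-bit type (LT = {1, MVT::v4i64}) and costs 3, while on SSE2 the
  // vector splits into two v2i64 parts (LT = {2, MVT::v2i64}) for a cost of
  // 1 * 2 = 2.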

  // Multiply by the number of parts.
  return Cost * LT.first;
}

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT> AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1 },

    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },

    { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 9 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 8 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 },
  };

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup<MVT>(AVXConversionTbl,
                                 array_lengthof(AVXConversionTbl),
                                 ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }
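
  // Worked example (illustrative): a 'sitofp <8 x i32> %v to <8 x float>' on
  // an AVX target matches { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }
  // above and is therefore costed as a single instruction.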

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry<MVT> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, 4 },
    { ISD::SETCC, MVT::v8i32, 4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8, 4 },
  };

  static const CostTblEntry<MVT> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64, 1 },
    { ISD::SETCC, MVT::v8i32, 1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8, 1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup<MVT>(AVX2CostTbl, array_lengthof(AVX2CostTbl),
                                   ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup<MVT>(AVX1CostTbl, array_lengthof(AVX1CostTbl),
                                   ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup<MVT>(SSE42CostTbl, array_lengthof(SSE42CostTbl),
                                   ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }
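
  // Worked example (illustrative): an 'fcmp olt <8 x float>' on AVX legalizes
  // to MTy = MVT::v8f32 and hits { ISD::SETCC, MVT::v8f32, 1 } in AVX1CostTbl
  // for a cost of 1; on an SSE4.2-only target the compare splits into two
  // v4f32 halves and costs 2 * 1 = 2.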

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;
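
    // Illustrative: extracting element 5 of an <8 x i32> on SSE2 targets the
    // second v4i32 half, so the index is normalized to 5 % 4 == 1.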

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandy Bridge, 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;
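
  // Worked example (illustrative): a 256-bit 'load <8 x float>' legalizes to
  // one YMM load on AVX (LT.first == 1) and is double pumped on Sandy Bridge,
  // giving a cost of 2; on an AVX2 (Haswell) target the same load costs 1.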

  return Cost;
}