//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic and vectors for the vector form of the
/// intrinsic.
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::bswap: // Begin integer bit-manipulation.
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
    return true;
  default:
    return false;
  }
}

/// Identifies if the intrinsic has a scalar operand. It checks for
/// ctlz, cttz and powi, special intrinsics whose argument is scalar.
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  default:
    return false;
  }
}

/// Returns the intrinsic ID for the call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if it does not find one, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::sideeffect)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise, we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}
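
// For example, for an access A[i * s] with a loop-invariant value s,
// stripGetElementPtr reduces the query to the GEP index i * s, whose
// add-recurrence has step s; the symbolic stride returned is then the IR value
// of s (or its unique in-loop cast, if a cast was stripped off the recurrence).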

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  unsigned Width = VTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(VTy->getElementType());

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}
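
// For example, given
//   %v = insertelement <4 x float> %w, float %x, i32 2
// findScalarElement(%v, 2) returns %x, while findScalarElement(%v, 0) recurses
// into %w to look for element 0.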

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcast a single value into a vector.
///
const llvm::Value *llvm::getSplatValue(const Value *V) {

  if (auto *C = dyn_cast<Constant>(V))
    if (isa<VectorType>(V->getType()))
      return C->getSplatValue();

  auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V);
  if (!ShuffleInst)
    return nullptr;
  // All-zero (or undef) shuffle mask elements.
  for (int MaskElt : ShuffleInst->getShuffleMask())
    if (MaskElt != 0 && MaskElt != -1)
      return nullptr;
  // The first shuffle source is 'insertelement' with index 0.
  auto *InsertEltInst =
      dyn_cast<InsertElementInst>(ShuffleInst->getOperand(0));
  if (!InsertEltInst || !isa<ConstantInt>(InsertEltInst->getOperand(2)) ||
      !cast<ConstantInt>(InsertEltInst->getOperand(2))->isZero())
    return nullptr;

  return InsertEltInst->getOperand(1);
}
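
// For example, the instruction-sequence form of a splat recognized above is:
//   %insert = insertelement <4 x i32> undef, i32 %v, i32 0
//   %splat  = shufflevector <4 x i32> %insert, <4 x i32> undef,
//                           <4 x i32> zeroinitializer
// for which the returned splat value is %v.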

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      LeaderDemandedBits |= DBits[*MI];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      if (isa<PHINode>(*MI) &&
          MinBW < (*MI)->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
      if (!isa<Instruction>(*MI))
        continue;
      Type *Ty = (*MI)->getType();
      if (Roots.count(*MI))
        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(*MI)] = MinBW;
    }
  }

  return MinBWs;
}
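
// For example, if a chain of i32 arithmetic is only consumed by a truncation
// to i8, DemandedBits reports that only the low 8 bits are live throughout the
// connected DAG, and every instruction in that equivalence class is mapped to
// a minimum bit width of 8.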

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}
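
// For example, with VF = 2 and a factor-3 group whose members are present only
// at indices 0 and 2, the mask built above is <1, 0, 1, 1, 0, 1> (i1 elements).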

Constant *llvm::createReplicatedMask(IRBuilder<> &Builder,
                                     unsigned ReplicationFactor, unsigned VF) {
  SmallVector<Constant *, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(Builder.getInt32(i));

  return ConstantVector::get(MaskVec);
}
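
// For example, createReplicatedMask(Builder, /*ReplicationFactor=*/3, /*VF=*/2)
// returns the shuffle mask <0, 0, 0, 1, 1, 1>, replicating each of the VF
// lanes ReplicationFactor times.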

Constant *llvm::createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
                                     unsigned NumVecs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}
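
// For example, createInterleaveMask(Builder, /*VF=*/4, /*NumVecs=*/2) returns
// the shuffle mask <0, 4, 1, 5, 2, 6, 3, 7>, interleaving the lanes of two
// concatenated vectors.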

Constant *llvm::createStrideMask(IRBuilder<> &Builder, unsigned Start,
                                 unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}
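
// For example, createStrideMask(Builder, /*Start=*/0, /*Stride=*/2, /*VF=*/4)
// returns the shuffle mask <0, 2, 4, 6>, selecting every Stride-th lane.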

Constant *llvm::createSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                     unsigned NumInts, unsigned NumUndefs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}
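
// For example, createSequentialMask(Builder, /*Start=*/0, /*NumInts=*/4,
// /*NumUndefs=*/2) returns the shuffle mask <0, 1, 2, 3, undef, undef>.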

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements than the second");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        createSequentialMask(Builder, 0, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = createSequentialMask(Builder, 0, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}
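
// For example, concatenating a <4 x i32> with a <2 x i32> first widens the
// second operand to <4 x i32> using the mask <0, 1, undef, undef>, and then
// emits a single shuffle with mask <0, 1, 2, 3, 4, 5> to produce the
// <6 x i32> result.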

Value *llvm::concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = getLoadStoreAlignment(&I);
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (auto *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. Can skip the checks; For full groups, if the wide
    // load would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Avoid releasing a Group twice.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
  for (auto &I : InterleaveGroupMap) {
    InterleaveGroup<Instruction> *Group = I.second;
    if (Group->requiresScalarEpilogue())
      DelSet.insert(Group);
  }
  for (auto *Ptr : DelSet) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled). \n");
    releaseGroup(Ptr);
  }

  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
}