//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
// instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
// smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
// multiple base registers, or as the increment expression in an addrec),
// we may not actually need both reg and (-1 * reg) in registers; the
// negation can be implemented by using a sub instead of an add. The
// lack of support for taking this into consideration when making
// register pressure decisions is partly worked around by the "Special"
// use kind.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
// Temporary flag to clean up congruent phis after LSR phi expansion. It was
// originally disabled until we could determine whether it's truly useful or
// not, with the intent of removing the flag after the v3.0 release, but it is
// now needed for IV chains and so defaults to enabled.
static cl::opt<bool> EnablePhiElim(
  "enable-lsr-phielim", cl::Hidden, cl::init(true),
  cl::desc("Enable LSR phi elimination"));

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
  "stress-ivchain", cl::Hidden, cl::init(false),
  cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif
namespace {

/// RegSortData - This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end() { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}
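/// SwapAndDropUse - For each register, replace the use bit for LUIdx with the
/// bit for LastLUIdx, then truncate each register's bit vector so the entry
/// for LastLUIdx is dropped.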
void
RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I) {
    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {
/// Formula - This class holds information that describes a formula for
/// computing a value satisfying a use. It may include broken-out immediates
/// and scaled registers.
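///
/// For example, an address computed as
///   @gv + 16 + reg(%base) + 4*reg({0,+,1}<%loop>)
/// corresponds to a Formula with AM.BaseGV = @gv, AM.BaseOffs = 16, one entry
/// in BaseRegs, and ScaledReg = {0,+,1}<%loop> with AM.Scale = 4. (The names
/// @gv, %base, and %loop are purely illustrative.)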
217struct Formula {
218 /// AM - This is used to represent complex addressing, as well as other kinds
219 /// of interesting uses.
220 TargetLowering::AddrMode AM;
221
222 /// BaseRegs - The list of "base" registers for this use. When this is
223 /// non-empty, AM.HasBaseReg should be set to true.
224 SmallVector<const SCEV *, 2> BaseRegs;
225
226 /// ScaledReg - The 'scaled' register for this use. This should be non-null
227 /// when AM.Scale is not zero.
228 const SCEV *ScaledReg;
229
Dan Gohmancca82142011-05-03 00:46:49 +0000230 /// UnfoldedOffset - An additional constant offset which added near the
231 /// use. This requires a temporary register, but the offset itself can
232 /// live in an add immediate field rather than a register.
233 int64_t UnfoldedOffset;
234
235 Formula() : ScaledReg(0), UnfoldedOffset(0) {}
Dan Gohman572645c2010-02-12 10:34:29 +0000236
Dan Gohmandc0e8fb2010-11-17 21:41:58 +0000237 void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
Dan Gohman572645c2010-02-12 10:34:29 +0000238
239 unsigned getNumRegs() const;
Chris Lattnerdb125cf2011-07-18 04:54:35 +0000240 Type *getType() const;
Dan Gohman572645c2010-02-12 10:34:29 +0000241
Dan Gohman5ce6d052010-05-20 15:17:54 +0000242 void DeleteBaseReg(const SCEV *&S);
243
Dan Gohman572645c2010-02-12 10:34:29 +0000244 bool referencesReg(const SCEV *S) const;
245 bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
246 const RegUseTracker &RegUses) const;
247
248 void print(raw_ostream &OS) const;
249 void dump() const;
250};
251
252}
/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
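///
/// For instance, given S = {%base,+,4}<%loop> where %base is loop-invariant,
/// the invariant part (%base) and the remaining recurrence ({0,+,4}<%loop>)
/// each end up in their own base register. (This S is an illustration only,
/// not taken from a particular test case.)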
void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
}
/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}
/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}
/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to Y, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
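///
/// For example, assuming the addrec is known not to overflow,
/// {0,+,4}<%loop> /s 4 yields {0,+,1}<%loop>, while {0,+,4}<%loop> /s 3 yields
/// null because the step is not evenly divisible. (Illustrative values only.)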
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
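///
/// For example, given S = (4 + %x), this returns 4 and rewrites S to %x;
/// given S = {12,+,1}<%loop>, it returns 12 and rewrites S to {0,+,1}<%loop>.
/// (These inputs are illustrative only.)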
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static Type *getAccessType(const Instruction *Inst) {
  Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}
/// isExistingPhi - Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I) {
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(PN) == AR)
      return true;
  }
  return false;
}

/// Check if expanding this expression is likely to incur significant cost. This
/// is tricky because SCEV doesn't track which expressions are actually computed
/// by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
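///
/// For example, an increment such as (%iv + (4 * %step)), where %iv is an
/// existing IV phi, expands to an add and a multiply by a constant and is not
/// considered high cost, whereas any expression involving a udiv currently is.
/// (The expression is illustrative only.)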
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSet<const SCEV*, 8> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  }

  if (!Processed.insert(S))
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      if (isHighCostExpansion(*I, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end();
             UI != UE; ++UI) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *User = dyn_cast<Instruction>(*UI);
          if (User && User->getOpcode() == Instruction::Mul
              && SE.isSCEVable(User->getType())) {
            return SE.getSCEV(User) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high cost.
  return true;
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}
namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  bool operator<(const Cost &Other) const;

  void Loose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
  bool isValid() {
    return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds
             | ImmCost | SetupCost) != ~0u)
        || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds
             & ImmCost & SetupCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT,
                   SmallPtrSet<const SCEV *, 16> *LoserRegs = 0);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT,
                           SmallPtrSet<const SCEV *, 16> *LoserRegs);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
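/// This bumps NumRegs for the register itself, AddRecCost for recurrences in
/// the current loop, NumIVMuls for loop-varying multiplies, and SetupCost for
/// registers that need extra setup instructions in the preheader.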
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, don't second-guess its addrec phi
    // nodes. LSR isn't currently smart enough to reason about more than one
    // loop at a time. LSR has already run on inner loops, will not run on outer
    // loops, and cannot be expected to change sibling loops.
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, SE))
        return;

      // Otherwise, do not consider this formula at all.
      Loose();
      return;
    }
    AddRecCost += 1; /// TODO: This should be a function of the stride.

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;

  NumIVMuls += isa<SCEVMulExpr>(Reg) &&
               SE.hasComputableLoopEvolution(Reg, L);
}
/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
/// before, rate it. Optional LoserRegs provides a way to declare any formula
/// that refers to one of those regs an instant loser.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT,
                               SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Loose();
    return;
  }
  if (Regs.insert(Reg)) {
    RateRegister(Reg, Regs, L, SE, DT);
    if (isLoser())
      LoserRegs->insert(Reg);
  }
}
void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT,
                       SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0);
  if (NumBaseParts > 1)
    NumBaseAdds += NumBaseParts - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
  assert(isValid() && "invalid cost");
}
/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
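/// The comparison is lexicographic: a formula needing fewer registers always
/// wins, ties are broken by addrec cost, then IV multiplies, base adds,
/// immediate cost, and finally setup cost.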
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}
namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoops - If this user is to use the post-incremented value of an
  /// induction variable, this set is non-empty and holds the loops
  /// associated with the induction variable.
  PostIncLoopSet PostIncLoops;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}
LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}

/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
       E = PostIncLoops.end(); I != E; ++I) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}
namespace {

/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};
/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be relying
  /// on the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
                                MinOffset(INT64_MAX),
                                MaxOffset(INT64_MIN),
                                AllFixupsOutsideLoop(true),
                                WidestFixupType(0) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

}
/// HasFormulaWithSameRegs - Test whether this use has a formula which has the
/// same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}
/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}

/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}
1217
Dan Gohman572645c2010-02-12 10:34:29 +00001218void LSRUse::print(raw_ostream &OS) const {
1219 OS << "LSR Use: Kind=";
1220 switch (Kind) {
1221 case Basic: OS << "Basic"; break;
1222 case Special: OS << "Special"; break;
1223 case ICmpZero: OS << "ICmpZero"; break;
1224 case Address:
1225 OS << "Address of ";
Duncan Sands1df98592010-02-16 11:11:14 +00001226 if (AccessTy->isPointerTy())
Dan Gohman572645c2010-02-12 10:34:29 +00001227 OS << "pointer"; // the full pointer type could be really verbose
1228 else
1229 OS << *AccessTy;
Evan Chengcdf43b12007-10-25 09:11:16 +00001230 }
1231
Dan Gohman572645c2010-02-12 10:34:29 +00001232 OS << ", Offsets={";
1233 for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
1234 E = Offsets.end(); I != E; ++I) {
1235 OS << *I;
Oscar Fuentesee56c422010-08-02 06:00:15 +00001236 if (llvm::next(I) != E)
Dan Gohman572645c2010-02-12 10:34:29 +00001237 OS << ',';
Dan Gohman7979b722010-01-22 00:46:49 +00001238 }
Dan Gohman572645c2010-02-12 10:34:29 +00001239 OS << '}';
Dan Gohman7979b722010-01-22 00:46:49 +00001240
Dan Gohman572645c2010-02-12 10:34:29 +00001241 if (AllFixupsOutsideLoop)
1242 OS << ", all-fixups-outside-loop";
Dan Gohmana9db1292010-07-15 20:24:58 +00001243
1244 if (WidestFixupType)
1245 OS << ", widest fixup type: " << *WidestFixupType;
Dan Gohman7979b722010-01-22 00:46:49 +00001246}
1247
Dan Gohman572645c2010-02-12 10:34:29 +00001248void LSRUse::dump() const {
1249 print(errs()); errs() << '\n';
1250}
Dan Gohman7979b722010-01-22 00:46:49 +00001251
Dan Gohman572645c2010-02-12 10:34:29 +00001252/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
1253/// be completely folded into the user instruction at isel time. This includes
1254/// address-mode folding and special icmp tricks.
1255static bool isLegalUse(const TargetLowering::AddrMode &AM,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001256 LSRUse::KindType Kind, Type *AccessTy,
Dan Gohman572645c2010-02-12 10:34:29 +00001257 const TargetLowering *TLI) {
1258 switch (Kind) {
1259 case LSRUse::Address:
1260 // If we have low-level target information, ask the target if it can
1261 // completely fold this address.
1262 if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);
1263
1264 // Otherwise, just guess that reg+reg addressing is legal.
1265 return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;
1266
1267 case LSRUse::ICmpZero:
1268 // There's not even a target hook for querying whether it would be legal to
1269 // fold a GV into an ICmp.
1270 if (AM.BaseGV)
1271 return false;
1272
1273 // ICmp only has two operands; don't allow more than two non-trivial parts.
1274 if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
1275 return false;
1276
1277 // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
1278 // putting the scaled register in the other operand of the icmp.
1279 if (AM.Scale != 0 && AM.Scale != -1)
1280 return false;
1281
1282 // If we have low-level target information, ask the target if it can fold an
1283 // integer immediate on an icmp.
1284 if (AM.BaseOffs != 0) {
Eli Friedmandae36ba2011-10-13 23:48:33 +00001285 if (TLI) return TLI->isLegalICmpImmediate(-(uint64_t)AM.BaseOffs);
Dan Gohman572645c2010-02-12 10:34:29 +00001286 return false;
Dan Gohman7979b722010-01-22 00:46:49 +00001287 }
Dan Gohman572645c2010-02-12 10:34:29 +00001288
1289 return true;
1290
1291 case LSRUse::Basic:
1292 // Only handle single-register values.
1293 return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;
1294
1295 case LSRUse::Special:
1296 // Only handle -1 scales, or no scale.
1297 return AM.Scale == 0 || AM.Scale == -1;
Dan Gohman7979b722010-01-22 00:46:49 +00001298 }
1299
David Blaikie4d6ccb52012-01-20 21:51:11 +00001300 llvm_unreachable("Invalid LSRUse Kind!");
Dan Gohman7979b722010-01-22 00:46:49 +00001301}
1302
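// Illustrative sketch, not part of the pass: roughly how a typical scaled
// address such as "base + 4*index + 8" would be checked against the target
// using the helper above. The LSR_EXAMPLES guard, the function name, and the
// constants are assumptions for illustration only; nothing here is built.
#ifdef LSR_EXAMPLES
static bool ExampleScaledAddressIsLegal(Type *AccessTy,
                                        const TargetLowering *TLI) {
  TargetLowering::AddrMode AM;
  AM.BaseOffs = 8;      // constant displacement
  AM.HasBaseReg = true; // one unscaled base register
  AM.Scale = 4;         // plus an index register scaled by 4
  return isLegalUse(AM, LSRUse::Address, AccessTy, TLI);
}
#endif
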
Dan Gohman572645c2010-02-12 10:34:29 +00001303static bool isLegalUse(TargetLowering::AddrMode AM,
1304 int64_t MinOffset, int64_t MaxOffset,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001305 LSRUse::KindType Kind, Type *AccessTy,
Dan Gohman572645c2010-02-12 10:34:29 +00001306 const TargetLowering *TLI) {
1307 // Check for overflow.
1308 if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
1309 (MinOffset > 0))
1310 return false;
1311 AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
1312 if (isLegalUse(AM, Kind, AccessTy, TLI)) {
1313 AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
1314 // Check for overflow.
1315 if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
1316 (MaxOffset > 0))
1317 return false;
1318 AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
1319 return isLegalUse(AM, Kind, AccessTy, TLI);
Dan Gohman7979b722010-01-22 00:46:49 +00001320 }
Dan Gohman572645c2010-02-12 10:34:29 +00001321 return false;
Dan Gohman7979b722010-01-22 00:46:49 +00001322}
1323
Dan Gohman572645c2010-02-12 10:34:29 +00001324static bool isAlwaysFoldable(int64_t BaseOffs,
1325 GlobalValue *BaseGV,
1326 bool HasBaseReg,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001327 LSRUse::KindType Kind, Type *AccessTy,
Dan Gohman454d26d2010-02-22 04:11:59 +00001328 const TargetLowering *TLI) {
Dan Gohman572645c2010-02-12 10:34:29 +00001329 // Fast-path: zero is always foldable.
1330 if (BaseOffs == 0 && !BaseGV) return true;
Dan Gohman7979b722010-01-22 00:46:49 +00001331
Dan Gohman572645c2010-02-12 10:34:29 +00001332 // Conservatively, create an address with an immediate and a
1333 // base and a scale.
1334 TargetLowering::AddrMode AM;
1335 AM.BaseOffs = BaseOffs;
1336 AM.BaseGV = BaseGV;
1337 AM.HasBaseReg = HasBaseReg;
1338 AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
Dan Gohman7979b722010-01-22 00:46:49 +00001339
Dan Gohmana2086b32010-05-19 23:43:12 +00001340 // Canonicalize a scale of 1 to a base register if the formula doesn't
1341 // already have a base register.
1342 if (!AM.HasBaseReg && AM.Scale == 1) {
1343 AM.Scale = 0;
1344 AM.HasBaseReg = true;
1345 }
1346
Dan Gohman572645c2010-02-12 10:34:29 +00001347 return isLegalUse(AM, Kind, AccessTy, TLI);
Dan Gohman7979b722010-01-22 00:46:49 +00001348}
1349
Dan Gohman572645c2010-02-12 10:34:29 +00001350static bool isAlwaysFoldable(const SCEV *S,
1351 int64_t MinOffset, int64_t MaxOffset,
1352 bool HasBaseReg,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001353 LSRUse::KindType Kind, Type *AccessTy,
Dan Gohman572645c2010-02-12 10:34:29 +00001354 const TargetLowering *TLI,
1355 ScalarEvolution &SE) {
1356 // Fast-path: zero is always foldable.
1357 if (S->isZero()) return true;
1358
1359 // Conservatively, create an address with an immediate and a
1360 // base and a scale.
1361 int64_t BaseOffs = ExtractImmediate(S, SE);
1362 GlobalValue *BaseGV = ExtractSymbol(S, SE);
1363
1364 // If there's anything else involved, it's not foldable.
1365 if (!S->isZero()) return false;
1366
1367 // Fast-path: zero is always foldable.
1368 if (BaseOffs == 0 && !BaseGV) return true;
1369
1370 // Conservatively, create an address with an immediate and a
1371 // base and a scale.
1372 TargetLowering::AddrMode AM;
1373 AM.BaseOffs = BaseOffs;
1374 AM.BaseGV = BaseGV;
1375 AM.HasBaseReg = HasBaseReg;
1376 AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
1377
1378 return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
Dan Gohman7979b722010-01-22 00:46:49 +00001379}
1380
Dan Gohmanb6211712010-06-19 21:21:39 +00001381namespace {
1382
Dan Gohman1e3121c2010-06-19 21:29:59 +00001383/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
1384/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
1385struct UseMapDenseMapInfo {
1386 static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
1387 return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
1388 }
1389
1390 static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
1391 return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
1392 }
1393
1394 static unsigned
1395 getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
1396 unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
1397 Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
1398 return Result;
1399 }
1400
1401 static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
1402 const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
1403 return LHS == RHS;
1404 }
1405};
1406
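// Minimal usage sketch (illustration only, kept out of the build): the traits
// struct above is what lets a (const SCEV *, LSRUse::KindType) pair key a
// DenseMap, as the UseMap member of LSRInstance does below. The guard macro
// and the names here are assumed for the example.
#ifdef LSR_EXAMPLES
static void ExampleUseMapLookup(const SCEV *Base, size_t LUIdx) {
  DenseMap<std::pair<const SCEV *, LSRUse::KindType>, size_t,
           UseMapDenseMapInfo> ExampleMap;
  ExampleMap[std::make_pair(Base, LSRUse::Address)] = LUIdx;
  assert(ExampleMap.count(std::make_pair(Base, LSRUse::Address)) &&
         "pair key not found after insertion");
}
#endif
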
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00001407/// IVInc - An individual increment in a Chain of IV increments.
1408/// Relate an IV user to an expression that computes the IV it uses from the IV
1409/// used by the previous link in the Chain.
1410///
1411/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
1412/// original IVOperand. The head of the chain's IVOperand is only valid during
1413/// chain collection, before LSR replaces IV users. During chain generation,
1414/// IncExpr can be used to find the new IVOperand that computes the same
1415/// expression.
1416struct IVInc {
1417 Instruction *UserInst;
1418 Value* IVOperand;
1419 const SCEV *IncExpr;
1420
1421 IVInc(Instruction *U, Value *O, const SCEV *E):
1422 UserInst(U), IVOperand(O), IncExpr(E) {}
1423};
1424
1425// IVChain - The list of IV increments in program order.
1426// We typically add the head of a chain without finding subsequent links.
1427typedef SmallVector<IVInc,1> IVChain;
1428
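// Sketch (illustration only, kept out of the build): how a chain head and one
// subsequent link would be recorded. The head's IncExpr is the operand's full
// expression; later links store the step from the previous IV operand. This
// mirrors what the chain-collection code below does; the guard macro and all
// parameter names are assumed.
#ifdef LSR_EXAMPLES
static IVChain ExampleTwoLinkChain(Instruction *HeadUser, Value *HeadIVOper,
                                   Instruction *NextUser, Value *NextIVOper,
                                   ScalarEvolution &SE) {
  IVChain Chain;
  // Head: absolute expression of the first IV operand.
  Chain.push_back(IVInc(HeadUser, HeadIVOper, SE.getSCEV(HeadIVOper)));
  // Next link: the increment from the previous IV operand to this one.
  Chain.push_back(IVInc(NextUser, NextIVOper,
                        SE.getMinusSCEV(SE.getSCEV(NextIVOper),
                                        SE.getSCEV(HeadIVOper))));
  return Chain;
}
#endif
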
1429/// ChainUsers - Helper for CollectChains to track multiple IV increment uses.
1430/// Distinguish between FarUsers that definitely cross IV increments and
1431/// NearUsers that may be used between IV increments.
1432struct ChainUsers {
1433 SmallPtrSet<Instruction*, 4> FarUsers;
1434 SmallPtrSet<Instruction*, 4> NearUsers;
1435};
1436
Dan Gohman572645c2010-02-12 10:34:29 +00001437/// LSRInstance - This class holds state for the main loop strength reduction
1438/// logic.
1439class LSRInstance {
1440 IVUsers &IU;
1441 ScalarEvolution &SE;
1442 DominatorTree &DT;
Dan Gohmane5f76872010-04-09 22:07:05 +00001443 LoopInfo &LI;
Dan Gohman572645c2010-02-12 10:34:29 +00001444 const TargetLowering *const TLI;
1445 Loop *const L;
1446 bool Changed;
1447
 1448  /// IVIncInsertPos - This is the insert position where the current loop's
1449 /// induction variable increment should be placed. In simple loops, this is
1450 /// the latch block's terminator. But in more complicated cases, this is a
1451 /// position which will dominate all the in-loop post-increment users.
1452 Instruction *IVIncInsertPos;
1453
1454 /// Factors - Interesting factors between use strides.
1455 SmallSetVector<int64_t, 8> Factors;
1456
1457 /// Types - Interesting use types, to facilitate truncation reuse.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001458 SmallSetVector<Type *, 4> Types;
Dan Gohman572645c2010-02-12 10:34:29 +00001459
1460 /// Fixups - The list of operands which are to be replaced.
1461 SmallVector<LSRFixup, 16> Fixups;
1462
1463 /// Uses - The list of interesting uses.
1464 SmallVector<LSRUse, 16> Uses;
1465
1466 /// RegUses - Track which uses use which register candidates.
1467 RegUseTracker RegUses;
1468
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00001469 // Limit the number of chains to avoid quadratic behavior. We don't expect to
1470 // have more than a few IV increment chains in a loop. Missing a Chain falls
1471 // back to normal LSR behavior for those uses.
1472 static const unsigned MaxChains = 8;
1473
1474 /// IVChainVec - IV users can form a chain of IV increments.
1475 SmallVector<IVChain, MaxChains> IVChainVec;
1476
Andrew Trick22d20c22012-01-09 21:18:52 +00001477 /// IVIncSet - IV users that belong to profitable IVChains.
1478 SmallPtrSet<Use*, MaxChains> IVIncSet;
1479
Dan Gohman572645c2010-02-12 10:34:29 +00001480 void OptimizeShadowIV();
1481 bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
1482 ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
Dan Gohmanc6519f92010-05-20 20:05:31 +00001483 void OptimizeLoopTermCond();
Dan Gohman572645c2010-02-12 10:34:29 +00001484
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00001485 void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
1486 SmallVectorImpl<ChainUsers> &ChainUsersVec);
Andrew Trick22d20c22012-01-09 21:18:52 +00001487 void FinalizeChain(IVChain &Chain);
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00001488 void CollectChains();
Andrew Trick22d20c22012-01-09 21:18:52 +00001489 void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
1490 SmallVectorImpl<WeakVH> &DeadInsts);
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00001491
Dan Gohman572645c2010-02-12 10:34:29 +00001492 void CollectInterestingTypesAndFactors();
1493 void CollectFixupsAndInitialFormulae();
1494
1495 LSRFixup &getNewFixup() {
1496 Fixups.push_back(LSRFixup());
1497 return Fixups.back();
1498 }
1499
1500 // Support for sharing of LSRUses between LSRFixups.
Dan Gohman1e3121c2010-06-19 21:29:59 +00001501 typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
1502 size_t,
1503 UseMapDenseMapInfo> UseMapTy;
Dan Gohman572645c2010-02-12 10:34:29 +00001504 UseMapTy UseMap;
1505
Dan Gohman191bd642010-09-01 01:45:53 +00001506 bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001507 LSRUse::KindType Kind, Type *AccessTy);
Dan Gohman572645c2010-02-12 10:34:29 +00001508
1509 std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
1510 LSRUse::KindType Kind,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001511 Type *AccessTy);
Dan Gohman572645c2010-02-12 10:34:29 +00001512
Dan Gohmanc6897702010-10-07 23:33:43 +00001513 void DeleteUse(LSRUse &LU, size_t LUIdx);
Dan Gohman5ce6d052010-05-20 15:17:54 +00001514
Dan Gohman191bd642010-09-01 01:45:53 +00001515 LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);
Dan Gohmana2086b32010-05-19 23:43:12 +00001516
Dan Gohman454d26d2010-02-22 04:11:59 +00001517 void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
Dan Gohman572645c2010-02-12 10:34:29 +00001518 void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
1519 void CountRegisters(const Formula &F, size_t LUIdx);
1520 bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);
1521
1522 void CollectLoopInvariantFixupsAndFormulae();
1523
1524 void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
1525 unsigned Depth = 0);
1526 void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
1527 void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
1528 void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
1529 void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
1530 void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
1531 void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
1532 void GenerateCrossUseConstantOffsets();
1533 void GenerateAllReuseFormulae();
1534
1535 void FilterOutUndesirableDedicatedRegisters();
Dan Gohmand079c302010-05-18 22:51:59 +00001536
1537 size_t EstimateSearchSpaceComplexity() const;
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00001538 void NarrowSearchSpaceByDetectingSupersets();
1539 void NarrowSearchSpaceByCollapsingUnrolledCode();
Dan Gohman4f7e18d2010-08-29 16:39:22 +00001540 void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00001541 void NarrowSearchSpaceByPickingWinnerRegs();
Dan Gohman572645c2010-02-12 10:34:29 +00001542 void NarrowSearchSpaceUsingHeuristics();
1543
1544 void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
1545 Cost &SolutionCost,
1546 SmallVectorImpl<const Formula *> &Workspace,
1547 const Cost &CurCost,
1548 const SmallPtrSet<const SCEV *, 16> &CurRegs,
1549 DenseSet<const SCEV *> &VisitedRegs) const;
1550 void Solve(SmallVectorImpl<const Formula *> &Solution) const;
1551
Dan Gohmane5f76872010-04-09 22:07:05 +00001552 BasicBlock::iterator
1553 HoistInsertPosition(BasicBlock::iterator IP,
1554 const SmallVectorImpl<Instruction *> &Inputs) const;
Andrew Trickb5c26ef2012-01-20 07:41:13 +00001555 BasicBlock::iterator
1556 AdjustInsertPositionForExpand(BasicBlock::iterator IP,
1557 const LSRFixup &LF,
1558 const LSRUse &LU,
1559 SCEVExpander &Rewriter) const;
Dan Gohmand96eae82010-04-09 02:00:38 +00001560
Dan Gohman572645c2010-02-12 10:34:29 +00001561 Value *Expand(const LSRFixup &LF,
1562 const Formula &F,
Dan Gohman454d26d2010-02-22 04:11:59 +00001563 BasicBlock::iterator IP,
Dan Gohman572645c2010-02-12 10:34:29 +00001564 SCEVExpander &Rewriter,
Dan Gohman454d26d2010-02-22 04:11:59 +00001565 SmallVectorImpl<WeakVH> &DeadInsts) const;
Dan Gohman3a02cbc2010-02-16 20:25:07 +00001566 void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
1567 const Formula &F,
Dan Gohman3a02cbc2010-02-16 20:25:07 +00001568 SCEVExpander &Rewriter,
1569 SmallVectorImpl<WeakVH> &DeadInsts,
Dan Gohman3a02cbc2010-02-16 20:25:07 +00001570 Pass *P) const;
Dan Gohman572645c2010-02-12 10:34:29 +00001571 void Rewrite(const LSRFixup &LF,
1572 const Formula &F,
Dan Gohman572645c2010-02-12 10:34:29 +00001573 SCEVExpander &Rewriter,
1574 SmallVectorImpl<WeakVH> &DeadInsts,
Dan Gohman572645c2010-02-12 10:34:29 +00001575 Pass *P) const;
1576 void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
1577 Pass *P);
1578
Andrew Trickd56ef8d2011-12-13 00:55:33 +00001579public:
Dan Gohman572645c2010-02-12 10:34:29 +00001580 LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);
1581
1582 bool getChanged() const { return Changed; }
1583
1584 void print_factors_and_types(raw_ostream &OS) const;
1585 void print_fixups(raw_ostream &OS) const;
1586 void print_uses(raw_ostream &OS) const;
1587 void print(raw_ostream &OS) const;
1588 void dump() const;
1589};
1590
1591}
1592
 1593/// OptimizeShadowIV - If IV is used in an int-to-float cast
Dan Gohman3f46a3a2010-03-01 17:49:51 +00001594/// inside the loop then try to eliminate the cast operation.
Dan Gohman572645c2010-02-12 10:34:29 +00001595void LSRInstance::OptimizeShadowIV() {
1596 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1597 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
1598 return;
1599
1600 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
1601 UI != E; /* empty */) {
1602 IVUsers::const_iterator CandidateUI = UI;
1603 ++UI;
1604 Instruction *ShadowUse = CandidateUI->getUser();
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001605 Type *DestTy = NULL;
Andrew Trickc2c988e2011-07-21 01:05:01 +00001606 bool IsSigned = false;
Dan Gohman572645c2010-02-12 10:34:29 +00001607
 1608    /* If shadow use is an int->float cast then insert a second IV
1609 to eliminate this cast.
1610
1611 for (unsigned i = 0; i < n; ++i)
1612 foo((double)i);
1613
1614 is transformed into
1615
1616 double d = 0.0;
1617 for (unsigned i = 0; i < n; ++i, ++d)
1618 foo(d);
1619 */
Andrew Trickc2c988e2011-07-21 01:05:01 +00001620 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
1621 IsSigned = false;
Dan Gohman572645c2010-02-12 10:34:29 +00001622 DestTy = UCast->getDestTy();
Andrew Trickc2c988e2011-07-21 01:05:01 +00001623 }
1624 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
1625 IsSigned = true;
Dan Gohman572645c2010-02-12 10:34:29 +00001626 DestTy = SCast->getDestTy();
Andrew Trickc2c988e2011-07-21 01:05:01 +00001627 }
Dan Gohman572645c2010-02-12 10:34:29 +00001628 if (!DestTy) continue;
1629
1630 if (TLI) {
1631 // If target does not support DestTy natively then do not apply
1632 // this transformation.
1633 EVT DVT = TLI->getValueType(DestTy);
1634 if (!TLI->isTypeLegal(DVT)) continue;
1635 }
1636
1637 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
1638 if (!PH) continue;
1639 if (PH->getNumIncomingValues() != 2) continue;
1640
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001641 Type *SrcTy = PH->getType();
Dan Gohman572645c2010-02-12 10:34:29 +00001642 int Mantissa = DestTy->getFPMantissaWidth();
1643 if (Mantissa == -1) continue;
1644 if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
1645 continue;
1646
1647 unsigned Entry, Latch;
1648 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
1649 Entry = 0;
1650 Latch = 1;
Dan Gohman7979b722010-01-22 00:46:49 +00001651 } else {
Dan Gohman572645c2010-02-12 10:34:29 +00001652 Entry = 1;
1653 Latch = 0;
Dan Gohman7979b722010-01-22 00:46:49 +00001654 }
Dan Gohman7979b722010-01-22 00:46:49 +00001655
Dan Gohman572645c2010-02-12 10:34:29 +00001656 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
1657 if (!Init) continue;
Andrew Trickc2c988e2011-07-21 01:05:01 +00001658 Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
Andrew Trickc205a092011-07-21 01:45:54 +00001659 (double)Init->getSExtValue() :
1660 (double)Init->getZExtValue());
Dan Gohman7979b722010-01-22 00:46:49 +00001661
Dan Gohman572645c2010-02-12 10:34:29 +00001662 BinaryOperator *Incr =
1663 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
1664 if (!Incr) continue;
1665 if (Incr->getOpcode() != Instruction::Add
1666 && Incr->getOpcode() != Instruction::Sub)
Dan Gohman7979b722010-01-22 00:46:49 +00001667 continue;
Dan Gohman7979b722010-01-22 00:46:49 +00001668
Dan Gohman572645c2010-02-12 10:34:29 +00001669 /* Initialize new IV, double d = 0.0 in above example. */
1670 ConstantInt *C = NULL;
1671 if (Incr->getOperand(0) == PH)
1672 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
1673 else if (Incr->getOperand(1) == PH)
1674 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
Dan Gohman7979b722010-01-22 00:46:49 +00001675 else
Dan Gohman7979b722010-01-22 00:46:49 +00001676 continue;
1677
Dan Gohman572645c2010-02-12 10:34:29 +00001678 if (!C) continue;
Dan Gohman7979b722010-01-22 00:46:49 +00001679
Dan Gohman572645c2010-02-12 10:34:29 +00001680 // Ignore negative constants, as the code below doesn't handle them
1681 // correctly. TODO: Remove this restriction.
1682 if (!C->getValue().isStrictlyPositive()) continue;
Dan Gohman7979b722010-01-22 00:46:49 +00001683
Dan Gohman572645c2010-02-12 10:34:29 +00001684 /* Add new PHINode. */
Jay Foad3ecfc862011-03-30 11:28:46 +00001685 PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);
Dan Gohman7979b722010-01-22 00:46:49 +00001686
Dan Gohman572645c2010-02-12 10:34:29 +00001687 /* create new increment. '++d' in above example. */
1688 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
1689 BinaryOperator *NewIncr =
1690 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
1691 Instruction::FAdd : Instruction::FSub,
1692 NewPH, CFP, "IV.S.next.", Incr);
Dan Gohman7979b722010-01-22 00:46:49 +00001693
Dan Gohman572645c2010-02-12 10:34:29 +00001694 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
1695 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
Dan Gohman7979b722010-01-22 00:46:49 +00001696
Dan Gohman572645c2010-02-12 10:34:29 +00001697 /* Remove cast operation */
1698 ShadowUse->replaceAllUsesWith(NewPH);
1699 ShadowUse->eraseFromParent();
Dan Gohmanc6519f92010-05-20 20:05:31 +00001700 Changed = true;
Dan Gohman572645c2010-02-12 10:34:29 +00001701 break;
Dan Gohman7979b722010-01-22 00:46:49 +00001702 }
1703}
1704
1705/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1706/// set the IV user and stride information and return true, otherwise return
1707/// false.
Dan Gohmanea507f52010-05-20 19:44:23 +00001708bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
Dan Gohman572645c2010-02-12 10:34:29 +00001709 for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
1710 if (UI->getUser() == Cond) {
 1711      // NOTE: we could handle setcc instructions with multiple uses here, but
 1712      // InstCombine already does this for simple uses, and it's not clear that
 1713      // it occurs often enough in real life to be worth handling here.
1714 CondUse = UI;
1715 return true;
1716 }
Dan Gohman7979b722010-01-22 00:46:49 +00001717 return false;
Evan Chengcdf43b12007-10-25 09:11:16 +00001718}
1719
Dan Gohman7979b722010-01-22 00:46:49 +00001720/// OptimizeMax - Rewrite the loop's terminating condition if it uses
1721/// a max computation.
1722///
1723/// This is a narrow solution to a specific, but acute, problem. For loops
1724/// like this:
1725///
1726/// i = 0;
1727/// do {
1728/// p[i] = 0.0;
1729/// } while (++i < n);
1730///
1731/// the trip count isn't just 'n', because 'n' might not be positive. And
1732/// unfortunately this can come up even for loops where the user didn't use
1733/// a C do-while loop. For example, seemingly well-behaved top-test loops
1734/// will commonly be lowered like this:
 1735///
1736/// if (n > 0) {
1737/// i = 0;
1738/// do {
1739/// p[i] = 0.0;
1740/// } while (++i < n);
1741/// }
1742///
1743/// and then it's possible for subsequent optimization to obscure the if
1744/// test in such a way that indvars can't find it.
1745///
1746/// When indvars can't find the if test in loops like this, it creates a
1747/// max expression, which allows it to give the loop a canonical
1748/// induction variable:
1749///
1750/// i = 0;
1751/// max = n < 1 ? 1 : n;
1752/// do {
1753/// p[i] = 0.0;
1754/// } while (++i != max);
1755///
1756/// Canonical induction variables are necessary because the loop passes
1757/// are designed around them. The most obvious example of this is the
1758/// LoopInfo analysis, which doesn't remember trip count values. It
1759/// expects to be able to rediscover the trip count each time it is
Dan Gohman572645c2010-02-12 10:34:29 +00001760/// needed, and it does this using a simple analysis that only succeeds if
Dan Gohman7979b722010-01-22 00:46:49 +00001761/// the loop has a canonical induction variable.
1762///
1763/// However, when it comes time to generate code, the maximum operation
1764/// can be quite costly, especially if it's inside of an outer loop.
1765///
1766/// This function solves this problem by detecting this type of loop and
1767/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
1768/// the instructions for the maximum computation.
1769///
Dan Gohman572645c2010-02-12 10:34:29 +00001770ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
Dan Gohman7979b722010-01-22 00:46:49 +00001771 // Check that the loop matches the pattern we're looking for.
1772 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
1773 Cond->getPredicate() != CmpInst::ICMP_NE)
1774 return Cond;
Dan Gohmana10756e2010-01-21 02:09:26 +00001775
Dan Gohman7979b722010-01-22 00:46:49 +00001776 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
1777 if (!Sel || !Sel->hasOneUse()) return Cond;
Dan Gohmana10756e2010-01-21 02:09:26 +00001778
Dan Gohman572645c2010-02-12 10:34:29 +00001779 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
Dan Gohman7979b722010-01-22 00:46:49 +00001780 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
1781 return Cond;
Dan Gohmandeff6212010-05-03 22:09:21 +00001782 const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);
Dan Gohmana10756e2010-01-21 02:09:26 +00001783
Dan Gohman7979b722010-01-22 00:46:49 +00001784 // Add one to the backedge-taken count to get the trip count.
Dan Gohman4065f602010-08-16 15:39:27 +00001785 const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
Dan Gohman1d367982010-04-24 03:13:44 +00001786 if (IterationCount != SE.getSCEV(Sel)) return Cond;
Dan Gohman7979b722010-01-22 00:46:49 +00001787
Dan Gohman1d367982010-04-24 03:13:44 +00001788 // Check for a max calculation that matches the pattern. There's no check
1789 // for ICMP_ULE here because the comparison would be with zero, which
1790 // isn't interesting.
1791 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1792 const SCEVNAryExpr *Max = 0;
1793 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
1794 Pred = ICmpInst::ICMP_SLE;
1795 Max = S;
1796 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
1797 Pred = ICmpInst::ICMP_SLT;
1798 Max = S;
1799 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
1800 Pred = ICmpInst::ICMP_ULT;
1801 Max = U;
1802 } else {
1803 // No match; bail.
Dan Gohman7979b722010-01-22 00:46:49 +00001804 return Cond;
Dan Gohman1d367982010-04-24 03:13:44 +00001805 }
Dan Gohman7979b722010-01-22 00:46:49 +00001806
1807 // To handle a max with more than two operands, this optimization would
1808 // require additional checking and setup.
1809 if (Max->getNumOperands() != 2)
1810 return Cond;
1811
1812 const SCEV *MaxLHS = Max->getOperand(0);
1813 const SCEV *MaxRHS = Max->getOperand(1);
Dan Gohman1d367982010-04-24 03:13:44 +00001814
1815 // ScalarEvolution canonicalizes constants to the left. For < and >, look
1816 // for a comparison with 1. For <= and >=, a comparison with zero.
1817 if (!MaxLHS ||
1818 (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
1819 return Cond;
1820
Dan Gohman7979b722010-01-22 00:46:49 +00001821 // Check the relevant induction variable for conformance to
1822 // the pattern.
Dan Gohman572645c2010-02-12 10:34:29 +00001823 const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
Dan Gohman7979b722010-01-22 00:46:49 +00001824 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
1825 if (!AR || !AR->isAffine() ||
1826 AR->getStart() != One ||
Dan Gohman572645c2010-02-12 10:34:29 +00001827 AR->getStepRecurrence(SE) != One)
Dan Gohman7979b722010-01-22 00:46:49 +00001828 return Cond;
1829
1830 assert(AR->getLoop() == L &&
1831 "Loop condition operand is an addrec in a different loop!");
1832
1833 // Check the right operand of the select, and remember it, as it will
1834 // be used in the new comparison instruction.
1835 Value *NewRHS = 0;
Dan Gohman1d367982010-04-24 03:13:44 +00001836 if (ICmpInst::isTrueWhenEqual(Pred)) {
1837 // Look for n+1, and grab n.
1838 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
1839 if (isa<ConstantInt>(BO->getOperand(1)) &&
1840 cast<ConstantInt>(BO->getOperand(1))->isOne() &&
1841 SE.getSCEV(BO->getOperand(0)) == MaxRHS)
1842 NewRHS = BO->getOperand(0);
1843 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
1844 if (isa<ConstantInt>(BO->getOperand(1)) &&
1845 cast<ConstantInt>(BO->getOperand(1))->isOne() &&
1846 SE.getSCEV(BO->getOperand(0)) == MaxRHS)
1847 NewRHS = BO->getOperand(0);
1848 if (!NewRHS)
1849 return Cond;
1850 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
Dan Gohman7979b722010-01-22 00:46:49 +00001851 NewRHS = Sel->getOperand(1);
Dan Gohman572645c2010-02-12 10:34:29 +00001852 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
Dan Gohman7979b722010-01-22 00:46:49 +00001853 NewRHS = Sel->getOperand(2);
Dan Gohmancaf71ab2010-06-22 23:07:13 +00001854 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
1855 NewRHS = SU->getValue();
Dan Gohman1d367982010-04-24 03:13:44 +00001856 else
Dan Gohmancaf71ab2010-06-22 23:07:13 +00001857 // Max doesn't match expected pattern.
1858 return Cond;
Dan Gohman7979b722010-01-22 00:46:49 +00001859
1860 // Determine the new comparison opcode. It may be signed or unsigned,
1861 // and the original comparison may be either equality or inequality.
Dan Gohman7979b722010-01-22 00:46:49 +00001862 if (Cond->getPredicate() == CmpInst::ICMP_EQ)
1863 Pred = CmpInst::getInversePredicate(Pred);
1864
1865 // Ok, everything looks ok to change the condition into an SLT or SGE and
1866 // delete the max calculation.
1867 ICmpInst *NewCond =
1868 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
1869
1870 // Delete the max calculation instructions.
1871 Cond->replaceAllUsesWith(NewCond);
1872 CondUse->setUser(NewCond);
1873 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
1874 Cond->eraseFromParent();
1875 Sel->eraseFromParent();
1876 if (Cmp->use_empty())
1877 Cmp->eraseFromParent();
1878 return NewCond;
Dan Gohmanad7321f2008-09-15 21:22:06 +00001879}
1880
Jim Grosbach56a1f802009-11-17 17:53:56 +00001881/// OptimizeLoopTermCond - Change loop terminating condition to use the
Evan Cheng586f69a2009-11-12 07:35:05 +00001882/// postinc iv when possible.
Dan Gohmanc6519f92010-05-20 20:05:31 +00001883void
Dan Gohman572645c2010-02-12 10:34:29 +00001884LSRInstance::OptimizeLoopTermCond() {
1885 SmallPtrSet<Instruction *, 4> PostIncs;
1886
Evan Cheng586f69a2009-11-12 07:35:05 +00001887 BasicBlock *LatchBlock = L->getLoopLatch();
Evan Cheng076e0852009-11-17 18:10:11 +00001888 SmallVector<BasicBlock*, 8> ExitingBlocks;
1889 L->getExitingBlocks(ExitingBlocks);
Jim Grosbach56a1f802009-11-17 17:53:56 +00001890
Evan Cheng076e0852009-11-17 18:10:11 +00001891 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
1892 BasicBlock *ExitingBlock = ExitingBlocks[i];
Evan Cheng586f69a2009-11-12 07:35:05 +00001893
Dan Gohman572645c2010-02-12 10:34:29 +00001894 // Get the terminating condition for the loop if possible. If we
Evan Cheng076e0852009-11-17 18:10:11 +00001895 // can, we want to change it to use a post-incremented version of its
1896 // induction variable, to allow coalescing the live ranges for the IV into
1897 // one register value.
Evan Cheng586f69a2009-11-12 07:35:05 +00001898
Evan Cheng076e0852009-11-17 18:10:11 +00001899 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
1900 if (!TermBr)
1901 continue;
1902 // FIXME: Overly conservative, termination condition could be an 'or' etc..
1903 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
1904 continue;
Evan Cheng586f69a2009-11-12 07:35:05 +00001905
Evan Cheng076e0852009-11-17 18:10:11 +00001906 // Search IVUsesByStride to find Cond's IVUse if there is one.
1907 IVStrideUse *CondUse = 0;
Evan Cheng076e0852009-11-17 18:10:11 +00001908 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
Dan Gohman572645c2010-02-12 10:34:29 +00001909 if (!FindIVUserForCond(Cond, CondUse))
Evan Cheng076e0852009-11-17 18:10:11 +00001910 continue;
1911
Evan Cheng076e0852009-11-17 18:10:11 +00001912 // If the trip count is computed in terms of a max (due to ScalarEvolution
1913 // being unable to find a sufficient guard, for example), change the loop
1914 // comparison to use SLT or ULT instead of NE.
Dan Gohman572645c2010-02-12 10:34:29 +00001915 // One consequence of doing this now is that it disrupts the count-down
1916 // optimization. That's not always a bad thing though, because in such
1917 // cases it may still be worthwhile to avoid a max.
1918 Cond = OptimizeMax(Cond, CondUse);
Evan Cheng076e0852009-11-17 18:10:11 +00001919
Dan Gohman572645c2010-02-12 10:34:29 +00001920 // If this exiting block dominates the latch block, it may also use
1921 // the post-inc value if it won't be shared with other uses.
1922 // Check for dominance.
1923 if (!DT.dominates(ExitingBlock, LatchBlock))
Dan Gohman7979b722010-01-22 00:46:49 +00001924 continue;
Evan Cheng076e0852009-11-17 18:10:11 +00001925
Dan Gohman572645c2010-02-12 10:34:29 +00001926 // Conservatively avoid trying to use the post-inc value in non-latch
1927 // exits if there may be pre-inc users in intervening blocks.
Dan Gohman590bfe82010-02-14 03:21:49 +00001928 if (LatchBlock != ExitingBlock)
Dan Gohman572645c2010-02-12 10:34:29 +00001929 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
1930 // Test if the use is reachable from the exiting block. This dominator
1931 // query is a conservative approximation of reachability.
1932 if (&*UI != CondUse &&
1933 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
1934 // Conservatively assume there may be reuse if the quotient of their
1935 // strides could be a legal scale.
Dan Gohmanc0564542010-04-19 21:48:58 +00001936 const SCEV *A = IU.getStride(*CondUse, L);
1937 const SCEV *B = IU.getStride(*UI, L);
Dan Gohman448db1c2010-04-07 22:27:08 +00001938 if (!A || !B) continue;
Dan Gohman572645c2010-02-12 10:34:29 +00001939 if (SE.getTypeSizeInBits(A->getType()) !=
1940 SE.getTypeSizeInBits(B->getType())) {
1941 if (SE.getTypeSizeInBits(A->getType()) >
1942 SE.getTypeSizeInBits(B->getType()))
1943 B = SE.getSignExtendExpr(B, A->getType());
1944 else
1945 A = SE.getSignExtendExpr(A, B->getType());
1946 }
1947 if (const SCEVConstant *D =
Dan Gohmanf09b7122010-02-19 19:35:48 +00001948 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
Dan Gohman9f383eb2010-05-20 22:25:20 +00001949 const ConstantInt *C = D->getValue();
Dan Gohman572645c2010-02-12 10:34:29 +00001950 // Stride of one or negative one can have reuse with non-addresses.
Dan Gohman9f383eb2010-05-20 22:25:20 +00001951 if (C->isOne() || C->isAllOnesValue())
Dan Gohman572645c2010-02-12 10:34:29 +00001952 goto decline_post_inc;
1953 // Avoid weird situations.
Dan Gohman9f383eb2010-05-20 22:25:20 +00001954 if (C->getValue().getMinSignedBits() >= 64 ||
1955 C->getValue().isMinSignedValue())
Dan Gohman572645c2010-02-12 10:34:29 +00001956 goto decline_post_inc;
Dan Gohman590bfe82010-02-14 03:21:49 +00001957 // Without TLI, assume that any stride might be valid, and so any
1958 // use might be shared.
1959 if (!TLI)
1960 goto decline_post_inc;
Dan Gohman572645c2010-02-12 10:34:29 +00001961 // Check for possible scaled-address reuse.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00001962 Type *AccessTy = getAccessType(UI->getUser());
Dan Gohman572645c2010-02-12 10:34:29 +00001963 TargetLowering::AddrMode AM;
Dan Gohman9f383eb2010-05-20 22:25:20 +00001964 AM.Scale = C->getSExtValue();
Dan Gohman2763dfd2010-02-14 02:45:21 +00001965 if (TLI->isLegalAddressingMode(AM, AccessTy))
Dan Gohman572645c2010-02-12 10:34:29 +00001966 goto decline_post_inc;
1967 AM.Scale = -AM.Scale;
Dan Gohman2763dfd2010-02-14 02:45:21 +00001968 if (TLI->isLegalAddressingMode(AM, AccessTy))
Dan Gohman572645c2010-02-12 10:34:29 +00001969 goto decline_post_inc;
1970 }
1971 }
1972
David Greene63c94632009-12-23 22:58:38 +00001973 DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
Dan Gohman572645c2010-02-12 10:34:29 +00001974 << *Cond << '\n');
Evan Cheng076e0852009-11-17 18:10:11 +00001975
1976 // It's possible for the setcc instruction to be anywhere in the loop, and
1977 // possible for it to have multiple users. If it is not immediately before
1978 // the exiting block branch, move it.
Dan Gohman572645c2010-02-12 10:34:29 +00001979 if (&*++BasicBlock::iterator(Cond) != TermBr) {
1980 if (Cond->hasOneUse()) {
Evan Cheng076e0852009-11-17 18:10:11 +00001981 Cond->moveBefore(TermBr);
1982 } else {
Dan Gohman572645c2010-02-12 10:34:29 +00001983        // Clone the terminating condition and insert it into the loop end.
1984 ICmpInst *OldCond = Cond;
Evan Cheng076e0852009-11-17 18:10:11 +00001985 Cond = cast<ICmpInst>(Cond->clone());
1986 Cond->setName(L->getHeader()->getName() + ".termcond");
1987 ExitingBlock->getInstList().insert(TermBr, Cond);
1988
1989 // Clone the IVUse, as the old use still exists!
Andrew Trick4417e532011-06-21 15:43:52 +00001990 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
Dan Gohman572645c2010-02-12 10:34:29 +00001991 TermBr->replaceUsesOfWith(OldCond, Cond);
Evan Cheng076e0852009-11-17 18:10:11 +00001992 }
Evan Cheng586f69a2009-11-12 07:35:05 +00001993 }
1994
Evan Cheng076e0852009-11-17 18:10:11 +00001995 // If we get to here, we know that we can transform the setcc instruction to
1996 // use the post-incremented version of the IV, allowing us to coalesce the
1997 // live ranges for the IV correctly.
Dan Gohman448db1c2010-04-07 22:27:08 +00001998 CondUse->transformToPostInc(L);
Evan Cheng076e0852009-11-17 18:10:11 +00001999 Changed = true;
2000
Dan Gohman572645c2010-02-12 10:34:29 +00002001 PostIncs.insert(Cond);
2002 decline_post_inc:;
Dan Gohmana10756e2010-01-21 02:09:26 +00002003 }
Dan Gohman572645c2010-02-12 10:34:29 +00002004
2005 // Determine an insertion point for the loop induction variable increment. It
2006 // must dominate all the post-inc comparisons we just set up, and it must
2007 // dominate the loop latch edge.
2008 IVIncInsertPos = L->getLoopLatch()->getTerminator();
2009 for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
2010 E = PostIncs.end(); I != E; ++I) {
2011 BasicBlock *BB =
2012 DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
2013 (*I)->getParent());
2014 if (BB == (*I)->getParent())
2015 IVIncInsertPos = *I;
2016 else if (BB != IVIncInsertPos->getParent())
2017 IVIncInsertPos = BB->getTerminator();
2018 }
Dan Gohmana10756e2010-01-21 02:09:26 +00002019}
2020
Chris Lattner7a2bdde2011-04-15 05:18:47 +00002021/// reconcileNewOffset - Determine if the given use can accommodate a fixup
Dan Gohman76c315a2010-05-20 20:52:00 +00002022/// at the given offset and other details. If so, update the use and
2023/// return true.
Dan Gohman572645c2010-02-12 10:34:29 +00002024bool
Dan Gohman191bd642010-09-01 01:45:53 +00002025LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002026 LSRUse::KindType Kind, Type *AccessTy) {
Dan Gohman191bd642010-09-01 01:45:53 +00002027 int64_t NewMinOffset = LU.MinOffset;
2028 int64_t NewMaxOffset = LU.MaxOffset;
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002029 Type *NewAccessTy = AccessTy;
Dan Gohman7979b722010-01-22 00:46:49 +00002030
Dan Gohman572645c2010-02-12 10:34:29 +00002031 // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
2032 // something conservative, however this can pessimize in the case that one of
2033 // the uses will have all its uses outside the loop, for example.
2034 if (LU.Kind != Kind)
Dan Gohman7979b722010-01-22 00:46:49 +00002035 return false;
Dan Gohman572645c2010-02-12 10:34:29 +00002036 // Conservatively assume HasBaseReg is true for now.
Dan Gohman191bd642010-09-01 01:45:53 +00002037 if (NewOffset < LU.MinOffset) {
2038 if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
Dan Gohman454d26d2010-02-22 04:11:59 +00002039 Kind, AccessTy, TLI))
Dan Gohman7979b722010-01-22 00:46:49 +00002040 return false;
Dan Gohman191bd642010-09-01 01:45:53 +00002041 NewMinOffset = NewOffset;
2042 } else if (NewOffset > LU.MaxOffset) {
2043 if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
Dan Gohman454d26d2010-02-22 04:11:59 +00002044 Kind, AccessTy, TLI))
Dan Gohman7979b722010-01-22 00:46:49 +00002045 return false;
Dan Gohman191bd642010-09-01 01:45:53 +00002046 NewMaxOffset = NewOffset;
Dan Gohmana10756e2010-01-21 02:09:26 +00002047 }
Dan Gohman572645c2010-02-12 10:34:29 +00002048 // Check for a mismatched access type, and fall back conservatively as needed.
Dan Gohman74e5ef02010-06-19 21:30:18 +00002049 // TODO: Be less conservative when the type is similar and can use the same
2050 // addressing modes.
Dan Gohman572645c2010-02-12 10:34:29 +00002051 if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
Dan Gohman191bd642010-09-01 01:45:53 +00002052 NewAccessTy = Type::getVoidTy(AccessTy->getContext());
Dan Gohmana10756e2010-01-21 02:09:26 +00002053
Dan Gohman572645c2010-02-12 10:34:29 +00002054 // Update the use.
Dan Gohman191bd642010-09-01 01:45:53 +00002055 LU.MinOffset = NewMinOffset;
2056 LU.MaxOffset = NewMaxOffset;
2057 LU.AccessTy = NewAccessTy;
2058 if (NewOffset != LU.Offsets.back())
2059 LU.Offsets.push_back(NewOffset);
Dan Gohman8b0ade32010-01-21 22:42:49 +00002060 return true;
2061}
2062
Dan Gohman572645c2010-02-12 10:34:29 +00002063/// getUse - Return an LSRUse index and an offset value for a fixup which
2064/// needs the given expression, with the given kind and optional access type.
Dan Gohman3f46a3a2010-03-01 17:49:51 +00002065/// Either reuse an existing use or create a new one, as needed.
Dan Gohman572645c2010-02-12 10:34:29 +00002066std::pair<size_t, int64_t>
2067LSRInstance::getUse(const SCEV *&Expr,
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002068 LSRUse::KindType Kind, Type *AccessTy) {
Dan Gohman572645c2010-02-12 10:34:29 +00002069 const SCEV *Copy = Expr;
2070 int64_t Offset = ExtractImmediate(Expr, SE);
Evan Cheng586f69a2009-11-12 07:35:05 +00002071
Dan Gohman572645c2010-02-12 10:34:29 +00002072 // Basic uses can't accept any offset, for example.
Dan Gohman454d26d2010-02-22 04:11:59 +00002073 if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
Dan Gohman572645c2010-02-12 10:34:29 +00002074 Expr = Copy;
2075 Offset = 0;
2076 }
2077
2078 std::pair<UseMapTy::iterator, bool> P =
Dan Gohman1e3121c2010-06-19 21:29:59 +00002079 UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
Dan Gohman572645c2010-02-12 10:34:29 +00002080 if (!P.second) {
2081 // A use already existed with this base.
2082 size_t LUIdx = P.first->second;
2083 LSRUse &LU = Uses[LUIdx];
Dan Gohman191bd642010-09-01 01:45:53 +00002084 if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
Dan Gohman572645c2010-02-12 10:34:29 +00002085 // Reuse this use.
2086 return std::make_pair(LUIdx, Offset);
2087 }
2088
2089 // Create a new use.
2090 size_t LUIdx = Uses.size();
2091 P.first->second = LUIdx;
2092 Uses.push_back(LSRUse(Kind, AccessTy));
2093 LSRUse &LU = Uses[LUIdx];
2094
Dan Gohman191bd642010-09-01 01:45:53 +00002095 // We don't need to track redundant offsets, but we don't need to go out
2096 // of our way here to avoid them.
2097 if (LU.Offsets.empty() || Offset != LU.Offsets.back())
2098 LU.Offsets.push_back(Offset);
2099
Dan Gohman572645c2010-02-12 10:34:29 +00002100 LU.MinOffset = Offset;
2101 LU.MaxOffset = Offset;
2102 return std::make_pair(LUIdx, Offset);
2103}
2104
Dan Gohman5ce6d052010-05-20 15:17:54 +00002105/// DeleteUse - Delete the given use from the Uses list.
Dan Gohmanc6897702010-10-07 23:33:43 +00002106void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
Dan Gohman191bd642010-09-01 01:45:53 +00002107 if (&LU != &Uses.back())
Dan Gohman5ce6d052010-05-20 15:17:54 +00002108 std::swap(LU, Uses.back());
2109 Uses.pop_back();
Dan Gohmanc6897702010-10-07 23:33:43 +00002110
2111 // Update RegUses.
2112 RegUses.SwapAndDropUse(LUIdx, Uses.size());
Dan Gohman5ce6d052010-05-20 15:17:54 +00002113}
2114
Dan Gohmana2086b32010-05-19 23:43:12 +00002115/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
2116/// a formula that has the same registers as the given formula.
2117LSRUse *
2118LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
Dan Gohman191bd642010-09-01 01:45:53 +00002119 const LSRUse &OrigLU) {
2120 // Search all uses for the formula. This could be more clever.
Dan Gohmana2086b32010-05-19 23:43:12 +00002121 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2122 LSRUse &LU = Uses[LUIdx];
Dan Gohman6a832712010-08-29 15:27:08 +00002123 // Check whether this use is close enough to OrigLU, to see whether it's
2124 // worthwhile looking through its formulae.
2125 // Ignore ICmpZero uses because they may contain formulae generated by
2126 // GenerateICmpZeroScales, in which case adding fixup offsets may
2127 // be invalid.
Dan Gohmana2086b32010-05-19 23:43:12 +00002128 if (&LU != &OrigLU &&
2129 LU.Kind != LSRUse::ICmpZero &&
2130 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
Dan Gohmana9db1292010-07-15 20:24:58 +00002131 LU.WidestFixupType == OrigLU.WidestFixupType &&
Dan Gohmana2086b32010-05-19 23:43:12 +00002132 LU.HasFormulaWithSameRegs(OrigF)) {
Dan Gohman6a832712010-08-29 15:27:08 +00002133 // Scan through this use's formulae.
Dan Gohman402d4352010-05-20 20:33:18 +00002134 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
2135 E = LU.Formulae.end(); I != E; ++I) {
2136 const Formula &F = *I;
Dan Gohman6a832712010-08-29 15:27:08 +00002137 // Check to see if this formula has the same registers and symbols
2138 // as OrigF.
Dan Gohmana2086b32010-05-19 23:43:12 +00002139 if (F.BaseRegs == OrigF.BaseRegs &&
2140 F.ScaledReg == OrigF.ScaledReg &&
2141 F.AM.BaseGV == OrigF.AM.BaseGV &&
Dan Gohmancca82142011-05-03 00:46:49 +00002142 F.AM.Scale == OrigF.AM.Scale &&
2143 F.UnfoldedOffset == OrigF.UnfoldedOffset) {
Dan Gohman191bd642010-09-01 01:45:53 +00002144 if (F.AM.BaseOffs == 0)
Dan Gohmana2086b32010-05-19 23:43:12 +00002145 return &LU;
Dan Gohman6a832712010-08-29 15:27:08 +00002146 // This is the formula where all the registers and symbols matched;
2147 // there aren't going to be any others. Since we declined it, we
 2148        // can skip the rest of the formulae and proceed to the next LSRUse.
Dan Gohmana2086b32010-05-19 23:43:12 +00002149 break;
2150 }
2151 }
2152 }
2153 }
2154
Dan Gohman6a832712010-08-29 15:27:08 +00002155 // Nothing looked good.
Dan Gohmana2086b32010-05-19 23:43:12 +00002156 return 0;
2157}
2158
Dan Gohman572645c2010-02-12 10:34:29 +00002159void LSRInstance::CollectInterestingTypesAndFactors() {
2160 SmallSetVector<const SCEV *, 4> Strides;
2161
Dan Gohman1b7bf182010-02-19 00:05:23 +00002162 // Collect interesting types and strides.
Dan Gohman448db1c2010-04-07 22:27:08 +00002163 SmallVector<const SCEV *, 4> Worklist;
Dan Gohman572645c2010-02-12 10:34:29 +00002164 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
Dan Gohmanc0564542010-04-19 21:48:58 +00002165 const SCEV *Expr = IU.getExpr(*UI);
Dan Gohman572645c2010-02-12 10:34:29 +00002166
2167 // Collect interesting types.
Dan Gohman448db1c2010-04-07 22:27:08 +00002168 Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
Dan Gohman572645c2010-02-12 10:34:29 +00002169
Dan Gohman448db1c2010-04-07 22:27:08 +00002170 // Add strides for mentioned loops.
2171 Worklist.push_back(Expr);
2172 do {
2173 const SCEV *S = Worklist.pop_back_val();
2174 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
Andrew Trickbd618f12012-03-22 22:42:45 +00002175 if (AR->getLoop() == L)
Andrew Trickfa1948a2011-12-10 00:25:00 +00002176 Strides.insert(AR->getStepRecurrence(SE));
Dan Gohman448db1c2010-04-07 22:27:08 +00002177 Worklist.push_back(AR->getStart());
2178 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
Dan Gohman403a8cd2010-06-21 19:47:52 +00002179 Worklist.append(Add->op_begin(), Add->op_end());
Dan Gohman448db1c2010-04-07 22:27:08 +00002180 }
2181 } while (!Worklist.empty());
Dan Gohman1b7bf182010-02-19 00:05:23 +00002182 }
2183
2184 // Compute interesting factors from the set of interesting strides.
2185 for (SmallSetVector<const SCEV *, 4>::const_iterator
2186 I = Strides.begin(), E = Strides.end(); I != E; ++I)
Dan Gohman572645c2010-02-12 10:34:29 +00002187 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
Oscar Fuentesee56c422010-08-02 06:00:15 +00002188 llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
Dan Gohman1b7bf182010-02-19 00:05:23 +00002189 const SCEV *OldStride = *I;
Dan Gohman572645c2010-02-12 10:34:29 +00002190 const SCEV *NewStride = *NewStrideIter;
Dan Gohman572645c2010-02-12 10:34:29 +00002191
2192 if (SE.getTypeSizeInBits(OldStride->getType()) !=
2193 SE.getTypeSizeInBits(NewStride->getType())) {
2194 if (SE.getTypeSizeInBits(OldStride->getType()) >
2195 SE.getTypeSizeInBits(NewStride->getType()))
2196 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
2197 else
2198 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
2199 }
2200 if (const SCEVConstant *Factor =
Dan Gohmanf09b7122010-02-19 19:35:48 +00002201 dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
2202 SE, true))) {
Dan Gohman572645c2010-02-12 10:34:29 +00002203 if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
2204 Factors.insert(Factor->getValue()->getValue().getSExtValue());
2205 } else if (const SCEVConstant *Factor =
Dan Gohman454d26d2010-02-22 04:11:59 +00002206 dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
2207 NewStride,
Dan Gohmanf09b7122010-02-19 19:35:48 +00002208 SE, true))) {
Dan Gohman572645c2010-02-12 10:34:29 +00002209 if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
2210 Factors.insert(Factor->getValue()->getValue().getSExtValue());
2211 }
2212 }
Dan Gohman572645c2010-02-12 10:34:29 +00002213
2214 // If all uses use the same type, don't bother looking for truncation-based
2215 // reuse.
2216 if (Types.size() == 1)
2217 Types.clear();
2218
2219 DEBUG(print_factors_and_types(dbgs()));
2220}
2221
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002222/// findIVOperand - Helper for CollectChains that finds an IV operand (computed
2223/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped
2224/// Instructions to IVStrideUses, we could partially skip this.
2225static User::op_iterator
2226findIVOperand(User::op_iterator OI, User::op_iterator OE,
2227 Loop *L, ScalarEvolution &SE) {
2228 for(; OI != OE; ++OI) {
2229 if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
2230 if (!SE.isSCEVable(Oper->getType()))
2231 continue;
2232
2233 if (const SCEVAddRecExpr *AR =
2234 dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
2235 if (AR->getLoop() == L)
2236 break;
2237 }
2238 }
2239 }
2240 return OI;
2241}
2242
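// Usage sketch (illustration only, kept out of the build): visiting every IV
// operand of a user by resuming the scan just past the previous hit, which is
// how the chain-collection code below iterates. The guard macro and the
// function name are assumed.
#ifdef LSR_EXAMPLES
static void ExampleVisitIVOperands(Instruction *UserInst, Loop *L,
                                   ScalarEvolution &SE) {
  for (User::op_iterator OI = findIVOperand(UserInst->op_begin(),
                                            UserInst->op_end(), L, SE);
       OI != UserInst->op_end();
       OI = findIVOperand(llvm::next(OI), UserInst->op_end(), L, SE)) {
    // *OI is an operand whose expression is an AddRec for this loop.
    (void)*OI;
  }
}
#endif
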
 2243/// getWideOperand - IVChain logic must consistently peek through TruncInst
 2244/// operands, so wrap that in a convenient helper.
2245static Value *getWideOperand(Value *Oper) {
2246 if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
2247 return Trunc->getOperand(0);
2248 return Oper;
2249}
2250
2251/// isCompatibleIVType - Return true if we allow an IV chain to include both
2252/// types.
2253static bool isCompatibleIVType(Value *LVal, Value *RVal) {
2254 Type *LType = LVal->getType();
2255 Type *RType = RVal->getType();
2256 return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy());
2257}
2258
Andrew Trick64925c52012-01-10 01:45:08 +00002259/// getExprBase - Return an approximation of this SCEV expression's "base", or
2260/// NULL for any constant. Returning the expression itself is
2261/// conservative. Returning a deeper subexpression is more precise and valid as
2262/// long as it isn't less complex than another subexpression. For expressions
2263/// involving multiple unscaled values, we need to return the pointer-type
2264/// SCEVUnknown. This avoids forming chains across objects, such as:
2265/// PrevOper==a[i], IVOper==b[i], IVInc==b-a.
2266///
2267/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
2268/// SCEVUnknown, we simply return the rightmost SCEV operand.
2269static const SCEV *getExprBase(const SCEV *S) {
2270 switch (S->getSCEVType()) {
 2271  default: // including scUnknown.
2272 return S;
2273 case scConstant:
2274 return 0;
2275 case scTruncate:
2276 return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
2277 case scZeroExtend:
2278 return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
2279 case scSignExtend:
2280 return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
2281 case scAddExpr: {
2282 // Skip over scaled operands (scMulExpr) to follow add operands as long as
2283 // there's nothing more complex.
2284 // FIXME: not sure if we want to recognize negation.
2285 const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
2286 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
2287 E(Add->op_begin()); I != E; ++I) {
2288 const SCEV *SubExpr = *I;
2289 if (SubExpr->getSCEVType() == scAddExpr)
2290 return getExprBase(SubExpr);
2291
2292 if (SubExpr->getSCEVType() != scMulExpr)
2293 return SubExpr;
2294 }
2295 return S; // all operands are scaled, be conservative.
2296 }
2297 case scAddRecExpr:
2298 return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
2299 }
2300}
2301
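// Sketch (illustration only, kept out of the build): the chain-pruning test
// below reduces to comparing the bases of the two IV operands' expressions,
// so accesses rooted at different objects are never chained together. The
// guard macro and the function name are assumed.
#ifdef LSR_EXAMPLES
static bool ExampleSameChainBase(Value *PrevIV, Value *NextIV,
                                 ScalarEvolution &SE) {
  return getExprBase(SE.getSCEV(getWideOperand(PrevIV))) ==
         getExprBase(SE.getSCEV(getWideOperand(NextIV)));
}
#endif
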
Andrew Trick22d20c22012-01-09 21:18:52 +00002302/// Return the chain increment expression if it is profitable to chain NextIV
 2303/// to PrevIV, and null otherwise. The increment may be expanded into a loop-
 2304/// invariant value, which may require its own register. A profitable chain
 2305/// increment will be an offset relative to the same base; we allow such offsets
 2306/// as long as they are not obviously expensive to expand using real instructions.
2307static const SCEV *
2308getProfitableChainIncrement(Value *NextIV, Value *PrevIV,
2309 const IVChain &Chain, Loop *L,
2310 ScalarEvolution &SE, const TargetLowering *TLI) {
Andrew Trick64925c52012-01-10 01:45:08 +00002311 // Prune the solution space aggressively by checking that both IV operands
2312 // are expressions that operate on the same unscaled SCEVUnknown. This
2313 // "base" will be canceled by the subsequent getMinusSCEV call. Checking first
2314 // avoids creating extra SCEV expressions.
2315 const SCEV *OperExpr = SE.getSCEV(NextIV);
2316 const SCEV *PrevExpr = SE.getSCEV(PrevIV);
2317 if (getExprBase(OperExpr) != getExprBase(PrevExpr) && !StressIVChain)
2318 return 0;
2319
2320 const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
Andrew Trick22d20c22012-01-09 21:18:52 +00002321 if (!SE.isLoopInvariant(IncExpr, L))
2322 return 0;
2323
2324 // We are not able to expand an increment unless it is loop invariant,
2325 // however, the following checks are purely for profitability.
2326 if (StressIVChain)
2327 return IncExpr;
2328
Andrew Trick64925c52012-01-10 01:45:08 +00002329 // Do not replace a constant offset from IV head with a nonconstant IV
2330 // increment.
2331 if (!isa<SCEVConstant>(IncExpr)) {
2332 const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Chain[0].IVOperand));
2333 if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
2334 return 0;
2335 }
2336
2337 SmallPtrSet<const SCEV*, 8> Processed;
2338 if (isHighCostExpansion(IncExpr, Processed, SE))
2339 return 0;
2340
2341 return IncExpr;
Andrew Trick22d20c22012-01-09 21:18:52 +00002342}
2343
2344/// Return true if the number of registers needed for the chain is estimated to
2345/// be less than the number required for the individual IV users. First prohibit
2346/// any IV users that keep the IV live across increments (the Users set should
2347/// be empty). Next count the number and type of increments in the chain.
2348///
2349/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
2350/// effectively use postinc addressing modes. Only consider it profitable if the
2351/// increments can be computed in fewer registers when chained.
2352///
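/// As a purely illustrative example, a chain whose non-zero increments are the
/// constants 4 and 4 starts at cost 1, is credited once for having more than
/// one constant increment, and is credited again if it closes back on the
/// header phi, giving a negative cost and therefore a profitable chain.
///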
2353/// TODO: Consider IVInc free if it's already used in another chain.
2354static bool
2355isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
2356 ScalarEvolution &SE, const TargetLowering *TLI) {
2357 if (StressIVChain)
2358 return true;
2359
Andrew Trick64925c52012-01-10 01:45:08 +00002360 if (Chain.size() <= 2)
2361 return false;
2362
2363 if (!Users.empty()) {
2364 DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " users:\n";
2365 for (SmallPtrSet<Instruction*, 4>::const_iterator I = Users.begin(),
2366 E = Users.end(); I != E; ++I) {
2367 dbgs() << " " << **I << "\n";
2368 });
2369 return false;
2370 }
2371 assert(!Chain.empty() && "empty IV chains are not allowed");
2372
2373  // The chain itself may require a register, so initialize cost to 1.
2374 int cost = 1;
2375
2376 // A complete chain likely eliminates the need for keeping the original IV in
2377 // a register. LSR does not currently know how to form a complete chain unless
2378 // the header phi already exists.
2379 if (isa<PHINode>(Chain.back().UserInst)
2380 && SE.getSCEV(Chain.back().UserInst) == Chain[0].IncExpr) {
2381 --cost;
2382 }
2383 const SCEV *LastIncExpr = 0;
2384 unsigned NumConstIncrements = 0;
2385 unsigned NumVarIncrements = 0;
2386 unsigned NumReusedIncrements = 0;
2387 for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
2388 I != E; ++I) {
2389
2390 if (I->IncExpr->isZero())
2391 continue;
2392
2393 // Incrementing by zero or some constant is neutral. We assume constants can
2394 // be folded into an addressing mode or an add's immediate operand.
2395 if (isa<SCEVConstant>(I->IncExpr)) {
2396 ++NumConstIncrements;
2397 continue;
2398 }
2399
2400 if (I->IncExpr == LastIncExpr)
2401 ++NumReusedIncrements;
2402 else
2403 ++NumVarIncrements;
2404
2405 LastIncExpr = I->IncExpr;
2406 }
2407 // An IV chain with a single increment is handled by LSR's postinc
2408 // uses. However, a chain with multiple increments requires keeping the IV's
2409 // value live longer than it needs to be if chained.
2410 if (NumConstIncrements > 1)
2411 --cost;
2412
2413 // Materializing increment expressions in the preheader that didn't exist in
2414 // the original code may cost a register. For example, sign-extended array
2415 // indices can produce ridiculous increments like this:
2416 // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
2417 cost += NumVarIncrements;
2418
2419 // Reusing variable increments likely saves a register to hold the multiple of
2420 // the stride.
2421 cost -= NumReusedIncrements;
2422
2423 DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " Cost: " << cost << "\n");
2424
2425 return cost < 0;
Andrew Trick22d20c22012-01-09 21:18:52 +00002426}
2427
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002428/// ChainInstruction - Add this IV user to an existing chain or make it the head
2429/// of a new chain.
2430void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
2431 SmallVectorImpl<ChainUsers> &ChainUsersVec) {
2432 // When IVs are used as types of varying widths, they are generally converted
2433 // to a wider type with some uses remaining narrow under a (free) trunc.
2434 Value *NextIV = getWideOperand(IVOper);
2435
2436  // Visit all existing chains. Check if this user's IVOper can be computed as a
2437 // profitable loop invariant increment from the last link in the Chain.
2438 unsigned ChainIdx = 0, NChains = IVChainVec.size();
2439 const SCEV *LastIncExpr = 0;
2440 for (; ChainIdx < NChains; ++ChainIdx) {
2441 Value *PrevIV = getWideOperand(IVChainVec[ChainIdx].back().IVOperand);
2442 if (!isCompatibleIVType(PrevIV, NextIV))
2443 continue;
2444
Andrew Trickd4e46a62012-03-26 20:28:35 +00002445 // A phi node terminates a chain.
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002446 if (isa<PHINode>(UserInst)
2447 && isa<PHINode>(IVChainVec[ChainIdx].back().UserInst))
2448 continue;
2449
Andrew Trick22d20c22012-01-09 21:18:52 +00002450 if (const SCEV *IncExpr =
2451 getProfitableChainIncrement(NextIV, PrevIV, IVChainVec[ChainIdx],
2452 L, SE, TLI)) {
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002453 LastIncExpr = IncExpr;
2454 break;
2455 }
2456 }
2457 // If we haven't found a chain, create a new one, unless we hit the max. Don't
2458 // bother for phi nodes, because they must be last in the chain.
2459 if (ChainIdx == NChains) {
2460 if (isa<PHINode>(UserInst))
2461 return;
Andrew Trick22d20c22012-01-09 21:18:52 +00002462 if (NChains >= MaxChains && !StressIVChain) {
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002463 DEBUG(dbgs() << "IV Chain Limit\n");
2464 return;
2465 }
Andrew Trick0041d4d2012-01-20 21:23:40 +00002466 LastIncExpr = SE.getSCEV(NextIV);
2467 // IVUsers may have skipped over sign/zero extensions. We don't currently
2468 // attempt to form chains involving extensions unless they can be hoisted
2469 // into this loop's AddRec.
2470 if (!isa<SCEVAddRecExpr>(LastIncExpr))
2471 return;
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002472 ++NChains;
2473 IVChainVec.resize(NChains);
2474 ChainUsersVec.resize(NChains);
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002475 DEBUG(dbgs() << "IV Head: (" << *UserInst << ") IV=" << *LastIncExpr
2476 << "\n");
2477 }
2478 else
2479 DEBUG(dbgs() << "IV Inc: (" << *UserInst << ") IV+" << *LastIncExpr
2480 << "\n");
2481
2482 // Add this IV user to the end of the chain.
2483 IVChainVec[ChainIdx].push_back(IVInc(UserInst, IVOper, LastIncExpr));
2484
2485 SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
2486 // This chain's NearUsers become FarUsers.
2487 if (!LastIncExpr->isZero()) {
2488 ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
2489 NearUsers.end());
2490 NearUsers.clear();
2491 }
2492
2493 // All other uses of IVOperand become near uses of the chain.
2494 // We currently ignore intermediate values within SCEV expressions, assuming
2495  // they will eventually be used by the current chain, or can be computed
2496 // from one of the chain increments. To be more precise we could
2497  // transitively follow its users and only add leaf IV users to the set.
2498 for (Value::use_iterator UseIter = IVOper->use_begin(),
2499 UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) {
2500 Instruction *OtherUse = dyn_cast<Instruction>(*UseIter);
Andrew Trick81748bc2012-03-26 18:03:16 +00002501 if (!OtherUse || OtherUse == UserInst)
2502 continue;
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002503 if (SE.isSCEVable(OtherUse->getType())
2504 && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
2505 && IU.isIVUserOrOperand(OtherUse)) {
2506 continue;
2507 }
Andrew Trick81748bc2012-03-26 18:03:16 +00002508 NearUsers.insert(OtherUse);
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002509 }
2510
2511 // Since this user is part of the chain, it's no longer considered a use
2512 // of the chain.
2513 ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
2514}
2515
2516/// CollectChains - Populate the vector of Chains.
2517///
2518/// This decreases ILP at the architecture level. Targets with ample registers,
2519/// multiple memory ports, and no register renaming probably don't want
2520/// this. However, such targets should probably disable LSR altogether.
2521///
2522/// The job of LSR is to make a reasonable choice of induction variables across
2523/// the loop. Subsequent passes can easily "unchain" computation exposing more
2524/// ILP *within the loop* if the target wants it.
2525///
2526/// Finding the best IV chain is potentially a scheduling problem. Since LSR
2527/// will not reorder memory operations, it will recognize this as a chain, but
2528/// will generate redundant IV increments. Ideally this would be corrected later
2529/// by a smart scheduler:
2530/// = A[i]
2531/// = A[i+x]
2532/// A[i] =
2533/// A[i+x] =
2534///
2535/// TODO: Walk the entire domtree within this loop, not just the path to the
2536/// loop latch. This will discover chains on side paths, but requires
2537/// maintaining multiple copies of the Chains state.
2538void LSRInstance::CollectChains() {
2539 SmallVector<ChainUsers, 8> ChainUsersVec;
2540
2541 SmallVector<BasicBlock *,8> LatchPath;
2542 BasicBlock *LoopHeader = L->getHeader();
2543 for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
2544 Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
2545 LatchPath.push_back(Rung->getBlock());
2546 }
2547 LatchPath.push_back(LoopHeader);
2548
2549 // Walk the instruction stream from the loop header to the loop latch.
2550 for (SmallVectorImpl<BasicBlock *>::reverse_iterator
2551 BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend();
2552 BBIter != BBEnd; ++BBIter) {
2553 for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end();
2554 I != E; ++I) {
2555 // Skip instructions that weren't seen by IVUsers analysis.
2556 if (isa<PHINode>(I) || !IU.isIVUserOrOperand(I))
2557 continue;
2558
2559 // Ignore users that are part of a SCEV expression. This way we only
2560 // consider leaf IV Users. This effectively rediscovers a portion of
2561 // IVUsers analysis but in program order this time.
2562 if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(I)))
2563 continue;
2564
2565 // Remove this instruction from any NearUsers set it may be in.
2566 for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
2567 ChainIdx < NChains; ++ChainIdx) {
2568 ChainUsersVec[ChainIdx].NearUsers.erase(I);
2569 }
2570 // Search for operands that can be chained.
2571 SmallPtrSet<Instruction*, 4> UniqueOperands;
2572 User::op_iterator IVOpEnd = I->op_end();
2573 User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE);
2574 while (IVOpIter != IVOpEnd) {
2575 Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
2576 if (UniqueOperands.insert(IVOpInst))
2577 ChainInstruction(I, IVOpInst, ChainUsersVec);
2578 IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
2579 }
2580 } // Continue walking down the instructions.
2581 } // Continue walking down the domtree.
2582 // Visit phi backedges to determine if the chain can generate the IV postinc.
2583 for (BasicBlock::iterator I = L->getHeader()->begin();
2584 PHINode *PN = dyn_cast<PHINode>(I); ++I) {
2585 if (!SE.isSCEVable(PN->getType()))
2586 continue;
2587
2588 Instruction *IncV =
2589 dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
2590 if (IncV)
2591 ChainInstruction(PN, IncV, ChainUsersVec);
2592 }
Andrew Trick22d20c22012-01-09 21:18:52 +00002593 // Remove any unprofitable chains.
2594 unsigned ChainIdx = 0;
2595 for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
2596 UsersIdx < NChains; ++UsersIdx) {
2597 if (!isProfitableChain(IVChainVec[UsersIdx],
2598 ChainUsersVec[UsersIdx].FarUsers, SE, TLI))
2599 continue;
2600    // Preserve the chain at UsersIdx.
2601 if (ChainIdx != UsersIdx)
2602 IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
2603 FinalizeChain(IVChainVec[ChainIdx]);
2604 ++ChainIdx;
2605 }
2606 IVChainVec.resize(ChainIdx);
2607}
2608
2609void LSRInstance::FinalizeChain(IVChain &Chain) {
2610 assert(!Chain.empty() && "empty IV chains are not allowed");
2611 DEBUG(dbgs() << "Final Chain: " << *Chain[0].UserInst << "\n");
2612
2613 for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
2614 I != E; ++I) {
2615 DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n");
2616 User::op_iterator UseI =
2617 std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand);
2618 assert(UseI != I->UserInst->op_end() && "cannot find IV operand");
2619 IVIncSet.insert(UseI);
2620 }
2621}
2622
2623/// Return true if the IVInc can be folded into an addressing mode.
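///
/// For example, a constant increment of 8 can typically be folded into a
/// [reg + 8] style addressing mode on targets that support reg+imm addresses.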
2624static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
2625 Value *Operand, const TargetLowering *TLI) {
2626 const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
2627 if (!IncConst || !isAddressUse(UserInst, Operand))
2628 return false;
2629
2630 if (IncConst->getValue()->getValue().getMinSignedBits() > 64)
2631 return false;
2632
2633 int64_t IncOffset = IncConst->getValue()->getSExtValue();
2634  if (!isAlwaysFoldable(IncOffset, /*BaseGV=*/0, /*HasBaseReg=*/false,
2635 LSRUse::Address, getAccessType(UserInst), TLI))
2636 return false;
2637
2638 return true;
2639}
2640
2641/// GenerateIVChain - Generate an add or subtract for each IVInc in a chain to
2642/// materialize the IV user's operand from the previous IV user's operand.
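///
/// For example (hypothetical chain), for loads of a[i], a[i+4], and a[i+8]
/// over i32 elements, each address can be formed by adding 16 bytes to the
/// previous user's address rather than recomputing it from i.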
2643void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
2644 SmallVectorImpl<WeakVH> &DeadInsts) {
2645 // Find the new IVOperand for the head of the chain. It may have been replaced
2646 // by LSR.
2647 const IVInc &Head = Chain[0];
2648 User::op_iterator IVOpEnd = Head.UserInst->op_end();
2649 User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
2650 IVOpEnd, L, SE);
2651 Value *IVSrc = 0;
2652 while (IVOpIter != IVOpEnd) {
2653 IVSrc = getWideOperand(*IVOpIter);
2654
2655 // If this operand computes the expression that the chain needs, we may use
2656 // it. (Check this after setting IVSrc which is used below.)
2657 //
2658 // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
2659 // narrow for the chain, so we can no longer use it. We do allow using a
2660 // wider phi, assuming the LSR checked for free truncation. In that case we
2661    // wider phi, assuming LSR checked for free truncation. In that case we
2662 // getSCEV(IVSrc) == IncExpr.
2663 if (SE.getSCEV(*IVOpIter) == Head.IncExpr
2664 || SE.getSCEV(IVSrc) == Head.IncExpr) {
2665 break;
2666 }
2667 IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
2668 }
2669 if (IVOpIter == IVOpEnd) {
2670 // Gracefully give up on this chain.
2671 DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
2672 return;
2673 }
2674
2675 DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
2676 Type *IVTy = IVSrc->getType();
2677 Type *IntTy = SE.getEffectiveSCEVType(IVTy);
2678 const SCEV *LeftOverExpr = 0;
2679 for (IVChain::const_iterator IncI = llvm::next(Chain.begin()),
2680 IncE = Chain.end(); IncI != IncE; ++IncI) {
2681
2682 Instruction *InsertPt = IncI->UserInst;
2683 if (isa<PHINode>(InsertPt))
2684 InsertPt = L->getLoopLatch()->getTerminator();
2685
2686 // IVOper will replace the current IV User's operand. IVSrc is the IV
2687 // value currently held in a register.
2688 Value *IVOper = IVSrc;
2689 if (!IncI->IncExpr->isZero()) {
2690 // IncExpr was the result of subtraction of two narrow values, so must
2691 // be signed.
2692 const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy);
2693 LeftOverExpr = LeftOverExpr ?
2694 SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
2695 }
2696 if (LeftOverExpr && !LeftOverExpr->isZero()) {
2697 // Expand the IV increment.
2698 Rewriter.clearPostInc();
2699 Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
2700 const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
2701 SE.getUnknown(IncV));
2702 IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);
2703
2704 // If an IV increment can't be folded, use it as the next IV value.
2705 if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand,
2706 TLI)) {
2707 assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
2708 IVSrc = IVOper;
2709 LeftOverExpr = 0;
2710 }
2711 }
2712 Type *OperTy = IncI->IVOperand->getType();
2713 if (IVTy != OperTy) {
2714 assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
2715 "cannot extend a chained IV");
2716 IRBuilder<> Builder(InsertPt);
2717 IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
2718 }
2719 IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper);
2720 DeadInsts.push_back(IncI->IVOperand);
2721 }
2722 // If LSR created a new, wider phi, we may also replace its postinc. We only
2723 // do this if we also found a wide value for the head of the chain.
2724 if (isa<PHINode>(Chain.back().UserInst)) {
2725 for (BasicBlock::iterator I = L->getHeader()->begin();
2726 PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
2727 if (!isCompatibleIVType(Phi, IVSrc))
2728 continue;
2729 Instruction *PostIncV = dyn_cast<Instruction>(
2730 Phi->getIncomingValueForBlock(L->getLoopLatch()));
2731 if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
2732 continue;
2733 Value *IVOper = IVSrc;
2734 Type *PostIncTy = PostIncV->getType();
2735 if (IVTy != PostIncTy) {
2736 assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
2737 IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
2738 Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
2739 IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
2740 }
2741 Phi->replaceUsesOfWith(PostIncV, IVOper);
2742 DeadInsts.push_back(PostIncV);
2743 }
2744 }
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00002745}
2746
Dan Gohman572645c2010-02-12 10:34:29 +00002747void LSRInstance::CollectFixupsAndInitialFormulae() {
2748 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
Andrew Trick22d20c22012-01-09 21:18:52 +00002749 Instruction *UserInst = UI->getUser();
2750 // Skip IV users that are part of profitable IV Chains.
2751 User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(),
2752 UI->getOperandValToReplace());
2753 assert(UseI != UserInst->op_end() && "cannot find IV operand");
2754 if (IVIncSet.count(UseI))
2755 continue;
2756
Dan Gohman572645c2010-02-12 10:34:29 +00002757 // Record the uses.
2758 LSRFixup &LF = getNewFixup();
Andrew Trick22d20c22012-01-09 21:18:52 +00002759 LF.UserInst = UserInst;
Dan Gohman572645c2010-02-12 10:34:29 +00002760 LF.OperandValToReplace = UI->getOperandValToReplace();
Dan Gohman448db1c2010-04-07 22:27:08 +00002761 LF.PostIncLoops = UI->getPostIncLoops();
Dan Gohman572645c2010-02-12 10:34:29 +00002762
2763 LSRUse::KindType Kind = LSRUse::Basic;
Chris Lattnerdb125cf2011-07-18 04:54:35 +00002764 Type *AccessTy = 0;
Dan Gohman572645c2010-02-12 10:34:29 +00002765 if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
2766 Kind = LSRUse::Address;
2767 AccessTy = getAccessType(LF.UserInst);
2768 }
2769
Dan Gohmanc0564542010-04-19 21:48:58 +00002770 const SCEV *S = IU.getExpr(*UI);
Dan Gohman572645c2010-02-12 10:34:29 +00002771
2772 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
2773 // (N - i == 0), and this allows (N - i) to be the expression that we work
2774 // with rather than just N or i, so we can consider the register
2775 // requirements for both N and i at the same time. Limiting this code to
2776 // equality icmps is not a problem because all interesting loops use
2777 // equality icmps, thanks to IndVarSimplify.
2778 if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
2779 if (CI->isEquality()) {
2780 // Swap the operands if needed to put the OperandValToReplace on the
2781 // left, for consistency.
2782 Value *NV = CI->getOperand(1);
2783 if (NV == LF.OperandValToReplace) {
2784 CI->setOperand(1, CI->getOperand(0));
2785 CI->setOperand(0, NV);
Dan Gohmanf182b232010-05-20 19:26:52 +00002786 NV = CI->getOperand(1);
Dan Gohman9da1bf42010-05-20 19:16:03 +00002787 Changed = true;
Dan Gohman572645c2010-02-12 10:34:29 +00002788 }
2789
2790 // x == y --> x - y == 0
2791 const SCEV *N = SE.getSCEV(NV);
Dan Gohman17ead4f2010-11-17 21:23:15 +00002792 if (SE.isLoopInvariant(N, L)) {
Dan Gohman673968a2011-05-18 21:02:18 +00002793 // S is normalized, so normalize N before folding it into S
2794 // to keep the result normalized.
2795 N = TransformForPostIncUse(Normalize, N, CI, 0,
2796 LF.PostIncLoops, SE, DT);
Dan Gohman572645c2010-02-12 10:34:29 +00002797 Kind = LSRUse::ICmpZero;
2798 S = SE.getMinusSCEV(N, S);
2799 }
2800
2801 // -1 and the negations of all interesting strides (except the negation
2802 // of -1) are now also interesting.
2803 for (size_t i = 0, e = Factors.size(); i != e; ++i)
2804 if (Factors[i] != -1)
2805 Factors.insert(-(uint64_t)Factors[i]);
2806 Factors.insert(-1);
2807 }
2808
2809 // Set up the initial formula for this use.
2810 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
2811 LF.LUIdx = P.first;
2812 LF.Offset = P.second;
2813 LSRUse &LU = Uses[LF.LUIdx];
Dan Gohman448db1c2010-04-07 22:27:08 +00002814 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
Dan Gohmana9db1292010-07-15 20:24:58 +00002815 if (!LU.WidestFixupType ||
2816 SE.getTypeSizeInBits(LU.WidestFixupType) <
2817 SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
2818 LU.WidestFixupType = LF.OperandValToReplace->getType();
Dan Gohman572645c2010-02-12 10:34:29 +00002819
2820 // If this is the first use of this LSRUse, give it a formula.
2821 if (LU.Formulae.empty()) {
Dan Gohman454d26d2010-02-22 04:11:59 +00002822 InsertInitialFormula(S, LU, LF.LUIdx);
Dan Gohman572645c2010-02-12 10:34:29 +00002823 CountRegisters(LU.Formulae.back(), LF.LUIdx);
2824 }
2825 }
2826
2827 DEBUG(print_fixups(dbgs()));
2828}
2829
Dan Gohman76c315a2010-05-20 20:52:00 +00002830/// InsertInitialFormula - Insert a formula for the given expression into
2831/// the given use, separating out loop-variant portions from loop-invariant
2832/// and loop-computable portions.
Dan Gohman572645c2010-02-12 10:34:29 +00002833void
Dan Gohman454d26d2010-02-22 04:11:59 +00002834LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
Dan Gohman572645c2010-02-12 10:34:29 +00002835 Formula F;
Dan Gohmandc0e8fb2010-11-17 21:41:58 +00002836 F.InitialMatch(S, L, SE);
Dan Gohman572645c2010-02-12 10:34:29 +00002837 bool Inserted = InsertFormula(LU, LUIdx, F);
2838 assert(Inserted && "Initial formula already exists!"); (void)Inserted;
2839}
2840
Dan Gohman76c315a2010-05-20 20:52:00 +00002841/// InsertSupplementalFormula - Insert a simple single-register formula for
2842/// the given expression into the given use.
Dan Gohman572645c2010-02-12 10:34:29 +00002843void
2844LSRInstance::InsertSupplementalFormula(const SCEV *S,
2845 LSRUse &LU, size_t LUIdx) {
2846 Formula F;
2847 F.BaseRegs.push_back(S);
2848 F.AM.HasBaseReg = true;
2849 bool Inserted = InsertFormula(LU, LUIdx, F);
2850 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
2851}
2852
2853/// CountRegisters - Note which registers are used by the given formula,
2854/// updating RegUses.
2855void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
2856 if (F.ScaledReg)
2857 RegUses.CountRegister(F.ScaledReg, LUIdx);
2858 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
2859 E = F.BaseRegs.end(); I != E; ++I)
2860 RegUses.CountRegister(*I, LUIdx);
2861}
2862
2863/// InsertFormula - If the given formula has not yet been inserted, add it to
2864/// the list, and return true. Return false otherwise.
2865bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
Dan Gohman454d26d2010-02-22 04:11:59 +00002866 if (!LU.InsertFormula(F))
Dan Gohman572645c2010-02-12 10:34:29 +00002867 return false;
2868
2869 CountRegisters(F, LUIdx);
2870 return true;
2871}
2872
2873/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
2874/// loop-invariant values which we're tracking. These other uses will pin these
2875/// values in registers, making them less profitable for elimination.
2876/// TODO: This currently misses non-constant addrec step registers.
2877/// TODO: Should this give more weight to users inside the loop?
2878void
2879LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
2880 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
2881 SmallPtrSet<const SCEV *, 8> Inserted;
2882
2883 while (!Worklist.empty()) {
2884 const SCEV *S = Worklist.pop_back_val();
2885
2886 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
Dan Gohman403a8cd2010-06-21 19:47:52 +00002887 Worklist.append(N->op_begin(), N->op_end());
Dan Gohman572645c2010-02-12 10:34:29 +00002888 else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
2889 Worklist.push_back(C->getOperand());
2890 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2891 Worklist.push_back(D->getLHS());
2892 Worklist.push_back(D->getRHS());
2893 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2894 if (!Inserted.insert(U)) continue;
2895 const Value *V = U->getValue();
Dan Gohmana15ec5d2010-06-04 23:16:05 +00002896 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
2897 // Look for instructions defined outside the loop.
Dan Gohman572645c2010-02-12 10:34:29 +00002898 if (L->contains(Inst)) continue;
Dan Gohmana15ec5d2010-06-04 23:16:05 +00002899 } else if (isa<UndefValue>(V))
2900 // Undef doesn't have a live range, so it doesn't matter.
2901 continue;
Gabor Greif60ad7812010-03-25 23:06:16 +00002902 for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
Dan Gohman572645c2010-02-12 10:34:29 +00002903 UI != UE; ++UI) {
2904 const Instruction *UserInst = dyn_cast<Instruction>(*UI);
2905 // Ignore non-instructions.
2906 if (!UserInst)
Dan Gohman7979b722010-01-22 00:46:49 +00002907 continue;
Dan Gohman572645c2010-02-12 10:34:29 +00002908 // Ignore instructions in other functions (as can happen with
2909 // Constants).
2910 if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
Dan Gohman7979b722010-01-22 00:46:49 +00002911 continue;
Dan Gohman572645c2010-02-12 10:34:29 +00002912 // Ignore instructions not dominated by the loop.
2913 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
2914 UserInst->getParent() :
2915 cast<PHINode>(UserInst)->getIncomingBlock(
2916 PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
2917 if (!DT.dominates(L->getHeader(), UseBB))
2918 continue;
2919 // Ignore uses which are part of other SCEV expressions, to avoid
2920 // analyzing them multiple times.
Dan Gohman4a2a6832010-04-09 19:12:34 +00002921 if (SE.isSCEVable(UserInst->getType())) {
2922 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
2923 // If the user is a no-op, look through to its uses.
2924 if (!isa<SCEVUnknown>(UserS))
2925 continue;
2926 if (UserS == U) {
2927 Worklist.push_back(
2928 SE.getUnknown(const_cast<Instruction *>(UserInst)));
2929 continue;
2930 }
2931 }
Dan Gohman572645c2010-02-12 10:34:29 +00002932 // Ignore icmp instructions which are already being analyzed.
2933 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
2934 unsigned OtherIdx = !UI.getOperandNo();
2935 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
Dan Gohman17ead4f2010-11-17 21:23:15 +00002936 if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
Dan Gohman572645c2010-02-12 10:34:29 +00002937 continue;
2938 }
2939
2940 LSRFixup &LF = getNewFixup();
2941 LF.UserInst = const_cast<Instruction *>(UserInst);
2942 LF.OperandValToReplace = UI.getUse();
2943 std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
2944 LF.LUIdx = P.first;
2945 LF.Offset = P.second;
2946 LSRUse &LU = Uses[LF.LUIdx];
Dan Gohman448db1c2010-04-07 22:27:08 +00002947 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
Dan Gohmana9db1292010-07-15 20:24:58 +00002948 if (!LU.WidestFixupType ||
2949 SE.getTypeSizeInBits(LU.WidestFixupType) <
2950 SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
2951 LU.WidestFixupType = LF.OperandValToReplace->getType();
Dan Gohman572645c2010-02-12 10:34:29 +00002952 InsertSupplementalFormula(U, LU, LF.LUIdx);
2953 CountRegisters(LU.Formulae.back(), Uses.size() - 1);
2954 break;
2955 }
2956 }
2957 }
2958}
2959
2960/// CollectSubexprs - Split S into subexpressions which can be pulled out into
2961/// separate registers. If C is non-null, multiply each subexpression by C.
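///
/// For example (hypothetical SCEV), {(%a + %b),+,4}<%L> is split into %a, %b,
/// and {0,+,4}<%L>, each of which may then be placed in its own register.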
2962static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
2963 SmallVectorImpl<const SCEV *> &Ops,
Dan Gohman3e3f15b2010-06-25 22:32:18 +00002964 const Loop *L,
Dan Gohman572645c2010-02-12 10:34:29 +00002965 ScalarEvolution &SE) {
2966 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2967 // Break out add operands.
2968 for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
2969 I != E; ++I)
Dan Gohman3e22b7c2010-08-16 15:50:00 +00002970 CollectSubexprs(*I, C, Ops, L, SE);
Dan Gohman572645c2010-02-12 10:34:29 +00002971 return;
2972 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2973 // Split a non-zero base out of an addrec.
2974 if (!AR->getStart()->isZero()) {
Dan Gohmandeff6212010-05-03 22:09:21 +00002975 CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
Dan Gohman572645c2010-02-12 10:34:29 +00002976 AR->getStepRecurrence(SE),
Andrew Trick3228cc22011-03-14 16:50:06 +00002977 AR->getLoop(),
2978 //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
2979 SCEV::FlagAnyWrap),
Dan Gohman3e22b7c2010-08-16 15:50:00 +00002980 C, Ops, L, SE);
2981 CollectSubexprs(AR->getStart(), C, Ops, L, SE);
Dan Gohman572645c2010-02-12 10:34:29 +00002982 return;
2983 }
2984 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2985 // Break (C * (a + b + c)) into C*a + C*b + C*c.
2986 if (Mul->getNumOperands() == 2)
2987 if (const SCEVConstant *Op0 =
2988 dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
2989 CollectSubexprs(Mul->getOperand(1),
2990 C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
Dan Gohman3e22b7c2010-08-16 15:50:00 +00002991 Ops, L, SE);
Dan Gohman572645c2010-02-12 10:34:29 +00002992 return;
2993 }
2994 }
2995
Dan Gohman3e22b7c2010-08-16 15:50:00 +00002996 // Otherwise use the value itself, optionally with a scale applied.
2997 Ops.push_back(C ? SE.getMulExpr(C, S) : S);
Dan Gohman572645c2010-02-12 10:34:29 +00002998}
2999
3000/// GenerateReassociations - Split out subexpressions from adds and the bases of
3001/// addrecs.
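///
/// For example (hypothetical formula), a base register {(%a + %b),+,4} can be
/// reassociated so that %a is pulled out as its own base register, which may
/// then be shared with other uses that also reference %a.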
3002void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
3003 Formula Base,
3004 unsigned Depth) {
3005 // Arbitrarily cap recursion to protect compile time.
3006 if (Depth >= 3) return;
3007
3008 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
3009 const SCEV *BaseReg = Base.BaseRegs[i];
3010
Dan Gohman3e22b7c2010-08-16 15:50:00 +00003011 SmallVector<const SCEV *, 8> AddOps;
3012 CollectSubexprs(BaseReg, 0, AddOps, L, SE);
Dan Gohman3e3f15b2010-06-25 22:32:18 +00003013
Dan Gohman572645c2010-02-12 10:34:29 +00003014 if (AddOps.size() == 1) continue;
3015
3016 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
3017 JE = AddOps.end(); J != JE; ++J) {
Dan Gohman3e22b7c2010-08-16 15:50:00 +00003018
3019 // Loop-variant "unknown" values are uninteresting; we won't be able to
3020 // do anything meaningful with them.
Dan Gohman17ead4f2010-11-17 21:23:15 +00003021 if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
Dan Gohman3e22b7c2010-08-16 15:50:00 +00003022 continue;
3023
Dan Gohman572645c2010-02-12 10:34:29 +00003024 // Don't pull a constant into a register if the constant could be folded
3025 // into an immediate field.
3026 if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
3027 Base.getNumRegs() > 1,
3028 LU.Kind, LU.AccessTy, TLI, SE))
3029 continue;
3030
3031 // Collect all operands except *J.
Dan Gohman403a8cd2010-06-21 19:47:52 +00003032 SmallVector<const SCEV *, 8> InnerAddOps
Dan Gohman4eaee282010-08-04 17:43:57 +00003033 (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
Dan Gohman403a8cd2010-06-21 19:47:52 +00003034 InnerAddOps.append
Oscar Fuentesee56c422010-08-02 06:00:15 +00003035 (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());
Dan Gohman572645c2010-02-12 10:34:29 +00003036
3037 // Don't leave just a constant behind in a register if the constant could
3038 // be folded into an immediate field.
3039 if (InnerAddOps.size() == 1 &&
3040 isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
3041 Base.getNumRegs() > 1,
3042 LU.Kind, LU.AccessTy, TLI, SE))
3043 continue;
3044
Dan Gohmanfafb8902010-04-23 01:55:05 +00003045 const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
3046 if (InnerSum->isZero())
3047 continue;
Dan Gohman572645c2010-02-12 10:34:29 +00003048 Formula F = Base;
Dan Gohmancca82142011-05-03 00:46:49 +00003049
3050 // Add the remaining pieces of the add back into the new formula.
3051 const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
3052 if (TLI && InnerSumSC &&
3053 SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
3054 TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3055 InnerSumSC->getValue()->getZExtValue())) {
3056 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
3057 InnerSumSC->getValue()->getZExtValue();
3058 F.BaseRegs.erase(F.BaseRegs.begin() + i);
3059 } else
3060 F.BaseRegs[i] = InnerSum;
3061
3062 // Add J as its own register, or an unfolded immediate.
3063 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
3064 if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
3065 TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3066 SC->getValue()->getZExtValue()))
3067 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
3068 SC->getValue()->getZExtValue();
3069 else
3070 F.BaseRegs.push_back(*J);
3071
Dan Gohman572645c2010-02-12 10:34:29 +00003072 if (InsertFormula(LU, LUIdx, F))
3073 // If that formula hadn't been seen before, recurse to find more like
3074 // it.
3075 GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
3076 }
3077 }
3078}
3079
3080/// GenerateCombinations - Generate a formula consisting of all of the
3081/// loop-dominating registers added into a single register.
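///
/// For example (hypothetical formula), base registers %a, %b, and {0,+,4} can
/// become (%a + %b) and {0,+,4} when %a and %b are loop-invariant, trading two
/// registers for one.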
3082void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
Dan Gohman441a3892010-02-14 18:51:39 +00003083 Formula Base) {
Dan Gohman3f46a3a2010-03-01 17:49:51 +00003084 // This method is only interesting on a plurality of registers.
Dan Gohman572645c2010-02-12 10:34:29 +00003085 if (Base.BaseRegs.size() <= 1) return;
3086
3087 Formula F = Base;
3088 F.BaseRegs.clear();
3089 SmallVector<const SCEV *, 4> Ops;
3090 for (SmallVectorImpl<const SCEV *>::const_iterator
3091 I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
3092 const SCEV *BaseReg = *I;
Dan Gohmandc0e8fb2010-11-17 21:41:58 +00003093 if (SE.properlyDominates(BaseReg, L->getHeader()) &&
Dan Gohman17ead4f2010-11-17 21:23:15 +00003094 !SE.hasComputableLoopEvolution(BaseReg, L))
Dan Gohman572645c2010-02-12 10:34:29 +00003095 Ops.push_back(BaseReg);
3096 else
3097 F.BaseRegs.push_back(BaseReg);
3098 }
3099 if (Ops.size() > 1) {
Dan Gohmance947362010-02-14 18:50:49 +00003100 const SCEV *Sum = SE.getAddExpr(Ops);
3101 // TODO: If Sum is zero, it probably means ScalarEvolution missed an
3102 // opportunity to fold something. For now, just ignore such cases
Dan Gohman3f46a3a2010-03-01 17:49:51 +00003103 // rather than proceed with zero in a register.
Dan Gohmance947362010-02-14 18:50:49 +00003104 if (!Sum->isZero()) {
3105 F.BaseRegs.push_back(Sum);
3106 (void)InsertFormula(LU, LUIdx, F);
3107 }
Dan Gohman572645c2010-02-12 10:34:29 +00003108 }
3109}
3110
3111/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
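///
/// For example (hypothetical register), a base register (@global + %i) can be
/// rewritten as BaseGV = @global with %i left as the base register, when that
/// addressing mode is legal for this use on the target.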
3112void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
3113 Formula Base) {
3114 // We can't add a symbolic offset if the address already contains one.
3115 if (Base.AM.BaseGV) return;
3116
3117 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
3118 const SCEV *G = Base.BaseRegs[i];
3119 GlobalValue *GV = ExtractSymbol(G, SE);
3120 if (G->isZero() || !GV)
3121 continue;
3122 Formula F = Base;
3123 F.AM.BaseGV = GV;
3124 if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
3125 LU.Kind, LU.AccessTy, TLI))
3126 continue;
3127 F.BaseRegs[i] = G;
3128 (void)InsertFormula(LU, LUIdx, F);
3129 }
3130}
3131
3132/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
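///
/// For example (hypothetical register), a base register (%p + 8) can have the
/// constant pulled out, leaving %p as the base register and folding 8 into the
/// formula's immediate offset, when that remains legal for this use.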
3133void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3134 Formula Base) {
3135 // TODO: For now, just add the min and max offset, because it usually isn't
3136  // worthwhile looking at everything in between.
Dan Gohmanc88c1a42010-07-15 15:14:45 +00003137 SmallVector<int64_t, 2> Worklist;
Dan Gohman572645c2010-02-12 10:34:29 +00003138 Worklist.push_back(LU.MinOffset);
3139 if (LU.MaxOffset != LU.MinOffset)
3140 Worklist.push_back(LU.MaxOffset);
3141
3142 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
3143 const SCEV *G = Base.BaseRegs[i];
3144
3145 for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
3146 E = Worklist.end(); I != E; ++I) {
3147 Formula F = Base;
3148 F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
3149 if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
3150 LU.Kind, LU.AccessTy, TLI)) {
Dan Gohmanc88c1a42010-07-15 15:14:45 +00003151 // Add the offset to the base register.
Dan Gohman4065f602010-08-16 15:39:27 +00003152 const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
Dan Gohmanc88c1a42010-07-15 15:14:45 +00003153 // If it cancelled out, drop the base register, otherwise update it.
3154 if (NewG->isZero()) {
3155 std::swap(F.BaseRegs[i], F.BaseRegs.back());
3156 F.BaseRegs.pop_back();
3157 } else
3158 F.BaseRegs[i] = NewG;
Dan Gohman572645c2010-02-12 10:34:29 +00003159
3160 (void)InsertFormula(LU, LUIdx, F);
3161 }
3162 }
3163
3164 int64_t Imm = ExtractImmediate(G, SE);
3165 if (G->isZero() || Imm == 0)
3166 continue;
3167 Formula F = Base;
3168 F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
3169 if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
3170 LU.Kind, LU.AccessTy, TLI))
3171 continue;
3172 F.BaseRegs[i] = G;
3173 (void)InsertFormula(LU, LUIdx, F);
3174 }
3175}
3176
3177/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
3178/// the comparison. For example, x == y -> x*c == y*c.
3179void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3180 Formula Base) {
3181 if (LU.Kind != LSRUse::ICmpZero) return;
3182
3183 // Determine the integer type for the base formula.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00003184 Type *IntTy = Base.getType();
Dan Gohman572645c2010-02-12 10:34:29 +00003185 if (!IntTy) return;
3186 if (SE.getTypeSizeInBits(IntTy) > 64) return;
3187
3188 // Don't do this if there is more than one offset.
3189 if (LU.MinOffset != LU.MaxOffset) return;
3190
3191 assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");
3192
3193 // Check each interesting stride.
3194 for (SmallSetVector<int64_t, 8>::const_iterator
3195 I = Factors.begin(), E = Factors.end(); I != E; ++I) {
3196 int64_t Factor = *I;
Dan Gohman572645c2010-02-12 10:34:29 +00003197
3198 // Check that the multiplication doesn't overflow.
Dan Gohman2ea09e02010-06-24 16:57:52 +00003199 if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
Dan Gohman968cb932010-02-17 00:41:53 +00003200 continue;
Dan Gohman2ea09e02010-06-24 16:57:52 +00003201 int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
3202 if (NewBaseOffs / Factor != Base.AM.BaseOffs)
Dan Gohman572645c2010-02-12 10:34:29 +00003203 continue;
3204
3205 // Check that multiplying with the use offset doesn't overflow.
3206 int64_t Offset = LU.MinOffset;
Dan Gohman968cb932010-02-17 00:41:53 +00003207 if (Offset == INT64_MIN && Factor == -1)
3208 continue;
Dan Gohman572645c2010-02-12 10:34:29 +00003209 Offset = (uint64_t)Offset * Factor;
Dan Gohman378c0b32010-02-17 00:42:19 +00003210 if (Offset / Factor != LU.MinOffset)
Dan Gohman572645c2010-02-12 10:34:29 +00003211 continue;
3212
Dan Gohman2ea09e02010-06-24 16:57:52 +00003213 Formula F = Base;
3214 F.AM.BaseOffs = NewBaseOffs;
3215
Dan Gohman572645c2010-02-12 10:34:29 +00003216 // Check that this scale is legal.
3217 if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
3218 continue;
3219
3220 // Compensate for the use having MinOffset built into it.
3221 F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;
3222
Dan Gohmandeff6212010-05-03 22:09:21 +00003223 const SCEV *FactorS = SE.getConstant(IntTy, Factor);
Dan Gohman572645c2010-02-12 10:34:29 +00003224
3225 // Check that multiplying with each base register doesn't overflow.
3226 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
3227 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
Dan Gohmanf09b7122010-02-19 19:35:48 +00003228 if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
Dan Gohman572645c2010-02-12 10:34:29 +00003229 goto next;
3230 }
3231
3232 // Check that multiplying with the scaled register doesn't overflow.
3233 if (F.ScaledReg) {
3234 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
Dan Gohmanf09b7122010-02-19 19:35:48 +00003235 if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
Dan Gohman572645c2010-02-12 10:34:29 +00003236 continue;
3237 }
3238
Dan Gohmancca82142011-05-03 00:46:49 +00003239 // Check that multiplying with the unfolded offset doesn't overflow.
3240 if (F.UnfoldedOffset != 0) {
Dan Gohman1b58d452011-05-23 21:07:39 +00003241 if (F.UnfoldedOffset == INT64_MIN && Factor == -1)
3242 continue;
Dan Gohmancca82142011-05-03 00:46:49 +00003243 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
3244 if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
3245 continue;
3246 }
3247
Dan Gohman572645c2010-02-12 10:34:29 +00003248 // If we make it here and it's legal, add it.
3249 (void)InsertFormula(LU, LUIdx, F);
3250 next:;
3251 }
3252}
3253
3254/// GenerateScales - Generate stride factor reuse formulae by making use of
3255/// scaled-offset address modes, for example.
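///
/// For example (hypothetical formula), with a factor of 4 the base register
/// {0,+,4} can be divided through to become the scaled register {0,+,1} with
/// Scale = 4, matching a base + 4*index addressing mode where one is legal.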
Dan Gohmanea507f52010-05-20 19:44:23 +00003256void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
Dan Gohman572645c2010-02-12 10:34:29 +00003257 // Determine the integer type for the base formula.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00003258 Type *IntTy = Base.getType();
Dan Gohman572645c2010-02-12 10:34:29 +00003259 if (!IntTy) return;
3260
3261 // If this Formula already has a scaled register, we can't add another one.
3262 if (Base.AM.Scale != 0) return;
3263
3264 // Check each interesting stride.
3265 for (SmallSetVector<int64_t, 8>::const_iterator
3266 I = Factors.begin(), E = Factors.end(); I != E; ++I) {
3267 int64_t Factor = *I;
3268
3269 Base.AM.Scale = Factor;
3270 Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
3271 // Check whether this scale is going to be legal.
3272 if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
3273 LU.Kind, LU.AccessTy, TLI)) {
3274      // As a special case, handle out-of-loop Basic users specially.
3275 // TODO: Reconsider this special case.
3276 if (LU.Kind == LSRUse::Basic &&
3277 isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
3278 LSRUse::Special, LU.AccessTy, TLI) &&
3279 LU.AllFixupsOutsideLoop)
3280 LU.Kind = LSRUse::Special;
3281 else
3282 continue;
3283 }
3284 // For an ICmpZero, negating a solitary base register won't lead to
3285 // new solutions.
3286 if (LU.Kind == LSRUse::ICmpZero &&
3287 !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
3288 continue;
3289 // For each addrec base reg, apply the scale, if possible.
3290 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3291 if (const SCEVAddRecExpr *AR =
3292 dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
Dan Gohmandeff6212010-05-03 22:09:21 +00003293 const SCEV *FactorS = SE.getConstant(IntTy, Factor);
Dan Gohman572645c2010-02-12 10:34:29 +00003294 if (FactorS->isZero())
3295 continue;
3296 // Divide out the factor, ignoring high bits, since we'll be
3297 // scaling the value back up in the end.
Dan Gohmanf09b7122010-02-19 19:35:48 +00003298 if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
Dan Gohman572645c2010-02-12 10:34:29 +00003299 // TODO: This could be optimized to avoid all the copying.
3300 Formula F = Base;
3301 F.ScaledReg = Quotient;
Dan Gohman5ce6d052010-05-20 15:17:54 +00003302 F.DeleteBaseReg(F.BaseRegs[i]);
Dan Gohman572645c2010-02-12 10:34:29 +00003303 (void)InsertFormula(LU, LUIdx, F);
3304 }
3305 }
3306 }
3307}
3308
3309/// GenerateTruncates - Generate reuse formulae from different IV types.
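///
/// For example, if this use works on i32 values but an i64 IV is available and
/// the i64-to-i32 truncate is free on the target, the formula's registers can
/// be any-extended to i64 so they may be shared with the wider uses.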
Dan Gohmanea507f52010-05-20 19:44:23 +00003310void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
Dan Gohman572645c2010-02-12 10:34:29 +00003311 // This requires TargetLowering to tell us which truncates are free.
3312 if (!TLI) return;
3313
3314 // Don't bother truncating symbolic values.
3315 if (Base.AM.BaseGV) return;
3316
3317 // Determine the integer type for the base formula.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00003318 Type *DstTy = Base.getType();
Dan Gohman572645c2010-02-12 10:34:29 +00003319 if (!DstTy) return;
3320 DstTy = SE.getEffectiveSCEVType(DstTy);
3321
Chris Lattnerdb125cf2011-07-18 04:54:35 +00003322 for (SmallSetVector<Type *, 4>::const_iterator
Dan Gohman572645c2010-02-12 10:34:29 +00003323 I = Types.begin(), E = Types.end(); I != E; ++I) {
Chris Lattnerdb125cf2011-07-18 04:54:35 +00003324 Type *SrcTy = *I;
Dan Gohman572645c2010-02-12 10:34:29 +00003325 if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
3326 Formula F = Base;
3327
3328 if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
3329 for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
3330 JE = F.BaseRegs.end(); J != JE; ++J)
3331 *J = SE.getAnyExtendExpr(*J, SrcTy);
3332
3333 // TODO: This assumes we've done basic processing on all uses and
3334 // have an idea what the register usage is.
3335 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
3336 continue;
3337
3338 (void)InsertFormula(LU, LUIdx, F);
3339 }
3340 }
3341}
3342
3343namespace {
3344
Dan Gohman6020d852010-02-14 18:51:20 +00003345/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
Dan Gohman572645c2010-02-12 10:34:29 +00003346/// defer modifications so that the search phase doesn't have to worry about
3347/// the data structures moving underneath it.
3348struct WorkItem {
3349 size_t LUIdx;
3350 int64_t Imm;
3351 const SCEV *OrigReg;
3352
3353 WorkItem(size_t LI, int64_t I, const SCEV *R)
3354 : LUIdx(LI), Imm(I), OrigReg(R) {}
3355
3356 void print(raw_ostream &OS) const;
3357 void dump() const;
3358};
3359
3360}
3361
3362void WorkItem::print(raw_ostream &OS) const {
3363 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
3364 << " , add offset " << Imm;
3365}
3366
3367void WorkItem::dump() const {
3368 print(errs()); errs() << '\n';
3369}
3370
3371/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
3372/// distance apart and try to form reuse opportunities between them.
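///
/// For example (hypothetical registers), if one use references (%p + 4) and
/// another references (%p + 8), the latter can be rewritten to reuse (%p + 4)
/// with the remaining 4 folded into its immediate offset, saving a register.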
3373void LSRInstance::GenerateCrossUseConstantOffsets() {
3374 // Group the registers by their value without any added constant offset.
3375 typedef std::map<int64_t, const SCEV *> ImmMapTy;
3376 typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
3377 RegMapTy Map;
3378 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
3379 SmallVector<const SCEV *, 8> Sequence;
3380 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
3381 I != E; ++I) {
3382 const SCEV *Reg = *I;
3383 int64_t Imm = ExtractImmediate(Reg, SE);
3384 std::pair<RegMapTy::iterator, bool> Pair =
3385 Map.insert(std::make_pair(Reg, ImmMapTy()));
3386 if (Pair.second)
3387 Sequence.push_back(Reg);
3388 Pair.first->second.insert(std::make_pair(Imm, *I));
3389 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
3390 }
3391
3392 // Now examine each set of registers with the same base value. Build up
3393 // a list of work to do and do the work in a separate step so that we're
3394 // not adding formulae and register counts while we're searching.
Dan Gohman191bd642010-09-01 01:45:53 +00003395 SmallVector<WorkItem, 32> WorkItems;
3396 SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
Dan Gohman572645c2010-02-12 10:34:29 +00003397 for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
3398 E = Sequence.end(); I != E; ++I) {
3399 const SCEV *Reg = *I;
3400 const ImmMapTy &Imms = Map.find(Reg)->second;
3401
Dan Gohmancd045c02010-02-12 19:20:37 +00003402 // It's not worthwhile looking for reuse if there's only one offset.
3403 if (Imms.size() == 1)
3404 continue;
3405
Dan Gohman572645c2010-02-12 10:34:29 +00003406 DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
3407 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
3408 J != JE; ++J)
3409 dbgs() << ' ' << J->first;
3410 dbgs() << '\n');
3411
3412 // Examine each offset.
3413 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
3414 J != JE; ++J) {
3415 const SCEV *OrigReg = J->second;
3416
3417 int64_t JImm = J->first;
3418 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
3419
3420 if (!isa<SCEVConstant>(OrigReg) &&
3421 UsedByIndicesMap[Reg].count() == 1) {
3422 DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
3423 continue;
3424 }
3425
3426      // Conservatively examine offsets between this orig reg and a few selected
3427 // other orig regs.
3428 ImmMapTy::const_iterator OtherImms[] = {
3429 Imms.begin(), prior(Imms.end()),
Dan Gohmancca82142011-05-03 00:46:49 +00003430 Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
Dan Gohman572645c2010-02-12 10:34:29 +00003431 };
3432 for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
3433 ImmMapTy::const_iterator M = OtherImms[i];
Dan Gohmancd045c02010-02-12 19:20:37 +00003434 if (M == J || M == JE) continue;
Dan Gohman572645c2010-02-12 10:34:29 +00003435
3436 // Compute the difference between the two.
3437 int64_t Imm = (uint64_t)JImm - M->first;
3438 for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
Dan Gohman191bd642010-09-01 01:45:53 +00003439 LUIdx = UsedByIndices.find_next(LUIdx))
Dan Gohman572645c2010-02-12 10:34:29 +00003440 // Make a memo of this use, offset, and register tuple.
Dan Gohman191bd642010-09-01 01:45:53 +00003441 if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
3442 WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
Evan Cheng586f69a2009-11-12 07:35:05 +00003443 }
3444 }
3445 }
3446
Dan Gohman572645c2010-02-12 10:34:29 +00003447 Map.clear();
3448 Sequence.clear();
3449 UsedByIndicesMap.clear();
Dan Gohman191bd642010-09-01 01:45:53 +00003450 UniqueItems.clear();
Dan Gohman572645c2010-02-12 10:34:29 +00003451
3452 // Now iterate through the worklist and add new formulae.
3453 for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
3454 E = WorkItems.end(); I != E; ++I) {
3455 const WorkItem &WI = *I;
3456 size_t LUIdx = WI.LUIdx;
3457 LSRUse &LU = Uses[LUIdx];
3458 int64_t Imm = WI.Imm;
3459 const SCEV *OrigReg = WI.OrigReg;
3460
Chris Lattnerdb125cf2011-07-18 04:54:35 +00003461 Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
Dan Gohman572645c2010-02-12 10:34:29 +00003462 const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
3463 unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
3464
Dan Gohman3f46a3a2010-03-01 17:49:51 +00003465 // TODO: Use a more targeted data structure.
Dan Gohman572645c2010-02-12 10:34:29 +00003466 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
Dan Gohman9f383eb2010-05-20 22:25:20 +00003467 const Formula &F = LU.Formulae[L];
Dan Gohman572645c2010-02-12 10:34:29 +00003468 // Use the immediate in the scaled register.
3469 if (F.ScaledReg == OrigReg) {
3470 int64_t Offs = (uint64_t)F.AM.BaseOffs +
3471 Imm * (uint64_t)F.AM.Scale;
3472 // Don't create 50 + reg(-50).
3473 if (F.referencesReg(SE.getSCEV(
3474 ConstantInt::get(IntTy, -(uint64_t)Offs))))
3475 continue;
3476 Formula NewF = F;
3477 NewF.AM.BaseOffs = Offs;
3478 if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
3479 LU.Kind, LU.AccessTy, TLI))
3480 continue;
3481 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
3482
3483 // If the new scale is a constant in a register, and adding the constant
3484 // value to the immediate would produce a value closer to zero than the
3485 // immediate itself, then the formula isn't worthwhile.
3486 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
Chris Lattnerc73b24d2011-07-15 06:08:15 +00003487 if (C->getValue()->isNegative() !=
Dan Gohman572645c2010-02-12 10:34:29 +00003488 (NewF.AM.BaseOffs < 0) &&
3489 (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
Dan Gohmane0567812010-04-08 23:03:40 +00003490 .ule(abs64(NewF.AM.BaseOffs)))
Dan Gohman572645c2010-02-12 10:34:29 +00003491 continue;
3492
3493 // OK, looks good.
3494 (void)InsertFormula(LU, LUIdx, NewF);
3495 } else {
3496 // Use the immediate in a base register.
3497 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
3498 const SCEV *BaseReg = F.BaseRegs[N];
3499 if (BaseReg != OrigReg)
3500 continue;
3501 Formula NewF = F;
3502 NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
3503 if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
Dan Gohmancca82142011-05-03 00:46:49 +00003504 LU.Kind, LU.AccessTy, TLI)) {
3505 if (!TLI ||
3506 !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
3507 continue;
3508 NewF = F;
3509 NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
3510 }
Dan Gohman572645c2010-02-12 10:34:29 +00003511 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
3512
3513 // If the new formula has a constant in a register, and adding the
3514 // constant value to the immediate would produce a value closer to
3515 // zero than the immediate itself, then the formula isn't worthwhile.
3516 for (SmallVectorImpl<const SCEV *>::const_iterator
3517 J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
3518 J != JE; ++J)
3519 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
Dan Gohman360026f2010-05-18 23:48:08 +00003520 if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt(
3521 abs64(NewF.AM.BaseOffs)) &&
3522 (C->getValue()->getValue() +
3523 NewF.AM.BaseOffs).countTrailingZeros() >=
3524 CountTrailingZeros_64(NewF.AM.BaseOffs))
Dan Gohman572645c2010-02-12 10:34:29 +00003525 goto skip_formula;
3526
3527 // Ok, looks good.
3528 (void)InsertFormula(LU, LUIdx, NewF);
3529 break;
3530 skip_formula:;
3531 }
3532 }
3533 }
3534 }
Dale Johannesenc1acc3f2009-05-11 17:15:42 +00003535}
3536
Dan Gohman572645c2010-02-12 10:34:29 +00003537/// GenerateAllReuseFormulae - Generate formulae for each use.
3538void
3539LSRInstance::GenerateAllReuseFormulae() {
Dan Gohmanc2385a02010-02-16 01:42:53 +00003540 // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
Dan Gohman572645c2010-02-12 10:34:29 +00003541 // queries are more precise.
3542 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
3543 LSRUse &LU = Uses[LUIdx];
3544 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
3545 GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
3546 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
3547 GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
3548 }
3549 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
3550 LSRUse &LU = Uses[LUIdx];
3551 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
3552 GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
3553 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
3554 GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
3555 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
3556 GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
3557 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
3558 GenerateScales(LU, LUIdx, LU.Formulae[i]);
Dan Gohmanc2385a02010-02-16 01:42:53 +00003559 }
3560 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
3561 LSRUse &LU = Uses[LUIdx];
Dan Gohman572645c2010-02-12 10:34:29 +00003562 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
3563 GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
3564 }
3565
3566 GenerateCrossUseConstantOffsets();
Dan Gohman3902f9f2010-08-29 15:21:38 +00003567
3568 DEBUG(dbgs() << "\n"
3569 "After generating reuse formulae:\n";
3570 print_uses(dbgs()));
Dan Gohman572645c2010-02-12 10:34:29 +00003571}
3572
Dan Gohmanf63d70f2010-10-07 23:43:09 +00003573/// If there are multiple formulae with the same set of registers used
Dan Gohman572645c2010-02-12 10:34:29 +00003574/// by other uses, pick the best one and delete the others.
3575void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
Dan Gohmanfc7744b2010-10-07 23:52:18 +00003576 DenseSet<const SCEV *> VisitedRegs;
3577 SmallPtrSet<const SCEV *, 16> Regs;
Andrew Trick8a5d7922011-12-06 03:13:31 +00003578 SmallPtrSet<const SCEV *, 16> LoserRegs;
Dan Gohman572645c2010-02-12 10:34:29 +00003579#ifndef NDEBUG
Dan Gohmanc6519f92010-05-20 20:05:31 +00003580 bool ChangedFormulae = false;
Dan Gohman572645c2010-02-12 10:34:29 +00003581#endif
3582
3583 // Collect the best formula for each unique set of shared registers. This
3584 // is reset for each use.
3585 typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
3586 BestFormulaeTy;
3587 BestFormulaeTy BestFormulae;
3588
3589 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
3590 LSRUse &LU = Uses[LUIdx];
Dan Gohmanea507f52010-05-20 19:44:23 +00003591 DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');
Dan Gohman572645c2010-02-12 10:34:29 +00003592
Dan Gohmanb2df4332010-05-18 23:42:37 +00003593 bool Any = false;
Dan Gohman572645c2010-02-12 10:34:29 +00003594 for (size_t FIdx = 0, NumForms = LU.Formulae.size();
3595 FIdx != NumForms; ++FIdx) {
3596 Formula &F = LU.Formulae[FIdx];
3597
Andrew Trick8a5d7922011-12-06 03:13:31 +00003598 // Some formulas are instant losers. For example, they may depend on
3599 // nonexistent AddRecs from other loops. These need to be filtered
3600 // immediately, otherwise heuristics could choose them over others leading
3601 // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
3602 // avoids the need to recompute this information across formulae using the
3603 // same bad AddRec. Passing LoserRegs is also essential unless we remove
3604 // the corresponding bad register from the Regs set.
3605 Cost CostF;
3606 Regs.clear();
3607 CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT,
3608 &LoserRegs);
3609 if (CostF.isLoser()) {
3610 // During initial formula generation, undesirable formulae are generated
3611 // by uses within other loops that have some non-trivial address mode or
3612 // use the postinc form of the IV. LSR needs to provide these formulae
3613 // as the basis of rediscovering the desired formula that uses an AddRec
3614 // corresponding to the existing phi. Once all formulae have been
3615 // generated, these initial losers may be pruned.
3616 DEBUG(dbgs() << " Filtering loser "; F.print(dbgs());
3617 dbgs() << "\n");
Dan Gohman572645c2010-02-12 10:34:29 +00003618 }
Andrew Trick8a5d7922011-12-06 03:13:31 +00003619 else {
3620 SmallVector<const SCEV *, 2> Key;
3621 for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
3622 JE = F.BaseRegs.end(); J != JE; ++J) {
3623 const SCEV *Reg = *J;
3624 if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
3625 Key.push_back(Reg);
3626 }
3627 if (F.ScaledReg &&
3628 RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
3629 Key.push_back(F.ScaledReg);
3630        // An unstable sort by host pointer order is fine here, because the
3631        // sorted key is only used for uniquifying.
3632 std::sort(Key.begin(), Key.end());
Dan Gohman572645c2010-02-12 10:34:29 +00003633
Andrew Trick8a5d7922011-12-06 03:13:31 +00003634 std::pair<BestFormulaeTy::const_iterator, bool> P =
3635 BestFormulae.insert(std::make_pair(Key, FIdx));
3636 if (P.second)
3637 continue;
3638
Dan Gohman572645c2010-02-12 10:34:29 +00003639 Formula &Best = LU.Formulae[P.first->second];
Dan Gohmanfc7744b2010-10-07 23:52:18 +00003640
Dan Gohmanfc7744b2010-10-07 23:52:18 +00003641 Cost CostBest;
Dan Gohmanfc7744b2010-10-07 23:52:18 +00003642 Regs.clear();
Andrew Trick8a5d7922011-12-06 03:13:31 +00003643 CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
Dan Gohmanfc7744b2010-10-07 23:52:18 +00003644 if (CostF < CostBest)
Dan Gohman572645c2010-02-12 10:34:29 +00003645 std::swap(F, Best);
Dan Gohman6458ff92010-05-18 22:37:37 +00003646 DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
Dan Gohman572645c2010-02-12 10:34:29 +00003647 dbgs() << "\n"
Dan Gohman6458ff92010-05-18 22:37:37 +00003648 " in favor of formula "; Best.print(dbgs());
Dan Gohman572645c2010-02-12 10:34:29 +00003649 dbgs() << '\n');
Dan Gohman572645c2010-02-12 10:34:29 +00003650 }
Andrew Trick8a5d7922011-12-06 03:13:31 +00003651#ifndef NDEBUG
3652 ChangedFormulae = true;
3653#endif
3654 LU.DeleteFormula(F);
3655 --FIdx;
3656 --NumForms;
3657 Any = true;
Dan Gohman59dc6032010-05-07 23:36:59 +00003658 }
3659
Dan Gohman57aaa0b2010-05-18 23:55:57 +00003660 // Now that we've filtered out some formulae, recompute the Regs set.
Dan Gohmanb2df4332010-05-18 23:42:37 +00003661 if (Any)
3662 LU.RecomputeRegs(LUIdx, RegUses);
Dan Gohman59dc6032010-05-07 23:36:59 +00003663
3664 // Reset this to prepare for the next use.
Dan Gohman572645c2010-02-12 10:34:29 +00003665 BestFormulae.clear();
3666 }
3667
Dan Gohmanc6519f92010-05-20 20:05:31 +00003668 DEBUG(if (ChangedFormulae) {
Dan Gohman9214b822010-02-13 02:06:02 +00003669 dbgs() << "\n"
3670 "After filtering out undesirable candidates:\n";
Dan Gohman572645c2010-02-12 10:34:29 +00003671 print_uses(dbgs());
3672 });
3673}
3674
Dan Gohmand079c302010-05-18 22:51:59 +00003675// This is a rough guess that seems to work fairly well.
3676static const size_t ComplexityLimit = UINT16_MAX;
3677
3678/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
3679/// solutions the solver might have to consider. It almost never considers
3680/// this many solutions because it prunes the search space, but the pruning
3681/// isn't always sufficient.
3682size_t LSRInstance::EstimateSearchSpaceComplexity() const {
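  // Worked example (illustrative numbers): with three uses carrying 4, 6, and
  // 10 formulae, the worst case is 4 * 6 * 10 = 240 candidate solutions; the
  // running product stops growing once it reaches ComplexityLimit.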
Dan Gohman0d6715a2010-10-07 23:37:58 +00003683 size_t Power = 1;
Dan Gohmand079c302010-05-18 22:51:59 +00003684 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
3685 E = Uses.end(); I != E; ++I) {
3686 size_t FSize = I->Formulae.size();
3687 if (FSize >= ComplexityLimit) {
3688 Power = ComplexityLimit;
3689 break;
3690 }
3691 Power *= FSize;
3692 if (Power >= ComplexityLimit)
3693 break;
3694 }
3695 return Power;
3696}
3697
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00003698/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
3699/// of the registers of another formula, it won't help reduce register
3700/// pressure (though it may not necessarily hurt register pressure); remove
3701/// it to simplify the system.
3702void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
Dan Gohmana2086b32010-05-19 23:43:12 +00003703 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
3704 DEBUG(dbgs() << "The search space is too complex.\n");
3705
3706 DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
3707 "which use a superset of registers used by other "
3708 "formulae.\n");
3709
3710 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
3711 LSRUse &LU = Uses[LUIdx];
3712 bool Any = false;
3713 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
3714 Formula &F = LU.Formulae[i];
Dan Gohmanf7ff37d2010-05-20 20:00:41 +00003715 // Look for a formula with a constant or GV in a register. If the use
3716 // also has a formula with that same value in an immediate field,
3717 // delete the one that uses a register.
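        // Illustrative example (not from the original source): a formula with
        // BaseRegs == { %x, {7} } is redundant if this use already has the
        // formula { %x } with BaseOffs == 7; the version that burns a register
        // on the constant is the one deleted below.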
Dan Gohmana2086b32010-05-19 23:43:12 +00003718 for (SmallVectorImpl<const SCEV *>::const_iterator
3719 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
3720 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
3721 Formula NewF = F;
3722 NewF.AM.BaseOffs += C->getValue()->getSExtValue();
3723 NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
3724 (I - F.BaseRegs.begin()));
3725 if (LU.HasFormulaWithSameRegs(NewF)) {
3726 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
3727 LU.DeleteFormula(F);
3728 --i;
3729 --e;
3730 Any = true;
3731 break;
3732 }
3733 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
3734 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
3735 if (!F.AM.BaseGV) {
3736 Formula NewF = F;
3737 NewF.AM.BaseGV = GV;
3738 NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
3739 (I - F.BaseRegs.begin()));
3740 if (LU.HasFormulaWithSameRegs(NewF)) {
3741 DEBUG(dbgs() << " Deleting "; F.print(dbgs());
3742 dbgs() << '\n');
3743 LU.DeleteFormula(F);
3744 --i;
3745 --e;
3746 Any = true;
3747 break;
3748 }
3749 }
3750 }
3751 }
3752 }
3753 if (Any)
3754 LU.RecomputeRegs(LUIdx, RegUses);
3755 }
3756
3757 DEBUG(dbgs() << "After pre-selection:\n";
3758 print_uses(dbgs()));
3759 }
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00003760}
Dan Gohmana2086b32010-05-19 23:43:12 +00003761
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00003762/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
3763/// for expressions like A, A+1, A+2, etc., allocate a single register for
3764/// them.
3765void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
Dan Gohmana2086b32010-05-19 23:43:12 +00003766 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
3767 DEBUG(dbgs() << "The search space is too complex.\n");
3768
3769 DEBUG(dbgs() << "Narrowing the search space by assuming that uses "
3770 "separated by a constant offset will use the same "
3771 "registers.\n");
3772
Dan Gohmanf7ff37d2010-05-20 20:00:41 +00003773 // This is especially useful for unrolled loops.
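    // Illustrative example: after unrolling by four, uses of A, A+1, A+2, and
    // A+3 can be folded into a single use whose fixups carry the constant
    // offsets, so all of them share one register.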
3774
Dan Gohmana2086b32010-05-19 23:43:12 +00003775 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
3776 LSRUse &LU = Uses[LUIdx];
Dan Gohman402d4352010-05-20 20:33:18 +00003777 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
3778 E = LU.Formulae.end(); I != E; ++I) {
3779 const Formula &F = *I;
Dan Gohmana2086b32010-05-19 23:43:12 +00003780 if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) {
Dan Gohman191bd642010-09-01 01:45:53 +00003781 if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) {
3782 if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs,
Dan Gohmana2086b32010-05-19 23:43:12 +00003783 /*HasBaseReg=*/false,
3784 LU.Kind, LU.AccessTy)) {
3785 DEBUG(dbgs() << " Deleting use "; LU.print(dbgs());
3786 dbgs() << '\n');
3787
3788 LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
3789
Dan Gohman191bd642010-09-01 01:45:53 +00003790              // Update the fixups to reference the new use.
3791 for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
3792 E = Fixups.end(); I != E; ++I) {
3793 LSRFixup &Fixup = *I;
3794 if (Fixup.LUIdx == LUIdx) {
3795 Fixup.LUIdx = LUThatHas - &Uses.front();
3796 Fixup.Offset += F.AM.BaseOffs;
Dan Gohmandd3db0e2010-10-07 23:36:45 +00003797 // Add the new offset to LUThatHas' offset list.
3798 if (LUThatHas->Offsets.back() != Fixup.Offset) {
3799 LUThatHas->Offsets.push_back(Fixup.Offset);
3800 if (Fixup.Offset > LUThatHas->MaxOffset)
3801 LUThatHas->MaxOffset = Fixup.Offset;
3802 if (Fixup.Offset < LUThatHas->MinOffset)
3803 LUThatHas->MinOffset = Fixup.Offset;
3804 }
Dan Gohman191bd642010-09-01 01:45:53 +00003805 DEBUG(dbgs() << "New fixup has offset "
3806 << Fixup.Offset << '\n');
3807 }
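                // DeleteUse below fills the deleted slot by swapping in the
                // last use, so fixups that referenced the last use must be
                // redirected to LUIdx here.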
3808 if (Fixup.LUIdx == NumUses-1)
3809 Fixup.LUIdx = LUIdx;
3810 }
3811
Dan Gohmanc2921ea2010-10-08 19:33:26 +00003812 // Delete formulae from the new use which are no longer legal.
3813 bool Any = false;
3814 for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
3815 Formula &F = LUThatHas->Formulae[i];
3816 if (!isLegalUse(F.AM,
3817 LUThatHas->MinOffset, LUThatHas->MaxOffset,
3818 LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
3819 DEBUG(dbgs() << " Deleting "; F.print(dbgs());
3820 dbgs() << '\n');
3821 LUThatHas->DeleteFormula(F);
3822 --i;
3823 --e;
3824 Any = true;
3825 }
3826 }
3827 if (Any)
3828 LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
3829
Dan Gohmana2086b32010-05-19 23:43:12 +00003830 // Delete the old use.
Dan Gohmanc6897702010-10-07 23:33:43 +00003831 DeleteUse(LU, LUIdx);
Dan Gohmana2086b32010-05-19 23:43:12 +00003832 --LUIdx;
3833 --NumUses;
3834 break;
3835 }
3836 }
3837 }
3838 }
3839 }
3840
3841 DEBUG(dbgs() << "After pre-selection:\n";
3842 print_uses(dbgs()));
3843 }
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00003844}
Dan Gohmana2086b32010-05-19 23:43:12 +00003845
Andrew Trick3228cc22011-03-14 16:50:06 +00003846/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
Dan Gohman4f7e18d2010-08-29 16:39:22 +00003847/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
3848/// we've done more filtering, as it may be able to find more formulae to
3849/// eliminate.
3850void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
3851 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
3852 DEBUG(dbgs() << "The search space is too complex.\n");
3853
3854 DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
3855 "undesirable dedicated registers.\n");
3856
3857 FilterOutUndesirableDedicatedRegisters();
3858
3859 DEBUG(dbgs() << "After pre-selection:\n";
3860 print_uses(dbgs()));
3861 }
3862}
3863
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00003864/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
3865/// to be profitable, and then in any use which has any reference to that
3866/// register, delete all formulae which do not reference that register.
3867void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
Dan Gohman76c315a2010-05-20 20:52:00 +00003868 // With all other options exhausted, loop until the system is simple
3869 // enough to handle.
Dan Gohman572645c2010-02-12 10:34:29 +00003870 SmallPtrSet<const SCEV *, 4> Taken;
Dan Gohmand079c302010-05-18 22:51:59 +00003871 while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
Dan Gohman572645c2010-02-12 10:34:29 +00003872    // Ok, we have too many formulae on our hands to handle conveniently.
3873 // Use a rough heuristic to thin out the list.
Dan Gohman0da751b2010-05-18 22:41:32 +00003874 DEBUG(dbgs() << "The search space is too complex.\n");
Dan Gohman572645c2010-02-12 10:34:29 +00003875
3876 // Pick the register which is used by the most LSRUses, which is likely
3877 // to be a good reuse register candidate.
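    // Note that this is a greedy choice: committing to the most widely used
    // register can, in principle, rule out the best overall solution, but it
    // keeps the search tractable.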
3878 const SCEV *Best = 0;
3879 unsigned BestNum = 0;
3880 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
3881 I != E; ++I) {
3882 const SCEV *Reg = *I;
3883 if (Taken.count(Reg))
3884 continue;
3885      if (!Best) {
3886        Best = Reg; BestNum = RegUses.getUsedByIndices(Reg).count();
3887      } else {
3888 unsigned Count = RegUses.getUsedByIndices(Reg).count();
3889 if (Count > BestNum) {
3890 Best = Reg;
3891 BestNum = Count;
3892 }
3893 }
3894 }
3895
3896 DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
Dan Gohman3f46a3a2010-03-01 17:49:51 +00003897 << " will yield profitable reuse.\n");
Dan Gohman572645c2010-02-12 10:34:29 +00003898 Taken.insert(Best);
3899
3900    // In any use with formulae which reference this register, delete formulae
3901 // which don't reference it.
Dan Gohmanb2df4332010-05-18 23:42:37 +00003902 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
3903 LSRUse &LU = Uses[LUIdx];
Dan Gohman572645c2010-02-12 10:34:29 +00003904 if (!LU.Regs.count(Best)) continue;
3905
Dan Gohmanb2df4332010-05-18 23:42:37 +00003906 bool Any = false;
Dan Gohman572645c2010-02-12 10:34:29 +00003907 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
3908 Formula &F = LU.Formulae[i];
3909 if (!F.referencesReg(Best)) {
3910 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
Dan Gohmand69d6282010-05-18 22:39:15 +00003911 LU.DeleteFormula(F);
Dan Gohman572645c2010-02-12 10:34:29 +00003912 --e;
3913 --i;
Dan Gohmanb2df4332010-05-18 23:42:37 +00003914 Any = true;
Dan Gohman59dc6032010-05-07 23:36:59 +00003915 assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
Dan Gohman572645c2010-02-12 10:34:29 +00003916 continue;
3917 }
Dan Gohman572645c2010-02-12 10:34:29 +00003918 }
Dan Gohmanb2df4332010-05-18 23:42:37 +00003919
3920 if (Any)
3921 LU.RecomputeRegs(LUIdx, RegUses);
Dan Gohman572645c2010-02-12 10:34:29 +00003922 }
3923
3924 DEBUG(dbgs() << "After pre-selection:\n";
3925 print_uses(dbgs()));
3926 }
3927}
3928
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00003929/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
3930/// formulae to choose from, use some rough heuristics to prune down the number
3931/// of formulae. This keeps the main solver from taking an extraordinary amount
3932/// of time in some worst-case scenarios.
3933void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
3934 NarrowSearchSpaceByDetectingSupersets();
3935 NarrowSearchSpaceByCollapsingUnrolledCode();
Dan Gohman4f7e18d2010-08-29 16:39:22 +00003936 NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
Dan Gohman4aa5c2e2010-08-29 16:09:42 +00003937 NarrowSearchSpaceByPickingWinnerRegs();
3938}
3939
Dan Gohman572645c2010-02-12 10:34:29 +00003940/// SolveRecurse - This is the recursive solver.
3941void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
3942 Cost &SolutionCost,
3943 SmallVectorImpl<const Formula *> &Workspace,
3944 const Cost &CurCost,
3945 const SmallPtrSet<const SCEV *, 16> &CurRegs,
3946 DenseSet<const SCEV *> &VisitedRegs) const {
3947 // Some ideas:
3948 // - prune more:
3949 // - use more aggressive filtering
3950 // - sort the formula so that the most profitable solutions are found first
3951 // - sort the uses too
3952 // - search faster:
Dan Gohman3f46a3a2010-03-01 17:49:51 +00003953 // - don't compute a cost, and then compare. compare while computing a cost
Dan Gohman572645c2010-02-12 10:34:29 +00003954 // and bail early.
3955 // - track register sets with SmallBitVector
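  // Rough shape of the recursion (descriptive note, not from the original
  // source): each level commits the next use to one of its formulae, Workspace
  // holds the partial assignment, and a branch is abandoned as soon as its
  // running cost fails to beat the best complete solution found so far.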
3956
3957 const LSRUse &LU = Uses[Workspace.size()];
3958
3959 // If this use references any register that's already a part of the
3960 // in-progress solution, consider it a requirement that a formula must
3961 // reference that register in order to be considered. This prunes out
3962 // unprofitable searching.
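  // For example (illustrative): if an earlier use already placed {0,+,4}<%L>
  // into the partial solution and that register also appears in this use's
  // formulae, then only the formulae that reference it are tried.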
3963 SmallSetVector<const SCEV *, 4> ReqRegs;
3964 for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
3965 E = CurRegs.end(); I != E; ++I)
Dan Gohman9214b822010-02-13 02:06:02 +00003966 if (LU.Regs.count(*I))
Dan Gohman572645c2010-02-12 10:34:29 +00003967 ReqRegs.insert(*I);
Dan Gohman572645c2010-02-12 10:34:29 +00003968
3969 SmallPtrSet<const SCEV *, 16> NewRegs;
3970 Cost NewCost;
3971 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
3972 E = LU.Formulae.end(); I != E; ++I) {
3973 const Formula &F = *I;
3974
3975    // Ignore formulae which miss any of the required registers.
Andrew Trickd1944542012-03-22 22:42:51 +00003976 bool SatisfiedReqReg = true;
Dan Gohman572645c2010-02-12 10:34:29 +00003977 for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
3978 JE = ReqRegs.end(); J != JE; ++J) {
3979 const SCEV *Reg = *J;
3980 if ((!F.ScaledReg || F.ScaledReg != Reg) &&
3981 std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
Andrew Trickd1944542012-03-22 22:42:51 +00003982 F.BaseRegs.end()) {
3983 SatisfiedReqReg = false;
3984 break;
3985 }
Dan Gohman572645c2010-02-12 10:34:29 +00003986 }
Andrew Trickd1944542012-03-22 22:42:51 +00003987 if (!SatisfiedReqReg) {
3988      // This formula misses a required register. We could clear ReqRegs and
3989      // retry if no formula satisfies them; currently, we simply give up.
3990 continue;
3991 }
Dan Gohman572645c2010-02-12 10:34:29 +00003992
3993 // Evaluate the cost of the current formula. If it's already worse than
3994 // the current best, prune the search at that point.
3995 NewCost = CurCost;
3996 NewRegs = CurRegs;
3997 NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
3998 if (NewCost < SolutionCost) {
3999 Workspace.push_back(&F);
4000 if (Workspace.size() != Uses.size()) {
4001 SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
4002 NewRegs, VisitedRegs);
4003 if (F.getNumRegs() == 1 && Workspace.size() == 1)
4004 VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
4005 } else {
4006 DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
Andrew Trick8bf295b2012-01-09 18:58:16 +00004007 dbgs() << ".\n Regs:";
Dan Gohman572645c2010-02-12 10:34:29 +00004008 for (SmallPtrSet<const SCEV *, 16>::const_iterator
4009 I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
4010 dbgs() << ' ' << **I;
4011 dbgs() << '\n');
4012
4013 SolutionCost = NewCost;
4014 Solution = Workspace;
4015 }
4016 Workspace.pop_back();
4017 }
Dan Gohman9214b822010-02-13 02:06:02 +00004018 }
Dan Gohman572645c2010-02-12 10:34:29 +00004019}
4020
Dan Gohman76c315a2010-05-20 20:52:00 +00004021/// Solve - Choose one formula from each use. Return the results in the given
4022/// Solution vector.
Dan Gohman572645c2010-02-12 10:34:29 +00004023void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
4024 SmallVector<const Formula *, 8> Workspace;
4025 Cost SolutionCost;
4026 SolutionCost.Loose();
4027 Cost CurCost;
4028 SmallPtrSet<const SCEV *, 16> CurRegs;
4029 DenseSet<const SCEV *> VisitedRegs;
4030 Workspace.reserve(Uses.size());
4031
Dan Gohmanf7ff37d2010-05-20 20:00:41 +00004032 // SolveRecurse does all the work.
Dan Gohman572645c2010-02-12 10:34:29 +00004033 SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
4034 CurRegs, VisitedRegs);
Andrew Trick80ef1b22011-09-27 00:44:14 +00004035 if (Solution.empty()) {
4036 DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
4037 return;
4038 }
Dan Gohman572645c2010-02-12 10:34:29 +00004039
4040 // Ok, we've now made all our decisions.
4041 DEBUG(dbgs() << "\n"
4042 "The chosen solution requires "; SolutionCost.print(dbgs());
4043 dbgs() << ":\n";
4044 for (size_t i = 0, e = Uses.size(); i != e; ++i) {
4045 dbgs() << " ";
4046 Uses[i].print(dbgs());
4047 dbgs() << "\n"
4048 " ";
4049 Solution[i]->print(dbgs());
4050 dbgs() << '\n';
4051 });
Dan Gohmana5528782010-05-20 20:59:23 +00004052
4053 assert(Solution.size() == Uses.size() && "Malformed solution!");
Dan Gohman572645c2010-02-12 10:34:29 +00004054}
4055
Dan Gohmane5f76872010-04-09 22:07:05 +00004056/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
4057/// the dominator tree far as we can go while still being dominated by the
4058/// input positions. This helps canonicalize the insert position, which
4059/// encourages sharing.
4060BasicBlock::iterator
4061LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
4062 const SmallVectorImpl<Instruction *> &Inputs)
4063 const {
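  // For example (illustrative): if every instruction in Inputs is defined in
  // the loop preheader, the insert position can be hoisted all the way out of
  // the loop, so several fixups end up expanding at one canonical point and
  // SCEVExpander can reuse the emitted instructions.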
4064 for (;;) {
4065 const Loop *IPLoop = LI.getLoopFor(IP->getParent());
4066 unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
4067
4068 BasicBlock *IDom;
Dan Gohmand974a0e2010-05-20 20:00:25 +00004069 for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
Dan Gohman0fe46d92010-05-20 22:46:54 +00004070 if (!Rung) return IP;
Dan Gohmand974a0e2010-05-20 20:00:25 +00004071 Rung = Rung->getIDom();
4072 if (!Rung) return IP;
4073 IDom = Rung->getBlock();
Dan Gohmane5f76872010-04-09 22:07:05 +00004074
4075 // Don't climb into a loop though.
4076 const Loop *IDomLoop = LI.getLoopFor(IDom);
4077 unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
4078 if (IDomDepth <= IPLoopDepth &&
4079 (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
4080 break;
4081 }
4082
4083 bool AllDominate = true;
4084 Instruction *BetterPos = 0;
4085 Instruction *Tentative = IDom->getTerminator();
4086 for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
4087 E = Inputs.end(); I != E; ++I) {
4088 Instruction *Inst = *I;
4089 if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
4090 AllDominate = false;
4091 break;
4092 }
4093 // Attempt to find an insert position in the middle of the block,
4094 // instead of at the end, so that it can be used for other expansions.
4095 if (IDom == Inst->getParent() &&
4096 (!BetterPos || DT.dominates(BetterPos, Inst)))
Douglas Gregor7d9663c2010-05-11 06:17:44 +00004097 BetterPos = llvm::next(BasicBlock::iterator(Inst));
Dan Gohmane5f76872010-04-09 22:07:05 +00004098 }
4099 if (!AllDominate)
4100 break;
4101 if (BetterPos)
4102 IP = BetterPos;
4103 else
4104 IP = Tentative;
4105 }
4106
4107 return IP;
4108}
4109
4110/// AdjustInsertPositionForExpand - Determine an insert position which will be
Dan Gohmand96eae82010-04-09 02:00:38 +00004111/// dominated by the operands and which will dominate the result.
4112BasicBlock::iterator
Andrew Trickb5c26ef2012-01-20 07:41:13 +00004113LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
Dan Gohmane5f76872010-04-09 22:07:05 +00004114 const LSRFixup &LF,
Andrew Trickb5c26ef2012-01-20 07:41:13 +00004115 const LSRUse &LU,
4116 SCEVExpander &Rewriter) const {
Dan Gohmand96eae82010-04-09 02:00:38 +00004117  // Collect some instructions which the expanded replacement must be
Dan Gohman448db1c2010-04-07 22:27:08 +00004118  // dominated by; any operands that will be required in the expansion must
Dan Gohman572645c2010-02-12 10:34:29 +00004119  // dominate the chosen insert position.
4120 SmallVector<Instruction *, 4> Inputs;
4121 if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
4122 Inputs.push_back(I);
4123 if (LU.Kind == LSRUse::ICmpZero)
4124 if (Instruction *I =
4125 dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
4126 Inputs.push_back(I);
Dan Gohman448db1c2010-04-07 22:27:08 +00004127 if (LF.PostIncLoops.count(L)) {
4128 if (LF.isUseFullyOutsideLoop(L))
Dan Gohman069d6f32010-03-02 01:59:21 +00004129 Inputs.push_back(L->getLoopLatch()->getTerminator());
4130 else
4131 Inputs.push_back(IVIncInsertPos);
4132 }
Dan Gohman701a4ae2010-04-08 05:57:57 +00004133 // The expansion must also be dominated by the increment positions of any
4134  // loops for which it is using post-inc mode.
4135 for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
4136 E = LF.PostIncLoops.end(); I != E; ++I) {
4137 const Loop *PIL = *I;
4138 if (PIL == L) continue;
4139
Dan Gohmane5f76872010-04-09 22:07:05 +00004140 // Be dominated by the loop exit.
Dan Gohman701a4ae2010-04-08 05:57:57 +00004141 SmallVector<BasicBlock *, 4> ExitingBlocks;
4142 PIL->getExitingBlocks(ExitingBlocks);
4143 if (!ExitingBlocks.empty()) {
4144 BasicBlock *BB = ExitingBlocks[0];
4145 for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
4146 BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
4147 Inputs.push_back(BB->getTerminator());
4148 }
4149 }
Dan Gohman572645c2010-02-12 10:34:29 +00004150
Andrew Trickb5c26ef2012-01-20 07:41:13 +00004151 assert(!isa<PHINode>(LowestIP) && !isa<LandingPadInst>(LowestIP)
4152 && !isa<DbgInfoIntrinsic>(LowestIP) &&
4153 "Insertion point must be a normal instruction");
4154
Dan Gohman572645c2010-02-12 10:34:29 +00004155 // Then, climb up the immediate dominator tree as far as we can go while
4156 // still being dominated by the input positions.
Andrew Trickb5c26ef2012-01-20 07:41:13 +00004157 BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);
Dan Gohmand96eae82010-04-09 02:00:38 +00004158
4159 // Don't insert instructions before PHI nodes.
Dan Gohman572645c2010-02-12 10:34:29 +00004160 while (isa<PHINode>(IP)) ++IP;
Dan Gohmand96eae82010-04-09 02:00:38 +00004161
Bill Wendlinga4c86ab2011-08-24 21:06:46 +00004162 // Ignore landingpad instructions.
4163 while (isa<LandingPadInst>(IP)) ++IP;
4164
Dan Gohmand96eae82010-04-09 02:00:38 +00004165 // Ignore debug intrinsics.
Dan Gohman449f31c2010-03-26 00:33:27 +00004166 while (isa<DbgInfoIntrinsic>(IP)) ++IP;
Dan Gohman572645c2010-02-12 10:34:29 +00004167
Andrew Trickb5c26ef2012-01-20 07:41:13 +00004168 // Set IP below instructions recently inserted by SCEVExpander. This keeps the
4169 // IP consistent across expansions and allows the previously inserted
4170 // instructions to be reused by subsequent expansion.
4171 while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP;
4172
Dan Gohmand96eae82010-04-09 02:00:38 +00004173 return IP;
4174}
4175
Dan Gohman76c315a2010-05-20 20:52:00 +00004176/// Expand - Emit instructions for the leading candidate expression for this
4177/// LSRUse (this is called "expanding").
Dan Gohmand96eae82010-04-09 02:00:38 +00004178Value *LSRInstance::Expand(const LSRFixup &LF,
4179 const Formula &F,
4180 BasicBlock::iterator IP,
4181 SCEVExpander &Rewriter,
4182 SmallVectorImpl<WeakVH> &DeadInsts) const {
4183 const LSRUse &LU = Uses[LF.LUIdx];
4184
4185  // Determine an insert position which will be dominated by the operands and
4186 // which will dominate the result.
Andrew Trickb5c26ef2012-01-20 07:41:13 +00004187 IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);
Dan Gohmand96eae82010-04-09 02:00:38 +00004188
Dan Gohman572645c2010-02-12 10:34:29 +00004189 // Inform the Rewriter if we have a post-increment use, so that it can
4190 // perform an advantageous expansion.
Dan Gohman448db1c2010-04-07 22:27:08 +00004191 Rewriter.setPostInc(LF.PostIncLoops);
Dan Gohman572645c2010-02-12 10:34:29 +00004192
4193 // This is the type that the user actually needs.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00004194 Type *OpTy = LF.OperandValToReplace->getType();
Dan Gohman572645c2010-02-12 10:34:29 +00004195 // This will be the type that we'll initially expand to.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00004196 Type *Ty = F.getType();
Dan Gohman572645c2010-02-12 10:34:29 +00004197 if (!Ty)
4198 // No type known; just expand directly to the ultimate type.
4199 Ty = OpTy;
4200 else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
4201 // Expand directly to the ultimate type if it's the right size.
4202 Ty = OpTy;
4203 // This is the type to do integer arithmetic in.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00004204 Type *IntTy = SE.getEffectiveSCEVType(Ty);
Dan Gohman572645c2010-02-12 10:34:29 +00004205
4206 // Build up a list of operands to add together to form the full base.
4207 SmallVector<const SCEV *, 8> Ops;
4208
4209 // Expand the BaseRegs portion.
4210 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
4211 E = F.BaseRegs.end(); I != E; ++I) {
4212 const SCEV *Reg = *I;
4213 assert(!Reg->isZero() && "Zero allocated in a base register!");
4214
Dan Gohman448db1c2010-04-07 22:27:08 +00004215 // If we're expanding for a post-inc user, make the post-inc adjustment.
4216 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
4217 Reg = TransformForPostIncUse(Denormalize, Reg,
4218 LF.UserInst, LF.OperandValToReplace,
4219 Loops, SE, DT);
Dan Gohman572645c2010-02-12 10:34:29 +00004220
4221 Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
4222 }
4223
Dan Gohman087bd1e2010-03-03 05:29:13 +00004224 // Flush the operand list to suppress SCEVExpander hoisting.
4225 if (!Ops.empty()) {
4226 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
4227 Ops.clear();
4228 Ops.push_back(SE.getUnknown(FullV));
4229 }
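  // (The "flush" above works by materializing the partial sum into a Value and
  // re-wrapping it as a SCEVUnknown, which the expander treats as opaque; this
  // appears to be what pins the already-emitted instructions in place.)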
4230
Dan Gohman572645c2010-02-12 10:34:29 +00004231 // Expand the ScaledReg portion.
4232 Value *ICmpScaledV = 0;
4233 if (F.AM.Scale != 0) {
4234 const SCEV *ScaledS = F.ScaledReg;
4235
Dan Gohman448db1c2010-04-07 22:27:08 +00004236 // If we're expanding for a post-inc user, make the post-inc adjustment.
4237 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
4238 ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
4239 LF.UserInst, LF.OperandValToReplace,
4240 Loops, SE, DT);
Dan Gohman572645c2010-02-12 10:34:29 +00004241
4242 if (LU.Kind == LSRUse::ICmpZero) {
4243 // An interesting way of "folding" with an icmp is to use a negated
4244 // scale, which we'll implement by inserting it into the other operand
4245 // of the icmp.
4246 assert(F.AM.Scale == -1 &&
4247 "The only scale supported by ICmpZero uses is -1!");
4248 ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
4249 } else {
4250 // Otherwise just expand the scaled register and an explicit scale,
4251 // which is expected to be matched as part of the address.
4252 ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
4253 ScaledS = SE.getMulExpr(ScaledS,
Dan Gohmandeff6212010-05-03 22:09:21 +00004254 SE.getConstant(ScaledS->getType(), F.AM.Scale));
Dan Gohman572645c2010-02-12 10:34:29 +00004255 Ops.push_back(ScaledS);
Dan Gohman087bd1e2010-03-03 05:29:13 +00004256
4257 // Flush the operand list to suppress SCEVExpander hoisting.
4258 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
4259 Ops.clear();
4260 Ops.push_back(SE.getUnknown(FullV));
Dan Gohman572645c2010-02-12 10:34:29 +00004261 }
4262 }
4263
Dan Gohman087bd1e2010-03-03 05:29:13 +00004264 // Expand the GV portion.
4265 if (F.AM.BaseGV) {
4266 Ops.push_back(SE.getUnknown(F.AM.BaseGV));
4267
4268 // Flush the operand list to suppress SCEVExpander hoisting.
4269 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
4270 Ops.clear();
4271 Ops.push_back(SE.getUnknown(FullV));
4272 }
4273
4274 // Expand the immediate portion.
Dan Gohman572645c2010-02-12 10:34:29 +00004275 int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
4276 if (Offset != 0) {
4277 if (LU.Kind == LSRUse::ICmpZero) {
4278 // The other interesting way of "folding" with an ICmpZero is to use a
4279 // negated immediate.
4280 if (!ICmpScaledV)
Eli Friedmandae36ba2011-10-13 23:48:33 +00004281 ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
Dan Gohman572645c2010-02-12 10:34:29 +00004282 else {
4283 Ops.push_back(SE.getUnknown(ICmpScaledV));
4284 ICmpScaledV = ConstantInt::get(IntTy, Offset);
4285 }
4286 } else {
4287 // Just add the immediate values. These again are expected to be matched
4288 // as part of the address.
Dan Gohman087bd1e2010-03-03 05:29:13 +00004289 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
Dan Gohman572645c2010-02-12 10:34:29 +00004290 }
4291 }
4292
Dan Gohmancca82142011-05-03 00:46:49 +00004293 // Expand the unfolded offset portion.
4294 int64_t UnfoldedOffset = F.UnfoldedOffset;
4295 if (UnfoldedOffset != 0) {
4296 // Just add the immediate values.
4297 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
4298 UnfoldedOffset)));
4299 }
4300
Dan Gohman572645c2010-02-12 10:34:29 +00004301 // Emit instructions summing all the operands.
4302 const SCEV *FullS = Ops.empty() ?
Dan Gohmandeff6212010-05-03 22:09:21 +00004303 SE.getConstant(IntTy, 0) :
Dan Gohman572645c2010-02-12 10:34:29 +00004304 SE.getAddExpr(Ops);
4305 Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);
4306
4307 // We're done expanding now, so reset the rewriter.
Dan Gohman448db1c2010-04-07 22:27:08 +00004308 Rewriter.clearPostInc();
Dan Gohman572645c2010-02-12 10:34:29 +00004309
4310 // An ICmpZero Formula represents an ICmp which we're handling as a
4311 // comparison against zero. Now that we've expanded an expression for that
4312 // form, update the ICmp's other operand.
4313 if (LU.Kind == LSRUse::ICmpZero) {
4314 ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
4315 DeadInsts.push_back(CI->getOperand(1));
4316 assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
4317 "a scale at the same time!");
4318 if (F.AM.Scale == -1) {
4319 if (ICmpScaledV->getType() != OpTy) {
4320 Instruction *Cast =
4321 CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
4322 OpTy, false),
4323 ICmpScaledV, OpTy, "tmp", CI);
4324 ICmpScaledV = Cast;
4325 }
4326 CI->setOperand(1, ICmpScaledV);
4327 } else {
4328 assert(F.AM.Scale == 0 &&
4329 "ICmp does not support folding a global value and "
4330 "a scale at the same time!");
4331 Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
4332 -(uint64_t)Offset);
4333 if (C->getType() != OpTy)
4334 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4335 OpTy, false),
4336 C, OpTy);
4337
4338 CI->setOperand(1, C);
4339 }
4340 }
4341
4342 return FullV;
4343}
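
// Illustrative result (not taken from the source): for an address use whose
// formula has BaseRegs == { %base }, ScaledReg == %idx with AM.Scale == 2, and
// AM.BaseOffs == 4, Expand emits roughly
//   %scaled = mul i64 %idx, 2
//   %sum    = add i64 %base, %scaled
//   %full   = add i64 %sum, 4
// and Rewrite then points the fixup's user at %full, inserting a no-op cast if
// the types differ.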
4344
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004345/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
4346/// of their operands effectively happens in their predecessor blocks, so the
4347/// expression may need to be expanded in multiple places.
4348void LSRInstance::RewriteForPHI(PHINode *PN,
4349 const LSRFixup &LF,
4350 const Formula &F,
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004351 SCEVExpander &Rewriter,
4352 SmallVectorImpl<WeakVH> &DeadInsts,
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004353 Pass *P) const {
4354 DenseMap<BasicBlock *, Value *> Inserted;
4355 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
4356 if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
4357 BasicBlock *BB = PN->getIncomingBlock(i);
4358
4359 // If this is a critical edge, split the edge so that we do not insert
4360 // the code on all predecessor/successor paths. We do this unless this
4361 // is the canonical backedge for this loop, which complicates post-inc
4362 // users.
4363 if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
Dan Gohman3ef98382011-02-08 00:55:13 +00004364 !isa<IndirectBrInst>(BB->getTerminator())) {
Bill Wendling89d44112011-08-25 01:08:34 +00004365 BasicBlock *Parent = PN->getParent();
4366 Loop *PNLoop = LI.getLoopFor(Parent);
4367 if (!PNLoop || Parent != PNLoop->getHeader()) {
Dan Gohman3ef98382011-02-08 00:55:13 +00004368 // Split the critical edge.
Bill Wendling8b6af8a2011-08-25 05:55:40 +00004369 BasicBlock *NewBB = 0;
4370 if (!Parent->isLandingPad()) {
Andrew Trickf143b792011-10-04 03:50:44 +00004371 NewBB = SplitCriticalEdge(BB, Parent, P,
4372 /*MergeIdenticalEdges=*/true,
4373 /*DontDeleteUselessPhis=*/true);
Bill Wendling8b6af8a2011-08-25 05:55:40 +00004374 } else {
4375 SmallVector<BasicBlock*, 2> NewBBs;
4376 SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs);
4377 NewBB = NewBBs[0];
4378 }
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004379
Dan Gohman3ef98382011-02-08 00:55:13 +00004380 // If PN is outside of the loop and BB is in the loop, we want to
4381 // move the block to be immediately before the PHI block, not
4382 // immediately after BB.
4383 if (L->contains(BB) && !L->contains(PN))
4384 NewBB->moveBefore(PN->getParent());
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004385
Dan Gohman3ef98382011-02-08 00:55:13 +00004386 // Splitting the edge can reduce the number of PHI entries we have.
4387 e = PN->getNumIncomingValues();
4388 BB = NewBB;
4389 i = PN->getBasicBlockIndex(BB);
4390 }
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004391 }
4392
4393 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
4394 Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
4395 if (!Pair.second)
4396 PN->setIncomingValue(i, Pair.first->second);
4397 else {
Dan Gohman454d26d2010-02-22 04:11:59 +00004398 Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004399
4400 // If this is reuse-by-noop-cast, insert the noop cast.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00004401 Type *OpTy = LF.OperandValToReplace->getType();
Dan Gohman3a02cbc2010-02-16 20:25:07 +00004402 if (FullV->getType() != OpTy)
4403 FullV =
4404 CastInst::Create(CastInst::getCastOpcode(FullV, false,
4405 OpTy, false),
4406 FullV, LF.OperandValToReplace->getType(),
4407 "tmp", BB->getTerminator());
4408
4409 PN->setIncomingValue(i, FullV);
4410 Pair.first->second = FullV;
4411 }
4412 }
4413}
4414
Dan Gohman572645c2010-02-12 10:34:29 +00004415/// Rewrite - Emit instructions for the leading candidate expression for this
4416/// LSRUse (this is called "expanding"), and update the UserInst to reference
4417/// the newly expanded value.
4418void LSRInstance::Rewrite(const LSRFixup &LF,
4419 const Formula &F,
Dan Gohman572645c2010-02-12 10:34:29 +00004420 SCEVExpander &Rewriter,
4421 SmallVectorImpl<WeakVH> &DeadInsts,
Dan Gohman572645c2010-02-12 10:34:29 +00004422 Pass *P) const {
Dan Gohman572645c2010-02-12 10:34:29 +00004423 // First, find an insertion point that dominates UserInst. For PHI nodes,
4424 // find the nearest block which dominates all the relevant uses.
4425 if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
Dan Gohman454d26d2010-02-22 04:11:59 +00004426 RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
Dan Gohman572645c2010-02-12 10:34:29 +00004427 } else {
Dan Gohman454d26d2010-02-22 04:11:59 +00004428 Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);
Dan Gohman572645c2010-02-12 10:34:29 +00004429
4430 // If this is reuse-by-noop-cast, insert the noop cast.
Chris Lattnerdb125cf2011-07-18 04:54:35 +00004431 Type *OpTy = LF.OperandValToReplace->getType();
Dan Gohman572645c2010-02-12 10:34:29 +00004432 if (FullV->getType() != OpTy) {
4433 Instruction *Cast =
4434 CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
4435 FullV, OpTy, "tmp", LF.UserInst);
4436 FullV = Cast;
4437 }
4438
4439 // Update the user. ICmpZero is handled specially here (for now) because
4440 // Expand may have updated one of the operands of the icmp already, and
4441 // its new value may happen to be equal to LF.OperandValToReplace, in
4442 // which case doing replaceUsesOfWith leads to replacing both operands
4443 // with the same value. TODO: Reorganize this.
4444 if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
4445 LF.UserInst->setOperand(0, FullV);
4446 else
4447 LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
4448 }
4449
4450 DeadInsts.push_back(LF.OperandValToReplace);
4451}
4452
Dan Gohman76c315a2010-05-20 20:52:00 +00004453/// ImplementSolution - Rewrite all the fixup locations with new values,
4454/// following the chosen solution.
Dan Gohman572645c2010-02-12 10:34:29 +00004455void
4456LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
4457 Pass *P) {
4458 // Keep track of instructions we may have made dead, so that
4459 // we can remove them after we are done working.
4460 SmallVector<WeakVH, 16> DeadInsts;
4461
Andrew Trick5e7645b2011-06-28 05:07:32 +00004462 SCEVExpander Rewriter(SE, "lsr");
Andrew Trick8bf295b2012-01-09 18:58:16 +00004463#ifndef NDEBUG
4464 Rewriter.setDebugType(DEBUG_TYPE);
4465#endif
Dan Gohman572645c2010-02-12 10:34:29 +00004466 Rewriter.disableCanonicalMode();
Andrew Trickc5701912011-10-07 23:46:21 +00004467 Rewriter.enableLSRMode();
Dan Gohman572645c2010-02-12 10:34:29 +00004468 Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
4469
Andrew Trick64925c52012-01-10 01:45:08 +00004470 // Mark phi nodes that terminate chains so the expander tries to reuse them.
4471 for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
4472 ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
4473 if (PHINode *PN = dyn_cast<PHINode>(ChainI->back().UserInst))
4474 Rewriter.setChainedPhi(PN);
4475 }
4476
Dan Gohman572645c2010-02-12 10:34:29 +00004477 // Expand the new value definitions and update the users.
Dan Gohman402d4352010-05-20 20:33:18 +00004478 for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
4479 E = Fixups.end(); I != E; ++I) {
4480 const LSRFixup &Fixup = *I;
Dan Gohman572645c2010-02-12 10:34:29 +00004481
Dan Gohman402d4352010-05-20 20:33:18 +00004482 Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);
Dan Gohman572645c2010-02-12 10:34:29 +00004483
4484 Changed = true;
4485 }
4486
Andrew Trick22d20c22012-01-09 21:18:52 +00004487 for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
4488 ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
4489 GenerateIVChain(*ChainI, Rewriter, DeadInsts);
4490 Changed = true;
4491 }
Dan Gohman572645c2010-02-12 10:34:29 +00004492 // Clean up after ourselves. This must be done before deleting any
4493 // instructions.
4494 Rewriter.clear();
4495
4496 Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
4497}
4498
4499LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
4500 : IU(P->getAnalysis<IVUsers>()),
4501 SE(P->getAnalysis<ScalarEvolution>()),
4502 DT(P->getAnalysis<DominatorTree>()),
Dan Gohmane5f76872010-04-09 22:07:05 +00004503 LI(P->getAnalysis<LoopInfo>()),
Dan Gohman572645c2010-02-12 10:34:29 +00004504 TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {
Devang Patel0f54dcb2007-03-06 21:14:09 +00004505
Dan Gohman03e896b2009-11-05 21:11:53 +00004506 // If LoopSimplify form is not available, stay out of trouble.
Andrew Trickacdb4aa2012-01-07 03:16:50 +00004507 if (!L->isLoopSimplifyForm())
4508 return;
Dan Gohman03e896b2009-11-05 21:11:53 +00004509
Andrew Trick75ae2032012-03-16 03:16:56 +00004510 // If there's no interesting work to be done, bail early.
4511 if (IU.empty()) return;
4512
4513#ifndef NDEBUG
Andrew Trick0f080912012-01-17 06:45:52 +00004514 // All dominating loops must have preheaders, or SCEVExpander may not be able
4515 // to materialize an AddRecExpr whose Start is an outer AddRecExpr.
4516 //
Andrew Trick75ae2032012-03-16 03:16:56 +00004517 // IVUsers analysis should only create users that are dominated by simple loop
4518 // headers. Since this loop should dominate all of its users, its user list
4519 // should be empty if this loop itself is not within a simple loop nest.
Andrew Trick0f080912012-01-17 06:45:52 +00004520 for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader());
4521 Rung; Rung = Rung->getIDom()) {
4522 BasicBlock *BB = Rung->getBlock();
4523 const Loop *DomLoop = LI.getLoopFor(BB);
4524 if (DomLoop && DomLoop->getHeader() == BB) {
Andrew Trick75ae2032012-03-16 03:16:56 +00004525 assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest");
Andrew Trick0f080912012-01-17 06:45:52 +00004526 }
Andrew Trickacdb4aa2012-01-07 03:16:50 +00004527 }
Andrew Trick75ae2032012-03-16 03:16:56 +00004528#endif // NDEBUG
Dan Gohman80b0f8c2009-03-09 20:34:59 +00004529
Dan Gohman572645c2010-02-12 10:34:29 +00004530 DEBUG(dbgs() << "\nLSR on loop ";
4531 WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
4532 dbgs() << ":\n");
Dan Gohmanf7912df2009-03-09 20:46:50 +00004533
Dan Gohman402d4352010-05-20 20:33:18 +00004534 // First, perform some low-level loop optimizations.
Dan Gohman572645c2010-02-12 10:34:29 +00004535 OptimizeShadowIV();
Dan Gohmanc6519f92010-05-20 20:05:31 +00004536 OptimizeLoopTermCond();
Evan Cheng5792f512009-05-11 22:33:01 +00004537
Andrew Trick37eb38d2011-07-21 00:40:04 +00004538 // If loop preparation eliminates all interesting IV users, bail.
4539 if (IU.empty()) return;
4540
Andrew Trick5219f862011-09-29 01:53:08 +00004541  // Skip non-innermost loops until we can model them better with formulae.
Andrew Trickbd618f12012-03-22 22:42:45 +00004542 if (!L->empty()) {
Andrew Trick0c01bc32011-09-29 01:33:38 +00004543 DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
Andrew Trick5219f862011-09-29 01:53:08 +00004544 return;
Andrew Trick0c01bc32011-09-29 01:33:38 +00004545 }
4546
Dan Gohman402d4352010-05-20 20:33:18 +00004547 // Start collecting data and preparing for the solver.
Andrew Trick6c7d0ae2012-01-09 19:50:34 +00004548 CollectChains();
Dan Gohman572645c2010-02-12 10:34:29 +00004549 CollectInterestingTypesAndFactors();
4550 CollectFixupsAndInitialFormulae();
4551 CollectLoopInvariantFixupsAndFormulae();
Chris Lattner010de252005-08-08 05:28:22 +00004552
Andrew Trick22d20c22012-01-09 21:18:52 +00004553 assert(!Uses.empty() && "IVUsers reported at least one use");
Dan Gohman572645c2010-02-12 10:34:29 +00004554 DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
4555 print_uses(dbgs()));
Misha Brukmanfd939082005-04-21 23:48:37 +00004556
Dan Gohman572645c2010-02-12 10:34:29 +00004557 // Now use the reuse data to generate a bunch of interesting ways
4558 // to formulate the values needed for the uses.
4559 GenerateAllReuseFormulae();
Evan Chengd1d6b5c2006-03-16 21:53:05 +00004560
Dan Gohman572645c2010-02-12 10:34:29 +00004561 FilterOutUndesirableDedicatedRegisters();
4562 NarrowSearchSpaceUsingHeuristics();
Dan Gohman6bec5bb2009-12-18 00:06:20 +00004563
Dan Gohman572645c2010-02-12 10:34:29 +00004564 SmallVector<const Formula *, 8> Solution;
4565 Solve(Solution);
Dan Gohman6bec5bb2009-12-18 00:06:20 +00004566
Dan Gohman572645c2010-02-12 10:34:29 +00004567 // Release memory that is no longer needed.
4568 Factors.clear();
4569 Types.clear();
4570 RegUses.clear();
4571
Andrew Trick80ef1b22011-09-27 00:44:14 +00004572 if (Solution.empty())
4573 return;
4574
Dan Gohman572645c2010-02-12 10:34:29 +00004575#ifndef NDEBUG
4576 // Formulae should be legal.
4577 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
4578 E = Uses.end(); I != E; ++I) {
4579 const LSRUse &LU = *I;
4580 for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
4581 JE = LU.Formulae.end(); J != JE; ++J)
4582 assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
4583 LU.Kind, LU.AccessTy, TLI) &&
4584 "Illegal formula generated!");
4585  }
4586#endif
4587
4588 // Now that we've decided what we want, make it so.
4589 ImplementSolution(Solution, P);
4590}
4591
4592void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
4593 if (Factors.empty() && Types.empty()) return;
4594
4595 OS << "LSR has identified the following interesting factors and types: ";
4596 bool First = true;
4597
4598 for (SmallSetVector<int64_t, 8>::const_iterator
4599 I = Factors.begin(), E = Factors.end(); I != E; ++I) {
4600 if (!First) OS << ", ";
4601 First = false;
4602 OS << '*' << *I;
Evan Cheng81ebdcf2009-11-10 21:14:05 +00004603 }
Dale Johannesenc1acc3f2009-05-11 17:15:42 +00004604
Chris Lattnerdb125cf2011-07-18 04:54:35 +00004605 for (SmallSetVector<Type *, 4>::const_iterator
Dan Gohman572645c2010-02-12 10:34:29 +00004606 I = Types.begin(), E = Types.end(); I != E; ++I) {
4607 if (!First) OS << ", ";
4608 First = false;
4609 OS << '(' << **I << ')';
4610 }
4611 OS << '\n';
4612}
4613
4614void LSRInstance::print_fixups(raw_ostream &OS) const {
4615 OS << "LSR is examining the following fixup sites:\n";
4616 for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
4617 E = Fixups.end(); I != E; ++I) {
Dan Gohman572645c2010-02-12 10:34:29 +00004618 dbgs() << " ";
Dan Gohman9f383eb2010-05-20 22:25:20 +00004619 I->print(OS);
Dan Gohman572645c2010-02-12 10:34:29 +00004620 OS << '\n';
4621 }
4622}
4623
4624void LSRInstance::print_uses(raw_ostream &OS) const {
4625 OS << "LSR is examining the following uses:\n";
4626 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
4627 E = Uses.end(); I != E; ++I) {
4628 const LSRUse &LU = *I;
4629 dbgs() << " ";
4630 LU.print(OS);
4631 OS << '\n';
4632 for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
4633 JE = LU.Formulae.end(); J != JE; ++J) {
4634 OS << " ";
4635 J->print(OS);
4636 OS << '\n';
4637 }
4638 }
4639}
4640
4641void LSRInstance::print(raw_ostream &OS) const {
4642 print_factors_and_types(OS);
4643 print_fixups(OS);
4644 print_uses(OS);
4645}
4646
4647void LSRInstance::dump() const {
4648 print(errs()); errs() << '\n';
4649}
4650
4651namespace {
4652
4653class LoopStrengthReduce : public LoopPass {
4654  /// TLI - Keep a pointer to a TargetLowering to consult for determining
4655 /// transformation profitability.
4656 const TargetLowering *const TLI;
4657
4658public:
4659 static char ID; // Pass ID, replacement for typeid
4660 explicit LoopStrengthReduce(const TargetLowering *tli = 0);
4661
4662private:
4663 bool runOnLoop(Loop *L, LPPassManager &LPM);
4664 void getAnalysisUsage(AnalysisUsage &AU) const;
4665};
4666
4667}
4668
4669char LoopStrengthReduce::ID = 0;
Owen Anderson2ab36d32010-10-12 19:48:12 +00004670INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
Owen Andersonce665bd2010-10-07 22:25:06 +00004671 "Loop Strength Reduction", false, false)
Owen Anderson2ab36d32010-10-12 19:48:12 +00004672INITIALIZE_PASS_DEPENDENCY(DominatorTree)
4673INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
4674INITIALIZE_PASS_DEPENDENCY(IVUsers)
Owen Anderson205942a2010-10-19 20:08:44 +00004675INITIALIZE_PASS_DEPENDENCY(LoopInfo)
4676INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
Owen Anderson2ab36d32010-10-12 19:48:12 +00004677INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
4678 "Loop Strength Reduction", false, false)
4679
Dan Gohman572645c2010-02-12 10:34:29 +00004680
4681Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
4682 return new LoopStrengthReduce(TLI);
4683}
4684
4685LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
Owen Anderson081c34b2010-10-19 17:21:58 +00004686 : LoopPass(ID), TLI(tli) {
4687 initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
4688 }
Dan Gohman572645c2010-02-12 10:34:29 +00004689
4690void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
4691 // We split critical edges, so we change the CFG. However, we do update
4692 // many analyses if they are around.
Eric Christopher6793c492011-02-10 01:48:24 +00004693 AU.addPreservedID(LoopSimplifyID);
Dan Gohman572645c2010-02-12 10:34:29 +00004694
Eric Christopher6793c492011-02-10 01:48:24 +00004695 AU.addRequired<LoopInfo>();
4696 AU.addPreserved<LoopInfo>();
4697 AU.addRequiredID(LoopSimplifyID);
Dan Gohman572645c2010-02-12 10:34:29 +00004698 AU.addRequired<DominatorTree>();
4699 AU.addPreserved<DominatorTree>();
4700 AU.addRequired<ScalarEvolution>();
4701 AU.addPreserved<ScalarEvolution>();
Cameron Zwarich2c2b9332011-02-10 23:53:14 +00004702 // Requiring LoopSimplify a second time here prevents IVUsers from running
4703 // twice, since LoopSimplify was invalidated by running ScalarEvolution.
4704 AU.addRequiredID(LoopSimplifyID);
Dan Gohman572645c2010-02-12 10:34:29 +00004705 AU.addRequired<IVUsers>();
4706 AU.addPreserved<IVUsers>();
4707}
4708
4709bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
4710 bool Changed = false;
4711
4712 // Run the main LSR transformation.
4713 Changed |= LSRInstance(TLI, L, this).getChanged();
4714
Andrew Trickf231a6d2012-01-07 01:36:44 +00004715 // Remove any extra phis created by processing inner loops.
Dan Gohman9fff2182010-01-05 16:31:45 +00004716 Changed |= DeleteDeadPHIs(L->getHeader());
Andrew Trickf231a6d2012-01-07 01:36:44 +00004717 if (EnablePhiElim) {
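    // replaceCongruentIVs folds induction variables that ScalarEvolution can
    // prove are congruent into a single phi; the duplicates become trivially
    // dead and are cleaned up below.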
4718 SmallVector<WeakVH, 16> DeadInsts;
4719 SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr");
4720#ifndef NDEBUG
4721 Rewriter.setDebugType(DEBUG_TYPE);
4722#endif
4723 unsigned numFolded = Rewriter.
4724 replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, TLI);
4725 if (numFolded) {
4726 Changed = true;
4727 DeleteTriviallyDeadInstructions(DeadInsts);
4728 DeleteDeadPHIs(L->getHeader());
4729 }
4730 }
Evan Cheng1ce75dc2008-07-07 19:51:32 +00004731 return Changed;
Nate Begemaneaa13852004-10-18 21:08:22 +00004732}