//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
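//
// For example, the canonical induction variable of a loop such as
// "for (i = 0; i != n; ++i)" is represented by the add recurrence
// {0,+,1}<%loop>, read as "starts at 0 and steps by 1 on each iteration of
// %loop".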
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->getNoWrapFlags(FlagNUW))
        OS << "<nuw>";
      if (NAry->getNoWrapFlags(FlagNSW))
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS. This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    // Return true if LHS is less than RHS, and false if LHS is at least RHS.
    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      return compare(LHS, RHS) < 0;
    }

    // Return negative, zero, or positive, if LHS is less than, equal to, or
    // greater than RHS, respectively. A three-way result allows recursive
    // comparisons to be more efficient.
    int compare(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return 0;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return (int)LType - (int)RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.
      switch (static_cast<SCEVTypes>(LType)) {
      case scUnknown: {
        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
        // not as complete as it could be.
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return (int)LIsPointer - (int)RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return (int)LID - (int)RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
          return (int)LArgNo - (int)RArgNo;
        }

        // For instructions, compare their loop depth, and their operand
        // count. This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return (int)LDepth - (int)RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          return (int)LNumOps - (int)RNumOps;
        }

        return 0;
      }

      case scConstant: {
        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);

        // Compare constant values.
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return (int)LBitWidth - (int)RBitWidth;
        return LA.ult(RA) ? -1 : 1;
      }

      case scAddRecExpr: {
        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

        // Compare addrec loop depths.
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Addrec complexity grows with operand count.
        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        // Lexicographically compare.
        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LA->getOperand(i), RA->getOperand(i));
          if (X != 0)
            return X;
        }

        return 0;
      }

      case scAddExpr:
      case scMulExpr:
      case scSMaxExpr:
      case scUMaxExpr: {
        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

        // Lexicographically compare n-ary expressions.
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        for (unsigned i = 0; i != LNumOps; ++i) {
          if (i >= RNumOps)
            return 1;
          long X = compare(LC->getOperand(i), RC->getOperand(i));
          if (X != 0)
            return X;
        }
        return (int)LNumOps - (int)RNumOps;
      }

      case scUDivExpr: {
        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

        // Lexicographically compare udiv expressions.
        long X = compare(LC->getLHS(), RC->getLHS());
        if (X != 0)
          return X;
        return compare(LC->getRHS(), RC->getRHS());
      }

      case scTruncate:
      case scZeroExtend:
      case scSignExtend: {
        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

        // Compare cast expressions by operand.
        return compare(LC->getOperand(), RC->getOperand());
      }

      case scCouldNotCompute:
        llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
      }
      llvm_unreachable("Unknown SCEV kind!");
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
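/// For example, if the input is [c, b, a, b] and a, b, c are in increasing
/// order of complexity, the vector is rearranged to [a, b, b, c], with the
/// equal 'b' entries left adjacent to each other.
///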
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;  // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K). The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula
  // requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
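  //
  // As a concrete illustration, take K = 3 and W = 32: K! = 6 = 2^1 * 3, so
  // T = 1 and K!/2^T = 3. The product It*(It-1)*(It-2) is computed at
  // W+T = 33 bits, divided by 2^T, truncated back to 32 bits, and multiplied
  // by the multiplicative inverse of 3 modulo 2^32 (0xAAAAAAAB, since
  // 3 * 0xAAAAAAAB = 2^33 + 1) to carry out the exact division by 3.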

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number. We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
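///
/// For example, the affine recurrence {5,+,3} evaluates to
/// 5*BC(It, 0) + 3*BC(It, 1) = 5 + 3*It at iteration It.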
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
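  //
  // In that example (using SCEV notation for illustration), the i8 recurrence
  // {0,+,1} for X is known not to wrap before reaching 100, so the i32 value
  // Y can be modeled as the widened recurrence {0,+,1} over i32 rather than
  // as an opaque zext of the narrow recurrence.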
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNUW))
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
963 // cope with a conservative value, and it will take care to purge
964 // that value once it has finished.
Dan Gohmane65c9172009-07-13 21:35:55 +0000965 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
Dan Gohman2b8da352009-04-30 20:47:05 +0000966 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
Dan Gohman95c5b0e2009-04-29 01:54:20 +0000967 // Manually compute the final value for AR, checking for
Dan Gohman494dac32009-04-29 22:28:28 +0000968 // overflow.
Dan Gohman76466372009-04-27 20:16:15 +0000969
970 // Check whether the backedge-taken count can be losslessly casted to
971 // the addrec's type. The count is always unsigned.
Dan Gohmanaf752342009-07-07 17:06:11 +0000972 const SCEV *CastedMaxBECount =
Dan Gohman2b8da352009-04-30 20:47:05 +0000973 getTruncateOrZeroExtend(MaxBECount, Start->getType());
Dan Gohmanaf752342009-07-07 17:06:11 +0000974 const SCEV *RecastedMaxBECount =
Dan Gohman4fc36682009-05-18 15:58:39 +0000975 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
976 if (MaxBECount == RecastedMaxBECount) {
Chris Lattner229907c2011-07-18 04:54:35 +0000977 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
Dan Gohman2b8da352009-04-30 20:47:05 +0000978 // Check whether Start+Step*MaxBECount has no unsigned overflow.
Dan Gohman007f5042010-02-24 19:31:06 +0000979 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
Nuno Lopesc2a170e2012-05-15 20:20:14 +0000980 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
981 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
982 const SCEV *WideMaxBECount =
983 getZeroExtendExpr(CastedMaxBECount, WideTy);
Dan Gohmanaf752342009-07-07 17:06:11 +0000984 const SCEV *OperandExtendedAdd =
Nuno Lopesc2a170e2012-05-15 20:20:14 +0000985 getAddExpr(WideStart,
986 getMulExpr(WideMaxBECount,
Dan Gohman4fc36682009-05-18 15:58:39 +0000987 getZeroExtendExpr(Step, WideTy)));
Nuno Lopesc2a170e2012-05-15 20:20:14 +0000988 if (ZAdd == OperandExtendedAdd) {
Andrew Trickf6b01ff2011-03-15 00:37:00 +0000989 // Cache knowledge of AR NUW, which is propagated to this AddRec.
990 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
Dan Gohman494dac32009-04-29 22:28:28 +0000991 // Return the expression with the addrec on the outside.
992 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
993 getZeroExtendExpr(Step, Ty),
Andrew Trickf6b01ff2011-03-15 00:37:00 +0000994 L, AR->getNoWrapFlags());
995 }
Dan Gohman76466372009-04-27 20:16:15 +0000996 // Similar to above, only this time treat the step value as signed.
997 // This covers loops that count down.
Dan Gohman4fc36682009-05-18 15:58:39 +0000998 OperandExtendedAdd =
Nuno Lopesc2a170e2012-05-15 20:20:14 +0000999 getAddExpr(WideStart,
1000 getMulExpr(WideMaxBECount,
Dan Gohman4fc36682009-05-18 15:58:39 +00001001 getSignExtendExpr(Step, WideTy)));
Nuno Lopesc2a170e2012-05-15 20:20:14 +00001002 if (ZAdd == OperandExtendedAdd) {
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001003 // Cache knowledge of AR NW, which is propagated to this AddRec.
1004 // Negative step causes unsigned wrap, but it still can't self-wrap.
1005 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
Dan Gohman494dac32009-04-29 22:28:28 +00001006 // Return the expression with the addrec on the outside.
1007 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1008 getSignExtendExpr(Step, Ty),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001009 L, AR->getNoWrapFlags());
1010 }
Dan Gohmane65c9172009-07-13 21:35:55 +00001011 }
1012
1013 // If the backedge is guarded by a comparison with the pre-inc value
1014 // the addrec is safe. Also, if the entry is guarded by a comparison
1015 // with the start value and the backedge is guarded by a comparison
1016 // with the post-inc value, the addrec is safe.
1017 if (isKnownPositive(Step)) {
1018 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
1019 getUnsignedRange(Step).getUnsignedMax());
1020 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
Dan Gohmanb50349a2010-04-11 19:27:13 +00001021 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
Dan Gohmane65c9172009-07-13 21:35:55 +00001022 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001023 AR->getPostIncExpr(*this), N))) {
1024 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1025 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
Dan Gohmane65c9172009-07-13 21:35:55 +00001026 // Return the expression with the addrec on the outside.
1027 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1028 getZeroExtendExpr(Step, Ty),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001029 L, AR->getNoWrapFlags());
1030 }
Dan Gohmane65c9172009-07-13 21:35:55 +00001031 } else if (isKnownNegative(Step)) {
1032 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1033 getSignedRange(Step).getSignedMin());
Dan Gohman5f18c542010-05-04 01:11:15 +00001034 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1035 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
Dan Gohmane65c9172009-07-13 21:35:55 +00001036 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001037 AR->getPostIncExpr(*this), N))) {
1038 // Cache knowledge of AR NW, which is propagated to this AddRec.
1039 // Negative step causes unsigned wrap, but it still can't self-wrap.
1040 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1041 // Return the expression with the addrec on the outside.
Dan Gohmane65c9172009-07-13 21:35:55 +00001042 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1043 getSignExtendExpr(Step, Ty),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001044 L, AR->getNoWrapFlags());
1045 }
Dan Gohman76466372009-04-27 20:16:15 +00001046 }
1047 }
1048 }
Chris Lattnerd934c702004-04-02 20:23:17 +00001049
Dan Gohman74a0ba12009-07-13 20:55:53 +00001050 // The cast wasn't folded; create an explicit cast node.
1051 // Recompute the insert position, as it may have been invalidated.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00001052 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
Dan Gohman01c65a22010-03-18 18:49:47 +00001053 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1054 Op, Ty);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00001055 UniqueSCEVs.InsertNode(S, IP);
1056 return S;
Chris Lattnerd934c702004-04-02 20:23:17 +00001057}
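
// Illustrative sketch for getZeroExtendExpr above (not part of the analysis
// itself): a client zero-extending the SCEV of a narrow induction variable
// would normally see the addrec fold rather than an explicit cast node.
// Assuming SE is a ScalarEvolution and V is the i8 IV of a provably bounded
// loop:
//
//   const SCEV *S = SE.getSCEV(V);                    // e.g. {0,+,1}<i8>
//   Type *I64 = IntegerType::get(SE.getContext(), 64);
//   const SCEV *Z = SE.getZeroExtendExpr(S, I64);
//   // If Start+Step*MaxBECount was shown not to wrap, Z is the addrec
//   // {0,+,1}<nuw> in i64 rather than (zext i8 {0,+,1} to i64).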
1058
Andrew Trick812276e2011-05-31 21:17:47 +00001059// Get the limit of a recurrence such that incrementing by Step cannot cause
1060// signed overflow as long as the value of the recurrence within the loop does
1061// not exceed this limit before incrementing.
1062static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1063 ICmpInst::Predicate *Pred,
1064 ScalarEvolution *SE) {
1065 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1066 if (SE->isKnownPositive(Step)) {
1067 *Pred = ICmpInst::ICMP_SLT;
1068 return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
1069 SE->getSignedRange(Step).getSignedMax());
1070 }
1071 if (SE->isKnownNegative(Step)) {
1072 *Pred = ICmpInst::ICMP_SGT;
1073 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
1074 SE->getSignedRange(Step).getSignedMin());
1075 }
Craig Topper9f008862014-04-15 04:59:12 +00001076 return nullptr;
Andrew Trick812276e2011-05-31 21:17:47 +00001077}
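
// Worked example for getOverflowLimitForStep above (illustrative only): for
// an i8 recurrence with a constant step of 1, the code sets *Pred to
// ICMP_SLT and returns SignedMin - 1, which wraps in 8 bits to 127: as long
// as the recurrence stays signed-less-than 127 before each increment,
// adding 1 cannot cause signed overflow. For a constant step of -1 it sets
// *Pred to ICMP_SGT and returns SignedMax - (-1), which wraps to -128, the
// analogous lower bound for a decrementing recurrence.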
1078
1079// The recurrence AR has been shown to have no signed wrap. Typically, if we can
1080// prove NSW for AR, then we can just as easily prove NSW for its preincrement
1081// or postincrement sibling. This allows normalizing a sign-extended AddRec as
1082// such: {sext(Step + Start),+,Step} => {Step + sext(Start),+,Step}. As a
1083// result, the expression "Step + sext(PreIncAR)" is congruent with
1084// "sext(PostIncAR)".
1085static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
Chris Lattner229907c2011-07-18 04:54:35 +00001086 Type *Ty,
Andrew Trick812276e2011-05-31 21:17:47 +00001087 ScalarEvolution *SE) {
1088 const Loop *L = AR->getLoop();
1089 const SCEV *Start = AR->getStart();
1090 const SCEV *Step = AR->getStepRecurrence(*SE);
1091
1092 // Check for a simple looking step prior to loop entry.
1093 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
Andrew Trickef8e4ef2011-09-28 17:02:54 +00001094 if (!SA)
Craig Topper9f008862014-04-15 04:59:12 +00001095 return nullptr;
Andrew Trickef8e4ef2011-09-28 17:02:54 +00001096
1097 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1098 // subtraction is expensive. For this purpose, perform a quick and dirty
1099 // difference, by checking for Step in the operand list.
1100 SmallVector<const SCEV *, 4> DiffOps;
1101 for (SCEVAddExpr::op_iterator I = SA->op_begin(), E = SA->op_end();
1102 I != E; ++I) {
1103 if (*I != Step)
1104 DiffOps.push_back(*I);
1105 }
1106 if (DiffOps.size() == SA->getNumOperands())
Craig Topper9f008862014-04-15 04:59:12 +00001107 return nullptr;
Andrew Trick812276e2011-05-31 21:17:47 +00001108
1109 // This is a postinc AR. Check for overflow on the preinc recurrence using the
1110 // same three conditions that getSignExtendExpr checks.
1111
1112 // 1. NSW flags on the step increment.
Andrew Trickef8e4ef2011-09-28 17:02:54 +00001113 const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
Andrew Trick812276e2011-05-31 21:17:47 +00001114 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1115 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1116
Andrew Trick8ef3ad02011-06-01 19:14:56 +00001117 if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
Andrew Trick812276e2011-05-31 21:17:47 +00001118 return PreStart;
Andrew Trick812276e2011-05-31 21:17:47 +00001119
1120 // 2. Direct overflow check on the step operation's expression.
1121 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
Chris Lattner229907c2011-07-18 04:54:35 +00001122 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
Andrew Trick812276e2011-05-31 21:17:47 +00001123 const SCEV *OperandExtendedStart =
1124 SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
1125 SE->getSignExtendExpr(Step, WideTy));
1126 if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
1127 // Cache knowledge of PreAR NSW.
1128 if (PreAR)
1129 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
1130 // FIXME: this optimization needs a unit test
1131 DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
1132 return PreStart;
1133 }
1134
1135 // 3. Loop precondition.
1136 ICmpInst::Predicate Pred;
1137 const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);
1138
Andrew Trick8ef3ad02011-06-01 19:14:56 +00001139 if (OverflowLimit &&
1140 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
Andrew Trick812276e2011-05-31 21:17:47 +00001141 return PreStart;
1142 }
Craig Topper9f008862014-04-15 04:59:12 +00001143 return nullptr;
Andrew Trick812276e2011-05-31 21:17:47 +00001144}
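
// Sketch of the normalization performed by getPreStartForSignExtend above
// (illustrative, not an exhaustive description of the three checks): if AR
// is the post-increment recurrence {x + Step,+,Step}<L> and the
// pre-increment recurrence {x,+,Step}<L> can be shown not to overflow, then
//
//   sext({x + Step,+,Step})  ==  {Step + sext(x),+,Step}
//
// so the sign extension can be pushed onto the narrower pre-start value x
// instead of onto the whole start expression.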
1145
1146// Get the normalized sign-extended expression for this AddRec's Start.
1147static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
Chris Lattner229907c2011-07-18 04:54:35 +00001148 Type *Ty,
Andrew Trick812276e2011-05-31 21:17:47 +00001149 ScalarEvolution *SE) {
1150 const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
1151 if (!PreStart)
1152 return SE->getSignExtendExpr(AR->getStart(), Ty);
1153
1154 return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
1155 SE->getSignExtendExpr(PreStart, Ty));
1156}
1157
Dan Gohmanaf752342009-07-07 17:06:11 +00001158const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
Chris Lattner229907c2011-07-18 04:54:35 +00001159 Type *Ty) {
Dan Gohmanb397e1a2009-04-21 01:07:12 +00001160 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
Dan Gohman413e91f2009-04-21 00:55:22 +00001161 "This is not an extending conversion!");
Dan Gohman194e42c2009-05-01 16:44:18 +00001162 assert(isSCEVable(Ty) &&
1163 "This is not a conversion to a SCEVable type!");
1164 Ty = getEffectiveSCEVType(Ty);
Dan Gohman413e91f2009-04-21 00:55:22 +00001165
Dan Gohman3423e722009-06-30 20:13:32 +00001166 // Fold if the operand is constant.
Dan Gohman5235cc22010-06-24 16:47:03 +00001167 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1168 return getConstant(
Nuno Lopesab5c9242012-05-15 15:44:38 +00001169 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
Dan Gohmancb9e09a2007-06-15 14:38:12 +00001170
Dan Gohman79af8542009-04-22 16:20:48 +00001171 // sext(sext(x)) --> sext(x)
Dan Gohmana30370b2009-05-04 22:02:23 +00001172 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
Dan Gohman79af8542009-04-22 16:20:48 +00001173 return getSignExtendExpr(SS->getOperand(), Ty);
1174
Nick Lewyckye9ea75e2011-01-19 15:56:12 +00001175 // sext(zext(x)) --> zext(x)
1176 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1177 return getZeroExtendExpr(SZ->getOperand(), Ty);
1178
Dan Gohman74a0ba12009-07-13 20:55:53 +00001179 // Before doing any expensive analysis, check to see if we've already
1180 // computed a SCEV for this Op and Ty.
1181 FoldingSetNodeID ID;
1182 ID.AddInteger(scSignExtend);
1183 ID.AddPointer(Op);
1184 ID.AddPointer(Ty);
Craig Topper9f008862014-04-15 04:59:12 +00001185 void *IP = nullptr;
Dan Gohman74a0ba12009-07-13 20:55:53 +00001186 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1187
Nick Lewyckyb32c8942011-01-22 22:06:21 +00001188 // If the input value is provably positive, build a zext instead.
1189 if (isKnownNonNegative(Op))
1190 return getZeroExtendExpr(Op, Ty);
1191
Nick Lewyckybc98f5b2011-01-23 06:20:19 +00001192 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1193 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1194 // It's possible the bits taken off by the truncate were all sign bits. If
1195 // so, we should be able to simplify this further.
1196 const SCEV *X = ST->getOperand();
1197 ConstantRange CR = getSignedRange(X);
Nick Lewyckybc98f5b2011-01-23 06:20:19 +00001198 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1199 unsigned NewBits = getTypeSizeInBits(Ty);
1200 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
Nick Lewyckyd4192f72011-01-23 20:06:05 +00001201 CR.sextOrTrunc(NewBits)))
1202 return getTruncateOrSignExtend(X, Ty);
Nick Lewyckybc98f5b2011-01-23 06:20:19 +00001203 }
1204
Dan Gohman76466372009-04-27 20:16:15 +00001205 // If the input value is a chrec scev, and we can prove that the value
Dan Gohmancb9e09a2007-06-15 14:38:12 +00001206 // did not overflow the old, smaller, value, we can sign extend all of the
Dan Gohman76466372009-04-27 20:16:15 +00001207 // operands (often constants). This allows analysis of something like
Dan Gohmancb9e09a2007-06-15 14:38:12 +00001208 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
Dan Gohmana30370b2009-05-04 22:02:23 +00001209 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
Dan Gohman76466372009-04-27 20:16:15 +00001210 if (AR->isAffine()) {
Dan Gohmane65c9172009-07-13 21:35:55 +00001211 const SCEV *Start = AR->getStart();
1212 const SCEV *Step = AR->getStepRecurrence(*this);
1213 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1214 const Loop *L = AR->getLoop();
1215
Dan Gohman62ef6a72009-07-25 01:22:26 +00001216 // If we have special knowledge that this addrec won't overflow,
1217 // we don't need to do any further analysis.
Andrew Trick8b55b732011-03-14 16:50:06 +00001218 if (AR->getNoWrapFlags(SCEV::FlagNSW))
Andrew Trick812276e2011-05-31 21:17:47 +00001219 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
Dan Gohman62ef6a72009-07-25 01:22:26 +00001220 getSignExtendExpr(Step, Ty),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001221 L, SCEV::FlagNSW);
Dan Gohman62ef6a72009-07-25 01:22:26 +00001222
Dan Gohman76466372009-04-27 20:16:15 +00001223 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1224 // Note that this serves two purposes: It filters out loops that are
1225 // simply not analyzable, and it covers the case where this code is
1226 // being called from within backedge-taken count analysis, such that
1227 // attempting to ask for the backedge-taken count would likely result
1228 // in infinite recursion. In the latter case, the analysis code will
1229 // cope with a conservative value, and it will take care to purge
1230 // that value once it has finished.
Dan Gohmane65c9172009-07-13 21:35:55 +00001231 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
Dan Gohman2b8da352009-04-30 20:47:05 +00001232 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
Dan Gohman95c5b0e2009-04-29 01:54:20 +00001233 // Manually compute the final value for AR, checking for
Dan Gohman494dac32009-04-29 22:28:28 +00001234 // overflow.
Dan Gohman76466372009-04-27 20:16:15 +00001235
1236 // Check whether the backedge-taken count can be losslessly casted to
Dan Gohman494dac32009-04-29 22:28:28 +00001237 // the addrec's type. The count is always unsigned.
Dan Gohmanaf752342009-07-07 17:06:11 +00001238 const SCEV *CastedMaxBECount =
Dan Gohman2b8da352009-04-30 20:47:05 +00001239 getTruncateOrZeroExtend(MaxBECount, Start->getType());
Dan Gohmanaf752342009-07-07 17:06:11 +00001240 const SCEV *RecastedMaxBECount =
Dan Gohman4fc36682009-05-18 15:58:39 +00001241 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1242 if (MaxBECount == RecastedMaxBECount) {
Chris Lattner229907c2011-07-18 04:54:35 +00001243 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
Dan Gohman2b8da352009-04-30 20:47:05 +00001244 // Check whether Start+Step*MaxBECount has no signed overflow.
Dan Gohman007f5042010-02-24 19:31:06 +00001245 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
Nuno Lopesc2a170e2012-05-15 20:20:14 +00001246 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
1247 const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
1248 const SCEV *WideMaxBECount =
1249 getZeroExtendExpr(CastedMaxBECount, WideTy);
Dan Gohmanaf752342009-07-07 17:06:11 +00001250 const SCEV *OperandExtendedAdd =
Nuno Lopesc2a170e2012-05-15 20:20:14 +00001251 getAddExpr(WideStart,
1252 getMulExpr(WideMaxBECount,
Dan Gohman4fc36682009-05-18 15:58:39 +00001253 getSignExtendExpr(Step, WideTy)));
Nuno Lopesc2a170e2012-05-15 20:20:14 +00001254 if (SAdd == OperandExtendedAdd) {
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001255 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1256 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
Dan Gohman494dac32009-04-29 22:28:28 +00001257 // Return the expression with the addrec on the outside.
Andrew Trick812276e2011-05-31 21:17:47 +00001258 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
Dan Gohman494dac32009-04-29 22:28:28 +00001259 getSignExtendExpr(Step, Ty),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001260 L, AR->getNoWrapFlags());
1261 }
Dan Gohman8c129d72009-07-16 17:34:36 +00001262 // Similar to above, only this time treat the step value as unsigned.
1263 // This covers loops that count up with an unsigned step.
Dan Gohman8c129d72009-07-16 17:34:36 +00001264 OperandExtendedAdd =
Nuno Lopesc2a170e2012-05-15 20:20:14 +00001265 getAddExpr(WideStart,
1266 getMulExpr(WideMaxBECount,
Dan Gohman8c129d72009-07-16 17:34:36 +00001267 getZeroExtendExpr(Step, WideTy)));
Nuno Lopesc2a170e2012-05-15 20:20:14 +00001268 if (SAdd == OperandExtendedAdd) {
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001269 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1270 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
Dan Gohman8c129d72009-07-16 17:34:36 +00001271 // Return the expression with the addrec on the outside.
Andrew Trick812276e2011-05-31 21:17:47 +00001272 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
Dan Gohman8c129d72009-07-16 17:34:36 +00001273 getZeroExtendExpr(Step, Ty),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001274 L, AR->getNoWrapFlags());
1275 }
Dan Gohmane65c9172009-07-13 21:35:55 +00001276 }
1277
1278 // If the backedge is guarded by a comparison with the pre-inc value,
1279 // the addrec is safe. Also, if the entry is guarded by a comparison
1280 // with the start value and the backedge is guarded by a comparison
1281 // with the post-inc value, the addrec is safe.
Andrew Trick812276e2011-05-31 21:17:47 +00001282 ICmpInst::Predicate Pred;
1283 const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
1284 if (OverflowLimit &&
1285 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
1286 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
1287 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
1288 OverflowLimit)))) {
1289 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
1290 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1291 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1292 getSignExtendExpr(Step, Ty),
1293 L, AR->getNoWrapFlags());
Dan Gohman76466372009-04-27 20:16:15 +00001294 }
1295 }
1296 }
Dan Gohmancb9e09a2007-06-15 14:38:12 +00001297
Dan Gohman74a0ba12009-07-13 20:55:53 +00001298 // The cast wasn't folded; create an explicit cast node.
1299 // Recompute the insert position, as it may have been invalidated.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00001300 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
Dan Gohman01c65a22010-03-18 18:49:47 +00001301 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1302 Op, Ty);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00001303 UniqueSCEVs.InsertNode(S, IP);
1304 return S;
Dan Gohmancb9e09a2007-06-15 14:38:12 +00001305}
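
// Illustrative sketch for getSignExtendExpr above (not part of the
// analysis): for the loop mentioned earlier,
//
//   for (signed char X = 0; X < 100; ++X) { int Y = X; }
//
// the SCEV of Y starts out as (sext i8 {0,+,1}<L> to i32). The
// backedge-taken count is 99; assuming ScalarEvolution can compute it for
// this loop, Start + Step*MaxBECount == 99 fits in i8 with no signed wrap,
// so the addrec is marked <nsw> and the cast folds to {0,+,1}<nsw> in i32.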
1306
Dan Gohman8db2edc2009-06-13 15:56:47 +00001307/// getAnyExtendExpr - Return a SCEV for the given operand extended with
1308/// unspecified bits out to the given type.
1309///
Dan Gohmanaf752342009-07-07 17:06:11 +00001310const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
Chris Lattner229907c2011-07-18 04:54:35 +00001311 Type *Ty) {
Dan Gohman8db2edc2009-06-13 15:56:47 +00001312 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1313 "This is not an extending conversion!");
1314 assert(isSCEVable(Ty) &&
1315 "This is not a conversion to a SCEVable type!");
1316 Ty = getEffectiveSCEVType(Ty);
1317
1318 // Sign-extend negative constants.
1319 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1320 if (SC->getValue()->getValue().isNegative())
1321 return getSignExtendExpr(Op, Ty);
1322
1323 // Peel off a truncate cast.
1324 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
Dan Gohmanaf752342009-07-07 17:06:11 +00001325 const SCEV *NewOp = T->getOperand();
Dan Gohman8db2edc2009-06-13 15:56:47 +00001326 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1327 return getAnyExtendExpr(NewOp, Ty);
1328 return getTruncateOrNoop(NewOp, Ty);
1329 }
1330
1331 // Next try a zext cast. If the cast is folded, use it.
Dan Gohmanaf752342009-07-07 17:06:11 +00001332 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
Dan Gohman8db2edc2009-06-13 15:56:47 +00001333 if (!isa<SCEVZeroExtendExpr>(ZExt))
1334 return ZExt;
1335
1336 // Next try a sext cast. If the cast is folded, use it.
Dan Gohmanaf752342009-07-07 17:06:11 +00001337 const SCEV *SExt = getSignExtendExpr(Op, Ty);
Dan Gohman8db2edc2009-06-13 15:56:47 +00001338 if (!isa<SCEVSignExtendExpr>(SExt))
1339 return SExt;
1340
Dan Gohman51ad99d2010-01-21 02:09:26 +00001341 // Force the cast to be folded into the operands of an addrec.
1342 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1343 SmallVector<const SCEV *, 4> Ops;
1344 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
1345 I != E; ++I)
1346 Ops.push_back(getAnyExtendExpr(*I, Ty));
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001347 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
Dan Gohman51ad99d2010-01-21 02:09:26 +00001348 }
1349
Dan Gohman8db2edc2009-06-13 15:56:47 +00001350 // If the expression is obviously signed, use the sext cast value.
1351 if (isa<SCEVSMaxExpr>(Op))
1352 return SExt;
1353
1354 // Absent any other information, use the zext cast value.
1355 return ZExt;
1356}
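
// Illustrative examples of the preference order in getAnyExtendExpr above
// (a sketch, not a specification):
//
//   getAnyExtendExpr(-2 (i8), i32)          --> the sext form, i.e. -2 (i32)
//   getAnyExtendExpr(trunc(X to i8), i32)   --> re-extend or truncate X itself
//   getAnyExtendExpr(smax(A, B) (i8), i32)  --> the sext form, if neither the
//                                               zext nor the sext folded
//
// Any other operand with no simpler form defaults to the zext form.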
1357
Dan Gohman038d02e2009-06-14 22:58:51 +00001358/// CollectAddOperandsWithScales - Process the given Ops list, which is
1359/// a list of operands to be added under the given scale, and update the given
1360/// map. This is a helper function for getAddExpr. As an example of
1361/// what it does, given a sequence of operands that would form an add
1362/// expression like this:
1363///
Tobias Grosserba49e422014-03-05 10:37:17 +00001364/// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
Dan Gohman038d02e2009-06-14 22:58:51 +00001365///
1366/// where A and B are constants, update the map with these values:
1367///
1368/// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1369///
1370/// and add 13 + A*B*29 to AccumulatedConstant.
1371/// This will allow getAddExpr to produce this:
1372///
1373/// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1374///
1375/// This form often exposes folding opportunities that are hidden in
1376/// the original operand list.
1377///
Sylvestre Ledru91ce36c2012-09-27 10:14:43 +00001378/// Return true iff it appears that any interesting folding opportunities
Dan Gohman038d02e2009-06-14 22:58:51 +00001379/// may be exposed. This helps getAddExpr short-circuit extra work in
1380/// the common case where no interesting opportunities are present, and
1381/// is also used as a check to avoid infinite recursion.
1382///
1383static bool
Dan Gohmanaf752342009-07-07 17:06:11 +00001384CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
Craig Topper2cd5ff82013-07-11 16:22:38 +00001385 SmallVectorImpl<const SCEV *> &NewOps,
Dan Gohman038d02e2009-06-14 22:58:51 +00001386 APInt &AccumulatedConstant,
Dan Gohman00524492010-03-18 01:17:13 +00001387 const SCEV *const *Ops, size_t NumOperands,
Dan Gohman038d02e2009-06-14 22:58:51 +00001388 const APInt &Scale,
1389 ScalarEvolution &SE) {
1390 bool Interesting = false;
1391
Dan Gohman45073042010-06-18 19:12:32 +00001392 // Iterate over the add operands. They are sorted, with constants first.
1393 unsigned i = 0;
1394 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1395 ++i;
1396 // Pull a buried constant out to the outside.
1397 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1398 Interesting = true;
1399 AccumulatedConstant += Scale * C->getValue()->getValue();
1400 }
1401
1402 // Next comes everything else. We're especially interested in multiplies
1403 // here, but they're in the middle, so just visit the rest with one loop.
1404 for (; i != NumOperands; ++i) {
Dan Gohman038d02e2009-06-14 22:58:51 +00001405 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1406 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1407 APInt NewScale =
1408 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1409 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1410 // A multiplication of a constant with another add; recurse.
Dan Gohman00524492010-03-18 01:17:13 +00001411 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
Dan Gohman038d02e2009-06-14 22:58:51 +00001412 Interesting |=
1413 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
Dan Gohman00524492010-03-18 01:17:13 +00001414 Add->op_begin(), Add->getNumOperands(),
Dan Gohman038d02e2009-06-14 22:58:51 +00001415 NewScale, SE);
1416 } else {
1417 // A multiplication of a constant with some other value. Update
1418 // the map.
Dan Gohmanaf752342009-07-07 17:06:11 +00001419 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1420 const SCEV *Key = SE.getMulExpr(MulOps);
1421 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
Dan Gohmane00beaa2009-06-29 18:25:52 +00001422 M.insert(std::make_pair(Key, NewScale));
Dan Gohman038d02e2009-06-14 22:58:51 +00001423 if (Pair.second) {
Dan Gohman038d02e2009-06-14 22:58:51 +00001424 NewOps.push_back(Pair.first->first);
1425 } else {
1426 Pair.first->second += NewScale;
1427 // The map already had an entry for this value, which may indicate
1428 // a folding opportunity.
1429 Interesting = true;
1430 }
1431 }
Dan Gohman038d02e2009-06-14 22:58:51 +00001432 } else {
1433 // An ordinary operand. Update the map.
Dan Gohmanaf752342009-07-07 17:06:11 +00001434 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
Dan Gohmane00beaa2009-06-29 18:25:52 +00001435 M.insert(std::make_pair(Ops[i], Scale));
Dan Gohman038d02e2009-06-14 22:58:51 +00001436 if (Pair.second) {
Dan Gohman038d02e2009-06-14 22:58:51 +00001437 NewOps.push_back(Pair.first->first);
1438 } else {
1439 Pair.first->second += Scale;
1440 // The map already had an entry for this value, which may indicate
1441 // a folding opportunity.
1442 Interesting = true;
1443 }
1444 }
1445 }
1446
1447 return Interesting;
1448}
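
// Small worked instance for CollectAddOperandsWithScales above (illustrative
// only): for the operand list (5, x, 3*x) with an initial scale of 1, the
// loop leaves AccumulatedConstant == 5 and records x once with scale 1 (from
// the bare x) and once with scale 3 (from 3*x); the second insertion merges
// the map entry into (x, 4) and sets Interesting. The caller can then
// rebuild the sum as 5 + 4*x instead of keeping the unfolded 5 + x + 3*x.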
1449
1450namespace {
1451 struct APIntCompare {
1452 bool operator()(const APInt &LHS, const APInt &RHS) const {
1453 return LHS.ult(RHS);
1454 }
1455 };
1456}
1457
Dan Gohman4d5435d2009-05-24 23:45:28 +00001458/// getAddExpr - Get a canonical add expression, or something simpler if
1459/// possible.
Dan Gohman816fe0a2009-10-09 00:10:36 +00001460const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
Andrew Trick8b55b732011-03-14 16:50:06 +00001461 SCEV::NoWrapFlags Flags) {
1462 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
1463 "only nuw or nsw allowed");
Chris Lattnerd934c702004-04-02 20:23:17 +00001464 assert(!Ops.empty() && "Cannot get empty add!");
Chris Lattner74498e12004-04-07 16:16:11 +00001465 if (Ops.size() == 1) return Ops[0];
Dan Gohmand33f36e2009-05-18 15:44:58 +00001466#ifndef NDEBUG
Chris Lattner229907c2011-07-18 04:54:35 +00001467 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Dan Gohmand33f36e2009-05-18 15:44:58 +00001468 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
Dan Gohman9136d9f2010-06-18 19:09:27 +00001469 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
Dan Gohmand33f36e2009-05-18 15:44:58 +00001470 "SCEVAddExpr operand types don't match!");
1471#endif
Chris Lattnerd934c702004-04-02 20:23:17 +00001472
Andrew Trick8b55b732011-03-14 16:50:06 +00001473 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001474 // And vice-versa.
1475 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1476 SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
1477 if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00001478 bool All = true;
Dan Gohman74c61502010-08-16 16:27:53 +00001479 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1480 E = Ops.end(); I != E; ++I)
1481 if (!isKnownNonNegative(*I)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00001482 All = false;
1483 break;
1484 }
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001485 if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
Dan Gohman51ad99d2010-01-21 02:09:26 +00001486 }
1487
Chris Lattnerd934c702004-04-02 20:23:17 +00001488 // Sort by complexity, this groups all similar expression types together.
Dan Gohman9ba542c2009-05-07 14:39:04 +00001489 GroupByComplexity(Ops, LI);
Chris Lattnerd934c702004-04-02 20:23:17 +00001490
1491 // If there are any constants, fold them together.
1492 unsigned Idx = 0;
Dan Gohmana30370b2009-05-04 22:02:23 +00001493 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001494 ++Idx;
Chris Lattner74498e12004-04-07 16:16:11 +00001495 assert(Idx < Ops.size());
Dan Gohmana30370b2009-05-04 22:02:23 +00001496 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001497 // We found two constants, fold them together!
Dan Gohman0652fd52009-06-14 22:47:23 +00001498 Ops[0] = getConstant(LHSC->getValue()->getValue() +
1499 RHSC->getValue()->getValue());
Dan Gohman011cf682009-06-14 22:53:57 +00001500 if (Ops.size() == 2) return Ops[0];
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00001501 Ops.erase(Ops.begin()+1); // Erase the folded element
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00001502 LHSC = cast<SCEVConstant>(Ops[0]);
Chris Lattnerd934c702004-04-02 20:23:17 +00001503 }
1504
1505 // If we are left with a constant zero being added, strip it off.
Dan Gohmanebbd05f2010-04-12 23:08:18 +00001506 if (LHSC->getValue()->isZero()) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001507 Ops.erase(Ops.begin());
1508 --Idx;
1509 }
Chris Lattnerd934c702004-04-02 20:23:17 +00001510
Dan Gohmanebbd05f2010-04-12 23:08:18 +00001511 if (Ops.size() == 1) return Ops[0];
1512 }
Misha Brukman01808ca2005-04-21 21:13:18 +00001513
Dan Gohman15871f22010-08-27 21:39:59 +00001514 // Okay, check to see if the same value occurs in the operand list more than
1515 // once. If so, merge them together into a multiply expression. Since we
1516 // sorted the list, these values are required to be adjacent.
Chris Lattner229907c2011-07-18 04:54:35 +00001517 Type *Ty = Ops[0]->getType();
Dan Gohmane67b2872010-08-12 14:46:54 +00001518 bool FoundMatch = false;
Dan Gohman15871f22010-08-27 21:39:59 +00001519 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
Chris Lattnerd934c702004-04-02 20:23:17 +00001520 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
Dan Gohman15871f22010-08-27 21:39:59 +00001521 // Scan ahead to count how many equal operands there are.
1522 unsigned Count = 2;
1523 while (i+Count != e && Ops[i+Count] == Ops[i])
1524 ++Count;
1525 // Merge the values into a multiply.
1526 const SCEV *Scale = getConstant(Ty, Count);
1527 const SCEV *Mul = getMulExpr(Scale, Ops[i]);
1528 if (Ops.size() == Count)
Chris Lattnerd934c702004-04-02 20:23:17 +00001529 return Mul;
Dan Gohmane67b2872010-08-12 14:46:54 +00001530 Ops[i] = Mul;
Dan Gohman15871f22010-08-27 21:39:59 +00001531 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
Dan Gohmanfe22f1d2010-08-28 00:39:27 +00001532 --i; e -= Count - 1;
Dan Gohmane67b2872010-08-12 14:46:54 +00001533 FoundMatch = true;
Chris Lattnerd934c702004-04-02 20:23:17 +00001534 }
Dan Gohmane67b2872010-08-12 14:46:54 +00001535 if (FoundMatch)
Andrew Trick8b55b732011-03-14 16:50:06 +00001536 return getAddExpr(Ops, Flags);
Chris Lattnerd934c702004-04-02 20:23:17 +00001537
Dan Gohman2e55cc52009-05-08 21:03:19 +00001538 // Check for truncates. If all the operands are truncated from the same
1539 // type, see if factoring out the truncate would permit the result to be
1540 // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1541 // if the contents of the resulting outer trunc fold to something simple.
1542 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1543 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
Chris Lattner229907c2011-07-18 04:54:35 +00001544 Type *DstType = Trunc->getType();
1545 Type *SrcType = Trunc->getOperand()->getType();
Dan Gohmanaf752342009-07-07 17:06:11 +00001546 SmallVector<const SCEV *, 8> LargeOps;
Dan Gohman2e55cc52009-05-08 21:03:19 +00001547 bool Ok = true;
1548 // Check all the operands to see if they can be represented in the
1549 // source type of the truncate.
1550 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1551 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1552 if (T->getOperand()->getType() != SrcType) {
1553 Ok = false;
1554 break;
1555 }
1556 LargeOps.push_back(T->getOperand());
1557 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
Dan Gohmanff3174e2010-04-23 01:51:29 +00001558 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
Dan Gohman2e55cc52009-05-08 21:03:19 +00001559 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
Dan Gohmanaf752342009-07-07 17:06:11 +00001560 SmallVector<const SCEV *, 8> LargeMulOps;
Dan Gohman2e55cc52009-05-08 21:03:19 +00001561 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1562 if (const SCEVTruncateExpr *T =
1563 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1564 if (T->getOperand()->getType() != SrcType) {
1565 Ok = false;
1566 break;
1567 }
1568 LargeMulOps.push_back(T->getOperand());
1569 } else if (const SCEVConstant *C =
1570 dyn_cast<SCEVConstant>(M->getOperand(j))) {
Dan Gohmanff3174e2010-04-23 01:51:29 +00001571 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
Dan Gohman2e55cc52009-05-08 21:03:19 +00001572 } else {
1573 Ok = false;
1574 break;
1575 }
1576 }
1577 if (Ok)
1578 LargeOps.push_back(getMulExpr(LargeMulOps));
1579 } else {
1580 Ok = false;
1581 break;
1582 }
1583 }
1584 if (Ok) {
1585 // Evaluate the expression in the larger type.
Andrew Trick8b55b732011-03-14 16:50:06 +00001586 const SCEV *Fold = getAddExpr(LargeOps, Flags);
Dan Gohman2e55cc52009-05-08 21:03:19 +00001587 // If it folds to something simple, use it. Otherwise, don't.
1588 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1589 return getTruncateExpr(Fold, DstType);
1590 }
1591 }
1592
1593 // Skip past any other cast SCEVs.
Dan Gohmaneed125f2007-06-18 19:30:09 +00001594 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1595 ++Idx;
1596
1597 // If there are add operands they would be next.
Chris Lattnerd934c702004-04-02 20:23:17 +00001598 if (Idx < Ops.size()) {
1599 bool DeletedAdd = false;
Dan Gohmana30370b2009-05-04 22:02:23 +00001600 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001601 // If we have an add, expand the add operands onto the end of the operands
1602 // list.
Chris Lattnerd934c702004-04-02 20:23:17 +00001603 Ops.erase(Ops.begin()+Idx);
Dan Gohmandd41bba2010-06-21 19:47:52 +00001604 Ops.append(Add->op_begin(), Add->op_end());
Chris Lattnerd934c702004-04-02 20:23:17 +00001605 DeletedAdd = true;
1606 }
1607
1608 // If we deleted at least one add, we added operands to the end of the list,
1609 // and they are not necessarily sorted. Recurse to re-sort and re-simplify
Dan Gohman8b0a4192010-03-01 17:49:51 +00001610 // any operands we just acquired.
Chris Lattnerd934c702004-04-02 20:23:17 +00001611 if (DeletedAdd)
Dan Gohmana37eaf22007-10-22 18:31:58 +00001612 return getAddExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00001613 }
1614
1615 // Skip over the add expression until we get to a multiply.
1616 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1617 ++Idx;
1618
Dan Gohman038d02e2009-06-14 22:58:51 +00001619 // Check to see if there are any folding opportunities present with
1620 // operands multiplied by constant values.
1621 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1622 uint64_t BitWidth = getTypeSizeInBits(Ty);
Dan Gohmanaf752342009-07-07 17:06:11 +00001623 DenseMap<const SCEV *, APInt> M;
1624 SmallVector<const SCEV *, 8> NewOps;
Dan Gohman038d02e2009-06-14 22:58:51 +00001625 APInt AccumulatedConstant(BitWidth, 0);
1626 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
Dan Gohman00524492010-03-18 01:17:13 +00001627 Ops.data(), Ops.size(),
1628 APInt(BitWidth, 1), *this)) {
Dan Gohman038d02e2009-06-14 22:58:51 +00001629 // Some interesting folding opportunity is present, so it's worthwhile to
1630 // re-generate the operands list. Group the operands by constant scale,
1631 // to avoid multiplying by the same constant scale multiple times.
Dan Gohmanaf752342009-07-07 17:06:11 +00001632 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
Craig Topper31ee5862013-07-03 15:07:05 +00001633 for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
Dan Gohman038d02e2009-06-14 22:58:51 +00001634 E = NewOps.end(); I != E; ++I)
1635 MulOpLists[M.find(*I)->second].push_back(*I);
1636 // Re-generate the operands list.
1637 Ops.clear();
1638 if (AccumulatedConstant != 0)
1639 Ops.push_back(getConstant(AccumulatedConstant));
Dan Gohmance973df2009-06-24 04:48:43 +00001640 for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1641 I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
Dan Gohman038d02e2009-06-14 22:58:51 +00001642 if (I->first != 0)
Dan Gohmance973df2009-06-24 04:48:43 +00001643 Ops.push_back(getMulExpr(getConstant(I->first),
1644 getAddExpr(I->second)));
Dan Gohman038d02e2009-06-14 22:58:51 +00001645 if (Ops.empty())
Dan Gohman1d2ded72010-05-03 22:09:21 +00001646 return getConstant(Ty, 0);
Dan Gohman038d02e2009-06-14 22:58:51 +00001647 if (Ops.size() == 1)
1648 return Ops[0];
1649 return getAddExpr(Ops);
1650 }
1651 }
1652
Chris Lattnerd934c702004-04-02 20:23:17 +00001653 // If we are adding something to a multiply expression, make sure the
1654 // something is not already an operand of the multiply. If so, merge it into
1655 // the multiply.
1656 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
Dan Gohman48f82222009-05-04 22:30:44 +00001657 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
Chris Lattnerd934c702004-04-02 20:23:17 +00001658 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
Dan Gohman48f82222009-05-04 22:30:44 +00001659 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
Dan Gohman157847f2010-08-12 14:52:55 +00001660 if (isa<SCEVConstant>(MulOpSCEV))
1661 continue;
Chris Lattnerd934c702004-04-02 20:23:17 +00001662 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
Dan Gohman157847f2010-08-12 14:52:55 +00001663 if (MulOpSCEV == Ops[AddOp]) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001664 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
Dan Gohmanaf752342009-07-07 17:06:11 +00001665 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
Chris Lattnerd934c702004-04-02 20:23:17 +00001666 if (Mul->getNumOperands() != 2) {
1667 // If the multiply has more than two operands, we must get the
1668 // Y*Z term.
Dan Gohman797a1db2010-08-16 16:57:24 +00001669 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1670 Mul->op_begin()+MulOp);
1671 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
Dan Gohmana37eaf22007-10-22 18:31:58 +00001672 InnerMul = getMulExpr(MulOps);
Chris Lattnerd934c702004-04-02 20:23:17 +00001673 }
Dan Gohman1d2ded72010-05-03 22:09:21 +00001674 const SCEV *One = getConstant(Ty, 1);
Dan Gohmancf32f2b2010-08-13 20:17:14 +00001675 const SCEV *AddOne = getAddExpr(One, InnerMul);
Dan Gohman157847f2010-08-12 14:52:55 +00001676 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
Chris Lattnerd934c702004-04-02 20:23:17 +00001677 if (Ops.size() == 2) return OuterMul;
1678 if (AddOp < Idx) {
1679 Ops.erase(Ops.begin()+AddOp);
1680 Ops.erase(Ops.begin()+Idx-1);
1681 } else {
1682 Ops.erase(Ops.begin()+Idx);
1683 Ops.erase(Ops.begin()+AddOp-1);
1684 }
1685 Ops.push_back(OuterMul);
Dan Gohmana37eaf22007-10-22 18:31:58 +00001686 return getAddExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00001687 }
Misha Brukman01808ca2005-04-21 21:13:18 +00001688
Chris Lattnerd934c702004-04-02 20:23:17 +00001689 // Check this multiply against other multiplies being added together.
1690 for (unsigned OtherMulIdx = Idx+1;
1691 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1692 ++OtherMulIdx) {
Dan Gohman48f82222009-05-04 22:30:44 +00001693 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
Chris Lattnerd934c702004-04-02 20:23:17 +00001694 // If MulOp occurs in OtherMul, we can fold the two multiplies
1695 // together.
1696 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1697 OMulOp != e; ++OMulOp)
1698 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1699 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
Dan Gohmanaf752342009-07-07 17:06:11 +00001700 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
Chris Lattnerd934c702004-04-02 20:23:17 +00001701 if (Mul->getNumOperands() != 2) {
Dan Gohmance973df2009-06-24 04:48:43 +00001702 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
Dan Gohman797a1db2010-08-16 16:57:24 +00001703 Mul->op_begin()+MulOp);
1704 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
Dan Gohmana37eaf22007-10-22 18:31:58 +00001705 InnerMul1 = getMulExpr(MulOps);
Chris Lattnerd934c702004-04-02 20:23:17 +00001706 }
Dan Gohmanaf752342009-07-07 17:06:11 +00001707 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
Chris Lattnerd934c702004-04-02 20:23:17 +00001708 if (OtherMul->getNumOperands() != 2) {
Dan Gohmance973df2009-06-24 04:48:43 +00001709 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
Dan Gohman797a1db2010-08-16 16:57:24 +00001710 OtherMul->op_begin()+OMulOp);
1711 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
Dan Gohmana37eaf22007-10-22 18:31:58 +00001712 InnerMul2 = getMulExpr(MulOps);
Chris Lattnerd934c702004-04-02 20:23:17 +00001713 }
Dan Gohmanaf752342009-07-07 17:06:11 +00001714 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1715 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
Chris Lattnerd934c702004-04-02 20:23:17 +00001716 if (Ops.size() == 2) return OuterMul;
Dan Gohmanaabfc522010-08-31 22:50:31 +00001717 Ops.erase(Ops.begin()+Idx);
1718 Ops.erase(Ops.begin()+OtherMulIdx-1);
1719 Ops.push_back(OuterMul);
1720 return getAddExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00001721 }
1722 }
1723 }
1724 }
1725
1726 // If there are any add recurrences in the operands list, see if any other
1727 // added values are loop invariant. If so, we can fold them into the
1728 // recurrence.
1729 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1730 ++Idx;
1731
1732 // Scan over all recurrences, trying to fold loop invariants into them.
1733 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1734 // Scan all of the other operands to this add and add them to the vector if
1735 // they are loop invariant w.r.t. the recurrence.
Dan Gohmanaf752342009-07-07 17:06:11 +00001736 SmallVector<const SCEV *, 8> LIOps;
Dan Gohman48f82222009-05-04 22:30:44 +00001737 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
Dan Gohmanebbd05f2010-04-12 23:08:18 +00001738 const Loop *AddRecLoop = AddRec->getLoop();
Chris Lattnerd934c702004-04-02 20:23:17 +00001739 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Dan Gohmanafd6db92010-11-17 21:23:15 +00001740 if (isLoopInvariant(Ops[i], AddRecLoop)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001741 LIOps.push_back(Ops[i]);
1742 Ops.erase(Ops.begin()+i);
1743 --i; --e;
1744 }
1745
1746 // If we found some loop invariants, fold them into the recurrence.
1747 if (!LIOps.empty()) {
Dan Gohman81313fd2008-09-14 17:21:12 +00001748 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
Chris Lattnerd934c702004-04-02 20:23:17 +00001749 LIOps.push_back(AddRec->getStart());
1750
Dan Gohmanaf752342009-07-07 17:06:11 +00001751 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
Dan Gohman7a2dab82009-12-18 03:57:04 +00001752 AddRec->op_end());
Dan Gohmana37eaf22007-10-22 18:31:58 +00001753 AddRecOps[0] = getAddExpr(LIOps);
Chris Lattnerd934c702004-04-02 20:23:17 +00001754
Dan Gohman16206132010-06-30 07:16:37 +00001755 // Build the new addrec. Propagate the NUW and NSW flags if both the
Eric Christopher23bf3ba2011-01-11 09:02:09 +00001756 // outer add and the inner addrec are guaranteed to have no overflow.
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001757 // Always propagate NW.
1758 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
Andrew Trick8b55b732011-03-14 16:50:06 +00001759 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
Dan Gohman51f13052009-12-18 18:45:31 +00001760
Chris Lattnerd934c702004-04-02 20:23:17 +00001761 // If all of the other operands were loop invariant, we are done.
1762 if (Ops.size() == 1) return NewRec;
1763
Nick Lewyckydb66b822011-09-06 05:08:09 +00001764 // Otherwise, add the folded AddRec to the non-invariant parts.
Chris Lattnerd934c702004-04-02 20:23:17 +00001765 for (unsigned i = 0;; ++i)
1766 if (Ops[i] == AddRec) {
1767 Ops[i] = NewRec;
1768 break;
1769 }
Dan Gohmana37eaf22007-10-22 18:31:58 +00001770 return getAddExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00001771 }
1772
1773 // Okay, if there weren't any loop invariants to be folded, check to see if
1774 // there are multiple AddRec's with the same loop induction variable being
1775 // added together. If so, we can fold them.
1776 for (unsigned OtherIdx = Idx+1;
Dan Gohmanc866bf42010-08-27 20:45:56 +00001777 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1778 ++OtherIdx)
1779 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1780 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
1781 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1782 AddRec->op_end());
1783 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1784 ++OtherIdx)
Dan Gohman028c1812010-08-29 14:53:34 +00001785 if (const SCEVAddRecExpr *OtherAddRec =
Dan Gohmanc866bf42010-08-27 20:45:56 +00001786 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
Dan Gohman028c1812010-08-29 14:53:34 +00001787 if (OtherAddRec->getLoop() == AddRecLoop) {
1788 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
1789 i != e; ++i) {
Dan Gohmanc866bf42010-08-27 20:45:56 +00001790 if (i >= AddRecOps.size()) {
Dan Gohman028c1812010-08-29 14:53:34 +00001791 AddRecOps.append(OtherAddRec->op_begin()+i,
1792 OtherAddRec->op_end());
Dan Gohmanc866bf42010-08-27 20:45:56 +00001793 break;
1794 }
Dan Gohman028c1812010-08-29 14:53:34 +00001795 AddRecOps[i] = getAddExpr(AddRecOps[i],
1796 OtherAddRec->getOperand(i));
Dan Gohmanc866bf42010-08-27 20:45:56 +00001797 }
1798 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
Chris Lattnerd934c702004-04-02 20:23:17 +00001799 }
Andrew Trick8b55b732011-03-14 16:50:06 +00001800 // Step size has changed, so we cannot guarantee no self-wraparound.
1801 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
Dan Gohmanc866bf42010-08-27 20:45:56 +00001802 return getAddExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00001803 }
1804
1805 // Otherwise couldn't fold anything into this recurrence. Move onto the
1806 // next one.
1807 }
1808
1809 // Okay, it looks like we really DO need an add expr. Check to see if we
1810 // already have one, otherwise create a new one.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00001811 FoldingSetNodeID ID;
1812 ID.AddInteger(scAddExpr);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00001813 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1814 ID.AddPointer(Ops[i]);
Craig Topper9f008862014-04-15 04:59:12 +00001815 void *IP = nullptr;
Dan Gohman51ad99d2010-01-21 02:09:26 +00001816 SCEVAddExpr *S =
1817 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1818 if (!S) {
Dan Gohman00524492010-03-18 01:17:13 +00001819 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1820 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
Dan Gohman01c65a22010-03-18 18:49:47 +00001821 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1822 O, Ops.size());
Dan Gohman51ad99d2010-01-21 02:09:26 +00001823 UniqueSCEVs.InsertNode(S, IP);
1824 }
Andrew Trick8b55b732011-03-14 16:50:06 +00001825 S->setNoWrapFlags(Flags);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00001826 return S;
Chris Lattnerd934c702004-04-02 20:23:17 +00001827}
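
// Illustrative sketch for getAddExpr above (assumes SE is a ScalarEvolution
// and X is the SCEV of some i32 value; not part of the analysis itself):
//
//   Type *I32 = IntegerType::get(SE.getContext(), 32);
//   const SCEV *S = SE.getAddExpr(SE.getAddExpr(X, SE.getConstant(I32, 7)),
//                                 SE.getAddExpr(X, SE.getConstant(I32, 5)));
//   // The nested adds are inlined, the constants fold to 12, and the
//   // repeated X is merged into 2*X, so S is the canonical (12 + (2 * X))
//   // rather than a four-operand tree.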
1828
Nick Lewycky287682e2011-10-04 06:51:26 +00001829static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
1830 uint64_t k = i*j;
1831 if (j > 1 && k / j != i) Overflow = true;
1832 return k;
1833}
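
// For example (sketch): umul_ov(1ULL << 63, 2, Overflow) computes k == 0,
// notices that k / 2 != (1ULL << 63), sets Overflow, and returns the wrapped
// product 0. Callers such as Choose below must therefore check Overflow
// rather than trusting the returned value.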
1834
1835/// Compute the result of "n choose k", the binomial coefficient. If an
1836/// intermediate computation overflows, Overflow will be set and the return will
Benjamin Kramerbde91762012-06-02 10:20:22 +00001837/// be garbage. Overflow is not cleared on absence of overflow.
Nick Lewycky287682e2011-10-04 06:51:26 +00001838static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
1839 // We use the multiplicative formula:
1840 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
1841 // At each iteration, we take the next term of the numerator and divide by
1842 // the next term of the denominator. This division will always produce an
1843 // integral result, and helps reduce the chance of overflow in the
1844 // intermediate computations. However, we can still overflow even when the
1845 // final result would fit.
1846
1847 if (n == 0 || n == k) return 1;
1848 if (k > n) return 0;
1849
1850 if (k > n/2)
1851 k = n-k;
1852
1853 uint64_t r = 1;
1854 for (uint64_t i = 1; i <= k; ++i) {
1855 r = umul_ov(r, n-(i-1), Overflow);
1856 r /= i;
1857 }
1858 return r;
1859}
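
// Worked example for Choose above (illustrative only): Choose(6, 2, Overflow)
// keeps k == 2 (since 2 <= 6/2), then computes r = 1*6 = 6, r /= 1 (still 6),
// r = 6*5 = 30, r /= 2, returning 15 == C(6,2). Overflow is left untouched
// because no intermediate product exceeded 64 bits.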
1860
Dan Gohman4d5435d2009-05-24 23:45:28 +00001861/// getMulExpr - Get a canonical multiply expression, or something simpler if
1862/// possible.
Dan Gohman816fe0a2009-10-09 00:10:36 +00001863const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
Andrew Trick8b55b732011-03-14 16:50:06 +00001864 SCEV::NoWrapFlags Flags) {
1865 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
1866 "only nuw or nsw allowed");
Chris Lattnerd934c702004-04-02 20:23:17 +00001867 assert(!Ops.empty() && "Cannot get empty mul!");
Dan Gohman51ad99d2010-01-21 02:09:26 +00001868 if (Ops.size() == 1) return Ops[0];
Dan Gohmand33f36e2009-05-18 15:44:58 +00001869#ifndef NDEBUG
Chris Lattner229907c2011-07-18 04:54:35 +00001870 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Dan Gohmand33f36e2009-05-18 15:44:58 +00001871 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
Dan Gohmanb6c773e2010-08-16 16:13:54 +00001872 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
Dan Gohmand33f36e2009-05-18 15:44:58 +00001873 "SCEVMulExpr operand types don't match!");
1874#endif
Chris Lattnerd934c702004-04-02 20:23:17 +00001875
Andrew Trick8b55b732011-03-14 16:50:06 +00001876 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001877 // And vice-versa.
1878 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1879 SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
1880 if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00001881 bool All = true;
Dan Gohman74c61502010-08-16 16:27:53 +00001882 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1883 E = Ops.end(); I != E; ++I)
1884 if (!isKnownNonNegative(*I)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00001885 All = false;
1886 break;
1887 }
Andrew Trickf6b01ff2011-03-15 00:37:00 +00001888 if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
Dan Gohman51ad99d2010-01-21 02:09:26 +00001889 }
1890
Chris Lattnerd934c702004-04-02 20:23:17 +00001891 // Sort by complexity, this groups all similar expression types together.
Dan Gohman9ba542c2009-05-07 14:39:04 +00001892 GroupByComplexity(Ops, LI);
Chris Lattnerd934c702004-04-02 20:23:17 +00001893
1894 // If there are any constants, fold them together.
1895 unsigned Idx = 0;
Dan Gohmana30370b2009-05-04 22:02:23 +00001896 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001897
1898 // C1*(C2+V) -> C1*C2 + C1*V
1899 if (Ops.size() == 2)
Dan Gohmana30370b2009-05-04 22:02:23 +00001900 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
Chris Lattnerd934c702004-04-02 20:23:17 +00001901 if (Add->getNumOperands() == 2 &&
1902 isa<SCEVConstant>(Add->getOperand(0)))
Dan Gohmana37eaf22007-10-22 18:31:58 +00001903 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1904 getMulExpr(LHSC, Add->getOperand(1)));
Chris Lattnerd934c702004-04-02 20:23:17 +00001905
Chris Lattnerd934c702004-04-02 20:23:17 +00001906 ++Idx;
Dan Gohmana30370b2009-05-04 22:02:23 +00001907 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001908 // We found two constants, fold them together!
Owen Andersonedb4a702009-07-24 23:12:02 +00001909 ConstantInt *Fold = ConstantInt::get(getContext(),
1910 LHSC->getValue()->getValue() *
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00001911 RHSC->getValue()->getValue());
1912 Ops[0] = getConstant(Fold);
1913 Ops.erase(Ops.begin()+1); // Erase the folded element
1914 if (Ops.size() == 1) return Ops[0];
1915 LHSC = cast<SCEVConstant>(Ops[0]);
Chris Lattnerd934c702004-04-02 20:23:17 +00001916 }
1917
1918 // If we are left with a constant one being multiplied, strip it off.
1919 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1920 Ops.erase(Ops.begin());
1921 --Idx;
Reid Spencer2e54a152007-03-02 00:28:52 +00001922 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001923 // If we have a multiply of zero, it will always be zero.
1924 return Ops[0];
Dan Gohman51ad99d2010-01-21 02:09:26 +00001925 } else if (Ops[0]->isAllOnesValue()) {
1926 // If we have a mul by -1 of an add, try distributing the -1 among the
1927 // add operands.
Andrew Trick8b55b732011-03-14 16:50:06 +00001928 if (Ops.size() == 2) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00001929 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1930 SmallVector<const SCEV *, 4> NewOps;
1931 bool AnyFolded = false;
Andrew Trick8b55b732011-03-14 16:50:06 +00001932 for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
1933 E = Add->op_end(); I != E; ++I) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00001934 const SCEV *Mul = getMulExpr(Ops[0], *I);
1935 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1936 NewOps.push_back(Mul);
1937 }
1938 if (AnyFolded)
1939 return getAddExpr(NewOps);
1940 }
Andrew Tricke92dcce2011-03-14 17:38:54 +00001941 else if (const SCEVAddRecExpr *
1942 AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
1943 // Negation preserves a recurrence's no self-wrap property.
1944 SmallVector<const SCEV *, 4> Operands;
1945 for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
1946 E = AddRec->op_end(); I != E; ++I) {
1947 Operands.push_back(getMulExpr(Ops[0], *I));
1948 }
1949 return getAddRecExpr(Operands, AddRec->getLoop(),
1950 AddRec->getNoWrapFlags(SCEV::FlagNW));
1951 }
Andrew Trick8b55b732011-03-14 16:50:06 +00001952 }
Chris Lattnerd934c702004-04-02 20:23:17 +00001953 }
Dan Gohmanfe4b2912010-04-13 16:49:23 +00001954
1955 if (Ops.size() == 1)
1956 return Ops[0];
Chris Lattnerd934c702004-04-02 20:23:17 +00001957 }
1958
1959 // Skip over the add expression until we get to a multiply.
1960 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1961 ++Idx;
1962
Chris Lattnerd934c702004-04-02 20:23:17 +00001963 // If there are mul operands inline them all into this expression.
1964 if (Idx < Ops.size()) {
1965 bool DeletedMul = false;
Dan Gohmana30370b2009-05-04 22:02:23 +00001966 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001967 // If we have a mul, expand the mul operands onto the end of the operands
1968 // list.
Chris Lattnerd934c702004-04-02 20:23:17 +00001969 Ops.erase(Ops.begin()+Idx);
Dan Gohmandd41bba2010-06-21 19:47:52 +00001970 Ops.append(Mul->op_begin(), Mul->op_end());
Chris Lattnerd934c702004-04-02 20:23:17 +00001971 DeletedMul = true;
1972 }
1973
1974 // If we deleted at least one mul, we added operands to the end of the list,
1975 // and they are not necessarily sorted. Recurse to re-sort and re-simplify
Dan Gohman8b0a4192010-03-01 17:49:51 +00001976 // any operands we just acquired.
Chris Lattnerd934c702004-04-02 20:23:17 +00001977 if (DeletedMul)
Dan Gohmana37eaf22007-10-22 18:31:58 +00001978 return getMulExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00001979 }
1980
1981 // If there are any add recurrences in the operands list, see if any other
1982 // added values are loop invariant. If so, we can fold them into the
1983 // recurrence.
1984 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1985 ++Idx;
1986
1987 // Scan over all recurrences, trying to fold loop invariants into them.
1988 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1989 // Scan all of the other operands to this mul and add them to the vector if
1990 // they are loop invariant w.r.t. the recurrence.
Dan Gohmanaf752342009-07-07 17:06:11 +00001991 SmallVector<const SCEV *, 8> LIOps;
Dan Gohman48f82222009-05-04 22:30:44 +00001992 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
Dan Gohman0f2de012010-08-29 14:55:19 +00001993 const Loop *AddRecLoop = AddRec->getLoop();
Chris Lattnerd934c702004-04-02 20:23:17 +00001994 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Dan Gohmanafd6db92010-11-17 21:23:15 +00001995 if (isLoopInvariant(Ops[i], AddRecLoop)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00001996 LIOps.push_back(Ops[i]);
1997 Ops.erase(Ops.begin()+i);
1998 --i; --e;
1999 }
2000
2001 // If we found some loop invariants, fold them into the recurrence.
2002 if (!LIOps.empty()) {
Dan Gohman81313fd2008-09-14 17:21:12 +00002003 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
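      // For instance, a loop-invariant factor of 2 turns {3,+,5}<L> into
      // {6,+,10}<L>; any non-invariant factors are left as multiplicands of
      // the rewritten recurrence below.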
Dan Gohmanaf752342009-07-07 17:06:11 +00002004 SmallVector<const SCEV *, 4> NewOps;
Chris Lattnerd934c702004-04-02 20:23:17 +00002005 NewOps.reserve(AddRec->getNumOperands());
Dan Gohman8f5954f2010-06-17 23:34:09 +00002006 const SCEV *Scale = getMulExpr(LIOps);
2007 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2008 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
Chris Lattnerd934c702004-04-02 20:23:17 +00002009
Dan Gohman16206132010-06-30 07:16:37 +00002010 // Build the new addrec. Propagate the NUW and NSW flags if both the
2011 // outer mul and the inner addrec are guaranteed to have no overflow.
Andrew Trick8b55b732011-03-14 16:50:06 +00002012 //
2013        // No self-wrap (NW) cannot be guaranteed after changing the step size, but
Chris Lattner0ab5e2c2011-04-15 05:18:47 +00002014 // will be inferred if either NUW or NSW is true.
Andrew Trick8b55b732011-03-14 16:50:06 +00002015 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2016 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
Chris Lattnerd934c702004-04-02 20:23:17 +00002017
2018 // If all of the other operands were loop invariant, we are done.
2019 if (Ops.size() == 1) return NewRec;
2020
Nick Lewyckydb66b822011-09-06 05:08:09 +00002021 // Otherwise, multiply the folded AddRec by the non-invariant parts.
Chris Lattnerd934c702004-04-02 20:23:17 +00002022 for (unsigned i = 0;; ++i)
2023 if (Ops[i] == AddRec) {
2024 Ops[i] = NewRec;
2025 break;
2026 }
Dan Gohmana37eaf22007-10-22 18:31:58 +00002027 return getMulExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00002028 }
2029
2030 // Okay, if there weren't any loop invariants to be folded, check to see if
2031 // there are multiple AddRec's with the same loop induction variable being
2032 // multiplied together. If so, we can fold them.
2033 for (unsigned OtherIdx = Idx+1;
Dan Gohmanf01a5ee2010-08-31 22:52:12 +00002034 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
Nick Lewyckye0aa54b2011-09-06 21:42:18 +00002035 ++OtherIdx) {
Andrew Trick946f76b2012-05-30 03:35:17 +00002036 if (AddRecLoop != cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop())
2037 continue;
2038
2039 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2040 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2041 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2042 // ]]],+,...up to x=2n}.
2043 // Note that the arguments to choose() are always integers with values
2044 // known at compile time, never SCEV objects.
2045 //
2046 // The implementation avoids pointless extra computations when the two
2047 // addrec's are of different length (mathematically, it's equivalent to
2048 // an infinite stream of zeros on the right).
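    // For example, in the linear case {1,+,1}<L> * {1,+,1}<L> folds to
    // {1,+,3,+,2}<L>, matching (1+i)^2 = 1 + 3*C(i,1) + 2*C(i,2).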
2049 bool OpsModified = false;
2050 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2051 ++OtherIdx) {
2052 const SCEVAddRecExpr *OtherAddRec =
2053 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2054 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2055 continue;
2056
2057 bool Overflow = false;
2058 Type *Ty = AddRec->getType();
2059 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2060 SmallVector<const SCEV*, 7> AddRecOps;
2061 for (int x = 0, xe = AddRec->getNumOperands() +
2062 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2063 const SCEV *Term = getConstant(Ty, 0);
2064 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2065 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2066 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2067 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2068 z < ze && !Overflow; ++z) {
2069 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2070 uint64_t Coeff;
2071 if (LargerThan64Bits)
2072 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2073 else
2074 Coeff = Coeff1*Coeff2;
2075 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2076 const SCEV *Term1 = AddRec->getOperand(y-z);
2077 const SCEV *Term2 = OtherAddRec->getOperand(z);
2078 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
Dan Gohmanf01a5ee2010-08-31 22:52:12 +00002079 }
Andrew Trick946f76b2012-05-30 03:35:17 +00002080 }
2081 AddRecOps.push_back(Term);
2082 }
2083 if (!Overflow) {
2084 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2085 SCEV::FlagAnyWrap);
2086 if (Ops.size() == 2) return NewAddRec;
Andrew Tricka3f90432012-05-30 03:35:20 +00002087 Ops[Idx] = NewAddRec;
Andrew Trick946f76b2012-05-30 03:35:17 +00002088 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2089 OpsModified = true;
Andrew Tricka3f90432012-05-30 03:35:20 +00002090 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2091 if (!AddRec)
2092 break;
Andrew Trick946f76b2012-05-30 03:35:17 +00002093 }
Chris Lattnerd934c702004-04-02 20:23:17 +00002094 }
Andrew Trick946f76b2012-05-30 03:35:17 +00002095 if (OpsModified)
2096 return getMulExpr(Ops);
Nick Lewyckye0aa54b2011-09-06 21:42:18 +00002097 }
Chris Lattnerd934c702004-04-02 20:23:17 +00002098
2099 // Otherwise couldn't fold anything into this recurrence. Move onto the
2100 // next one.
2101 }
2102
2103 // Okay, it looks like we really DO need an mul expr. Check to see if we
2104 // already have one, otherwise create a new one.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002105 FoldingSetNodeID ID;
2106 ID.AddInteger(scMulExpr);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002107 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2108 ID.AddPointer(Ops[i]);
Craig Topper9f008862014-04-15 04:59:12 +00002109 void *IP = nullptr;
Dan Gohman51ad99d2010-01-21 02:09:26 +00002110 SCEVMulExpr *S =
2111 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2112 if (!S) {
Dan Gohman00524492010-03-18 01:17:13 +00002113 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2114 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
Dan Gohman01c65a22010-03-18 18:49:47 +00002115 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2116 O, Ops.size());
Dan Gohman51ad99d2010-01-21 02:09:26 +00002117 UniqueSCEVs.InsertNode(S, IP);
2118 }
Andrew Trick8b55b732011-03-14 16:50:06 +00002119 S->setNoWrapFlags(Flags);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002120 return S;
Chris Lattnerd934c702004-04-02 20:23:17 +00002121}
2122
Andreas Bolka7a5c8db2009-08-07 22:55:26 +00002123/// getUDivExpr - Get a canonical unsigned division expression, or something
2124/// simpler if possible.
Dan Gohmanabd17092009-06-24 14:49:00 +00002125const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2126 const SCEV *RHS) {
Dan Gohmand33f36e2009-05-18 15:44:58 +00002127 assert(getEffectiveSCEVType(LHS->getType()) ==
2128 getEffectiveSCEVType(RHS->getType()) &&
2129 "SCEVUDivExpr operand types don't match!");
2130
Dan Gohmana30370b2009-05-04 22:02:23 +00002131 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00002132 if (RHSC->getValue()->equalsInt(1))
Dan Gohman8a8ad7d2009-08-20 16:42:55 +00002133 return LHS; // X udiv 1 --> x
Dan Gohmanacd700a2010-04-22 01:35:11 +00002134 // If the denominator is zero, the result of the udiv is undefined. Don't
2135 // try to analyze it, because the resolution chosen here may differ from
2136 // the resolution chosen in other parts of the compiler.
2137 if (!RHSC->getValue()->isZero()) {
2138 // Determine if the division can be folded into the operands of
2139      // the LHS expression.
2140 // TODO: Generalize this to non-constants by using known-bits information.
Chris Lattner229907c2011-07-18 04:54:35 +00002141 Type *Ty = LHS->getType();
Dan Gohmanacd700a2010-04-22 01:35:11 +00002142 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
Dan Gohmandb764c62010-08-04 19:52:50 +00002143 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
Dan Gohmanacd700a2010-04-22 01:35:11 +00002144 // For non-power-of-two values, effectively round the value up to the
2145 // nearest power of two.
2146 if (!RHSC->getValue()->getValue().isPowerOf2())
2147 ++MaxShiftAmt;
Chris Lattner229907c2011-07-18 04:54:35 +00002148 IntegerType *ExtTy =
Dan Gohmanacd700a2010-04-22 01:35:11 +00002149 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
Dan Gohmanacd700a2010-04-22 01:35:11 +00002150 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2151 if (const SCEVConstant *Step =
Andrew Trick6d45a012011-08-06 07:00:37 +00002152 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2153 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
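        // e.g. {0,+,8}<L> /u 4 folds to {0,+,2}<L> when the zero-extend
        // comparison below shows the recurrence cannot wrap.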
2154 const APInt &StepInt = Step->getValue()->getValue();
2155 const APInt &DivInt = RHSC->getValue()->getValue();
2156 if (!StepInt.urem(DivInt) &&
Dan Gohmanacd700a2010-04-22 01:35:11 +00002157 getZeroExtendExpr(AR, ExtTy) ==
2158 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2159 getZeroExtendExpr(Step, ExtTy),
Andrew Trick8b55b732011-03-14 16:50:06 +00002160 AR->getLoop(), SCEV::FlagAnyWrap)) {
Dan Gohmanacd700a2010-04-22 01:35:11 +00002161 SmallVector<const SCEV *, 4> Operands;
2162 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
2163 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
Andrew Trick8b55b732011-03-14 16:50:06 +00002164 return getAddRecExpr(Operands, AR->getLoop(),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00002165 SCEV::FlagNW);
Dan Gohmanc3a3cb42009-05-08 20:18:49 +00002166 }
Andrew Trick6d45a012011-08-06 07:00:37 +00002167        // Get a canonical UDivExpr for a recurrence.
2168        // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
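        // e.g. {5,+,2}<L> /u 4 (C=4, N=2) rewrites the LHS to {4,+,2}<L>,
        // since Y = 5 - (5 % 2) = 4; both divisions produce the same values,
        // so they now share one canonical udiv expression.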
2169 // We can currently only fold X%N if X is constant.
2170 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2171 if (StartC && !DivInt.urem(StepInt) &&
2172 getZeroExtendExpr(AR, ExtTy) ==
2173 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2174 getZeroExtendExpr(Step, ExtTy),
2175 AR->getLoop(), SCEV::FlagAnyWrap)) {
2176 const APInt &StartInt = StartC->getValue()->getValue();
2177 const APInt &StartRem = StartInt.urem(StepInt);
2178 if (StartRem != 0)
2179 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2180 AR->getLoop(), SCEV::FlagNW);
2181 }
2182 }
Dan Gohmanacd700a2010-04-22 01:35:11 +00002183 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
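      // e.g. (6 * %x) /u 2 folds to 3 * %x when the zero-extend comparison
      // below shows the multiply cannot wrap.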
2184 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
2185 SmallVector<const SCEV *, 4> Operands;
2186 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
2187 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
2188 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
2189 // Find an operand that's safely divisible.
2190 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
2191 const SCEV *Op = M->getOperand(i);
2192 const SCEV *Div = getUDivExpr(Op, RHSC);
2193 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
2194 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
2195 M->op_end());
2196 Operands[i] = Div;
2197 return getMulExpr(Operands);
2198 }
2199 }
Dan Gohmanc3a3cb42009-05-08 20:18:49 +00002200 }
Dan Gohmanacd700a2010-04-22 01:35:11 +00002201 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
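      // e.g. (8 + (4 * %x)) /u 4 folds to (2 + %x), provided the zero-extend
      // comparison below succeeds and each term divides back exactly.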
Andrew Trick7d1eea82011-04-27 18:17:36 +00002202 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
Dan Gohmanacd700a2010-04-22 01:35:11 +00002203 SmallVector<const SCEV *, 4> Operands;
2204 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
2205 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
2206 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
2207 Operands.clear();
2208 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2209 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
2210 if (isa<SCEVUDivExpr>(Op) ||
2211 getMulExpr(Op, RHS) != A->getOperand(i))
2212 break;
2213 Operands.push_back(Op);
2214 }
2215 if (Operands.size() == A->getNumOperands())
2216 return getAddExpr(Operands);
2217 }
2218 }
Dan Gohmanc3a3cb42009-05-08 20:18:49 +00002219
Dan Gohmanacd700a2010-04-22 01:35:11 +00002220 // Fold if both operands are constant.
2221 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
2222 Constant *LHSCV = LHSC->getValue();
2223 Constant *RHSCV = RHSC->getValue();
2224 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
2225 RHSCV)));
2226 }
Chris Lattnerd934c702004-04-02 20:23:17 +00002227 }
2228 }
2229
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002230 FoldingSetNodeID ID;
2231 ID.AddInteger(scUDivExpr);
2232 ID.AddPointer(LHS);
2233 ID.AddPointer(RHS);
Craig Topper9f008862014-04-15 04:59:12 +00002234 void *IP = nullptr;
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002235 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
Dan Gohman01c65a22010-03-18 18:49:47 +00002236 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
2237 LHS, RHS);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002238 UniqueSCEVs.InsertNode(S, IP);
2239 return S;
Chris Lattnerd934c702004-04-02 20:23:17 +00002240}
2241
Nick Lewycky31eaca52014-01-27 10:04:03 +00002242static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
2243 APInt A = C1->getValue()->getValue().abs();
2244 APInt B = C2->getValue()->getValue().abs();
2245 uint32_t ABW = A.getBitWidth();
2246 uint32_t BBW = B.getBitWidth();
2247
2248 if (ABW > BBW)
2249 B = B.zext(ABW);
2250 else if (ABW < BBW)
2251 A = A.zext(BBW);
2252
2253 return APIntOps::GreatestCommonDivisor(A, B);
2254}
2255
2256/// getUDivExactExpr - Get a canonical unsigned division expression, or
2257/// something simpler if possible. There is no representation for an exact udiv
2258/// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
2259/// We can't do this when it's not exact because the udiv may be clearing bits.
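/// For example, when (6 * %x) /u 3 is known to be exact, the common factor 3
/// can be removed, leaving 2 * %x; a plain udiv could not be simplified this
/// way, since if the multiply wrapped the division could discard a nonzero
/// remainder.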
2260const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
2261 const SCEV *RHS) {
2262 // TODO: we could try to find factors in all sorts of things, but for now we
2263 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
2264 // end of this file for inspiration.
2265
2266 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
2267 if (!Mul)
2268 return getUDivExpr(LHS, RHS);
2269
2270 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
2271 // If the mulexpr multiplies by a constant, then that constant must be the
2272 // first element of the mulexpr.
2273 if (const SCEVConstant *LHSCst =
2274 dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
2275 if (LHSCst == RHSCst) {
2276 SmallVector<const SCEV *, 2> Operands;
2277 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2278 return getMulExpr(Operands);
2279 }
2280
2281 // We can't just assume that LHSCst divides RHSCst cleanly, it could be
2282    // We can't just assume that LHSCst divides RHSCst cleanly; it could be
2283 // check.
2284 APInt Factor = gcd(LHSCst, RHSCst);
2285 if (!Factor.isIntN(1)) {
2286 LHSCst = cast<SCEVConstant>(
2287 getConstant(LHSCst->getValue()->getValue().udiv(Factor)));
2288 RHSCst = cast<SCEVConstant>(
2289 getConstant(RHSCst->getValue()->getValue().udiv(Factor)));
2290 SmallVector<const SCEV *, 2> Operands;
2291 Operands.push_back(LHSCst);
2292 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2293 LHS = getMulExpr(Operands);
2294 RHS = RHSCst;
Nick Lewycky629199c2014-01-27 10:47:44 +00002295 Mul = dyn_cast<SCEVMulExpr>(LHS);
2296 if (!Mul)
2297 return getUDivExactExpr(LHS, RHS);
Nick Lewycky31eaca52014-01-27 10:04:03 +00002298 }
2299 }
2300 }
2301
2302 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
2303 if (Mul->getOperand(i) == RHS) {
2304 SmallVector<const SCEV *, 2> Operands;
2305 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
2306 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
2307 return getMulExpr(Operands);
2308 }
2309 }
2310
2311 return getUDivExpr(LHS, RHS);
2312}
Chris Lattnerd934c702004-04-02 20:23:17 +00002313
Dan Gohman4d5435d2009-05-24 23:45:28 +00002314/// getAddRecExpr - Get an add recurrence expression for the specified loop.
2315/// Simplify the expression as much as possible.
Andrew Trick8b55b732011-03-14 16:50:06 +00002316const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2317 const Loop *L,
2318 SCEV::NoWrapFlags Flags) {
Dan Gohmanaf752342009-07-07 17:06:11 +00002319 SmallVector<const SCEV *, 4> Operands;
Chris Lattnerd934c702004-04-02 20:23:17 +00002320 Operands.push_back(Start);
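  // If the step is itself a recurrence on the same loop, flatten it:
  // {X,+,{Y,+,Z}<L>}<L> becomes {X,+,Y,+,Z}<L>.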
Dan Gohmana30370b2009-05-04 22:02:23 +00002321 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
Chris Lattnerd934c702004-04-02 20:23:17 +00002322 if (StepChrec->getLoop() == L) {
Dan Gohmandd41bba2010-06-21 19:47:52 +00002323 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
Andrew Trickf6b01ff2011-03-15 00:37:00 +00002324 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
Chris Lattnerd934c702004-04-02 20:23:17 +00002325 }
2326
2327 Operands.push_back(Step);
Andrew Trick8b55b732011-03-14 16:50:06 +00002328 return getAddRecExpr(Operands, L, Flags);
Chris Lattnerd934c702004-04-02 20:23:17 +00002329}
2330
Dan Gohman4d5435d2009-05-24 23:45:28 +00002331/// getAddRecExpr - Get an add recurrence expression for the specified loop.
2332/// Simplify the expression as much as possible.
Dan Gohmance973df2009-06-24 04:48:43 +00002333const SCEV *
Dan Gohmanaf752342009-07-07 17:06:11 +00002334ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
Andrew Trick8b55b732011-03-14 16:50:06 +00002335 const Loop *L, SCEV::NoWrapFlags Flags) {
Chris Lattnerd934c702004-04-02 20:23:17 +00002336 if (Operands.size() == 1) return Operands[0];
Dan Gohmand33f36e2009-05-18 15:44:58 +00002337#ifndef NDEBUG
Chris Lattner229907c2011-07-18 04:54:35 +00002338 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
Dan Gohmand33f36e2009-05-18 15:44:58 +00002339 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
Dan Gohmanb6c773e2010-08-16 16:13:54 +00002340 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
Dan Gohmand33f36e2009-05-18 15:44:58 +00002341 "SCEVAddRecExpr operand types don't match!");
Dan Gohmand3a32ae2010-11-17 20:48:38 +00002342 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
Dan Gohmanafd6db92010-11-17 21:23:15 +00002343 assert(isLoopInvariant(Operands[i], L) &&
Dan Gohmand3a32ae2010-11-17 20:48:38 +00002344 "SCEVAddRecExpr operand is not loop-invariant!");
Dan Gohmand33f36e2009-05-18 15:44:58 +00002345#endif
Chris Lattnerd934c702004-04-02 20:23:17 +00002346
Dan Gohmanbe928e32008-06-18 16:23:07 +00002347 if (Operands.back()->isZero()) {
2348 Operands.pop_back();
Andrew Trick8b55b732011-03-14 16:50:06 +00002349 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
Dan Gohmanbe928e32008-06-18 16:23:07 +00002350 }
Chris Lattnerd934c702004-04-02 20:23:17 +00002351
Dan Gohmancf9c64e2010-02-19 18:49:22 +00002352 // It's tempting to want to call getMaxBackedgeTakenCount count here and
2353 // use that information to infer NUW and NSW flags. However, computing a
2354 // BE count requires calling getAddRecExpr, so we may not yet have a
2355 // meaningful BE count at this point (and if we don't, we'd be stuck
2356 // with a SCEVCouldNotCompute as the cached BE count).
2357
Andrew Trick8b55b732011-03-14 16:50:06 +00002358 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
Andrew Trickf6b01ff2011-03-15 00:37:00 +00002359 // And vice-versa.
2360 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2361 SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
2362 if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00002363 bool All = true;
Dan Gohman74c61502010-08-16 16:27:53 +00002364 for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
2365 E = Operands.end(); I != E; ++I)
2366 if (!isKnownNonNegative(*I)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00002367 All = false;
2368 break;
2369 }
Andrew Trickf6b01ff2011-03-15 00:37:00 +00002370 if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
Dan Gohman51ad99d2010-01-21 02:09:26 +00002371 }
2372
Dan Gohman223a5d22008-08-08 18:33:12 +00002373  // Canonicalize nested AddRecs by nesting them in order of loop depth.
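  // e.g. a request for {{X,+,Y}<Inner>,+,Z}<Outer> is rebuilt below as
  // {{X,+,Z}<Outer>,+,Y}<Inner>, provided the dominance and invariance
  // checks succeed.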
Dan Gohmana30370b2009-05-04 22:02:23 +00002374 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
Dan Gohmancb0efec2009-12-18 01:14:11 +00002375 const Loop *NestedLoop = NestedAR->getLoop();
Dan Gohman63c020a2010-08-13 20:23:25 +00002376 if (L->contains(NestedLoop) ?
Dan Gohman51ad99d2010-01-21 02:09:26 +00002377 (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
Dan Gohman63c020a2010-08-13 20:23:25 +00002378 (!NestedLoop->contains(L) &&
Dan Gohman51ad99d2010-01-21 02:09:26 +00002379 DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
Dan Gohmanaf752342009-07-07 17:06:11 +00002380 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
Dan Gohmancb0efec2009-12-18 01:14:11 +00002381 NestedAR->op_end());
Dan Gohman223a5d22008-08-08 18:33:12 +00002382 Operands[0] = NestedAR->getStart();
Dan Gohmancc030b72009-06-26 22:36:20 +00002383 // AddRecs require their operands be loop-invariant with respect to their
2384 // loops. Don't perform this transformation if it would break this
2385 // requirement.
2386 bool AllInvariant = true;
2387 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
Dan Gohmanafd6db92010-11-17 21:23:15 +00002388 if (!isLoopInvariant(Operands[i], L)) {
Dan Gohmancc030b72009-06-26 22:36:20 +00002389 AllInvariant = false;
2390 break;
2391 }
2392 if (AllInvariant) {
Andrew Trick8b55b732011-03-14 16:50:06 +00002393 // Create a recurrence for the outer loop with the same step size.
2394 //
Andrew Trick8b55b732011-03-14 16:50:06 +00002395 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2396 // inner recurrence has the same property.
Andrew Trickf6b01ff2011-03-15 00:37:00 +00002397 SCEV::NoWrapFlags OuterFlags =
2398 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
Andrew Trick8b55b732011-03-14 16:50:06 +00002399
2400 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
Dan Gohmancc030b72009-06-26 22:36:20 +00002401 AllInvariant = true;
2402 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
Dan Gohmanafd6db92010-11-17 21:23:15 +00002403 if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
Dan Gohmancc030b72009-06-26 22:36:20 +00002404 AllInvariant = false;
2405 break;
2406 }
Andrew Trick8b55b732011-03-14 16:50:06 +00002407 if (AllInvariant) {
Dan Gohmancc030b72009-06-26 22:36:20 +00002408 // Ok, both add recurrences are valid after the transformation.
Andrew Trick8b55b732011-03-14 16:50:06 +00002409 //
Andrew Trick8b55b732011-03-14 16:50:06 +00002410 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2411 // the outer recurrence has the same property.
Andrew Trickf6b01ff2011-03-15 00:37:00 +00002412 SCEV::NoWrapFlags InnerFlags =
2413 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
Andrew Trick8b55b732011-03-14 16:50:06 +00002414 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2415 }
Dan Gohmancc030b72009-06-26 22:36:20 +00002416 }
2417 // Reset Operands to its original state.
2418 Operands[0] = NestedAR;
Dan Gohman223a5d22008-08-08 18:33:12 +00002419 }
2420 }
2421
Dan Gohman8d67d2f2010-01-19 22:27:22 +00002422 // Okay, it looks like we really DO need an addrec expr. Check to see if we
2423 // already have one, otherwise create a new one.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002424 FoldingSetNodeID ID;
2425 ID.AddInteger(scAddRecExpr);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002426 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2427 ID.AddPointer(Operands[i]);
2428 ID.AddPointer(L);
Craig Topper9f008862014-04-15 04:59:12 +00002429 void *IP = nullptr;
Dan Gohman51ad99d2010-01-21 02:09:26 +00002430 SCEVAddRecExpr *S =
2431 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2432 if (!S) {
Dan Gohman00524492010-03-18 01:17:13 +00002433 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2434 std::uninitialized_copy(Operands.begin(), Operands.end(), O);
Dan Gohman01c65a22010-03-18 18:49:47 +00002435 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2436 O, Operands.size(), L);
Dan Gohman51ad99d2010-01-21 02:09:26 +00002437 UniqueSCEVs.InsertNode(S, IP);
2438 }
Andrew Trick8b55b732011-03-14 16:50:06 +00002439 S->setNoWrapFlags(Flags);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002440 return S;
Chris Lattnerd934c702004-04-02 20:23:17 +00002441}
2442
Dan Gohmanabd17092009-06-24 14:49:00 +00002443const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2444 const SCEV *RHS) {
Dan Gohmanaf752342009-07-07 17:06:11 +00002445 SmallVector<const SCEV *, 2> Ops;
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002446 Ops.push_back(LHS);
2447 Ops.push_back(RHS);
2448 return getSMaxExpr(Ops);
2449}
2450
Dan Gohmanaf752342009-07-07 17:06:11 +00002451const SCEV *
2452ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002453 assert(!Ops.empty() && "Cannot get empty smax!");
2454 if (Ops.size() == 1) return Ops[0];
Dan Gohmand33f36e2009-05-18 15:44:58 +00002455#ifndef NDEBUG
Chris Lattner229907c2011-07-18 04:54:35 +00002456 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Dan Gohmand33f36e2009-05-18 15:44:58 +00002457 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
Dan Gohmanb6c773e2010-08-16 16:13:54 +00002458 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
Dan Gohmand33f36e2009-05-18 15:44:58 +00002459 "SCEVSMaxExpr operand types don't match!");
2460#endif
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002461
2462  // Sort by complexity; this groups all similar expression types together.
Dan Gohman9ba542c2009-05-07 14:39:04 +00002463 GroupByComplexity(Ops, LI);
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002464
2465 // If there are any constants, fold them together.
2466 unsigned Idx = 0;
Dan Gohmana30370b2009-05-04 22:02:23 +00002467 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002468 ++Idx;
2469 assert(Idx < Ops.size());
Dan Gohmana30370b2009-05-04 22:02:23 +00002470 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002471 // We found two constants, fold them together!
Owen Andersonedb4a702009-07-24 23:12:02 +00002472 ConstantInt *Fold = ConstantInt::get(getContext(),
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002473 APIntOps::smax(LHSC->getValue()->getValue(),
2474 RHSC->getValue()->getValue()));
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002475 Ops[0] = getConstant(Fold);
2476 Ops.erase(Ops.begin()+1); // Erase the folded element
2477 if (Ops.size() == 1) return Ops[0];
2478 LHSC = cast<SCEVConstant>(Ops[0]);
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002479 }
2480
Dan Gohmanf57bdb72009-06-24 14:46:22 +00002481 // If we are left with a constant minimum-int, strip it off.
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002482 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2483 Ops.erase(Ops.begin());
2484 --Idx;
Dan Gohmanf57bdb72009-06-24 14:46:22 +00002485 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2486 // If we have an smax with a constant maximum-int, it will always be
2487 // maximum-int.
2488 return Ops[0];
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002489 }
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002490
Dan Gohmanfe4b2912010-04-13 16:49:23 +00002491 if (Ops.size() == 1) return Ops[0];
2492 }
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002493
2494 // Find the first SMax
2495 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2496 ++Idx;
2497
2498 // Check to see if one of the operands is an SMax. If so, expand its operands
2499 // onto our operand list, and recurse to simplify.
2500 if (Idx < Ops.size()) {
2501 bool DeletedSMax = false;
Dan Gohmana30370b2009-05-04 22:02:23 +00002502 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002503 Ops.erase(Ops.begin()+Idx);
Dan Gohmandd41bba2010-06-21 19:47:52 +00002504 Ops.append(SMax->op_begin(), SMax->op_end());
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002505 DeletedSMax = true;
2506 }
2507
2508 if (DeletedSMax)
2509 return getSMaxExpr(Ops);
2510 }
2511
2512 // Okay, check to see if the same value occurs in the operand list twice. If
2513 // so, delete one. Since we sorted the list, these values are required to
2514 // be adjacent.
2515 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
Dan Gohman7ef0dc22010-04-13 16:51:03 +00002516 // X smax Y smax Y --> X smax Y
2517 // X smax Y --> X, if X is always greater than Y
2518 if (Ops[i] == Ops[i+1] ||
2519 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2520 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2521 --i; --e;
2522 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002523 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2524 --i; --e;
2525 }
2526
2527 if (Ops.size() == 1) return Ops[0];
2528
2529 assert(!Ops.empty() && "Reduced smax down to nothing!");
2530
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002531 // Okay, it looks like we really DO need an smax expr. Check to see if we
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002532 // already have one, otherwise create a new one.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002533 FoldingSetNodeID ID;
2534 ID.AddInteger(scSMaxExpr);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002535 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2536 ID.AddPointer(Ops[i]);
Craig Topper9f008862014-04-15 04:59:12 +00002537 void *IP = nullptr;
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002538 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
Dan Gohman00524492010-03-18 01:17:13 +00002539 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2540 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
Dan Gohman01c65a22010-03-18 18:49:47 +00002541 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
2542 O, Ops.size());
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002543 UniqueSCEVs.InsertNode(S, IP);
2544 return S;
Nick Lewyckycdb7e542007-11-25 22:41:31 +00002545}
2546
Dan Gohmanabd17092009-06-24 14:49:00 +00002547const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2548 const SCEV *RHS) {
Dan Gohmanaf752342009-07-07 17:06:11 +00002549 SmallVector<const SCEV *, 2> Ops;
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002550 Ops.push_back(LHS);
2551 Ops.push_back(RHS);
2552 return getUMaxExpr(Ops);
2553}
2554
Dan Gohmanaf752342009-07-07 17:06:11 +00002555const SCEV *
2556ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002557 assert(!Ops.empty() && "Cannot get empty umax!");
2558 if (Ops.size() == 1) return Ops[0];
Dan Gohmand33f36e2009-05-18 15:44:58 +00002559#ifndef NDEBUG
Chris Lattner229907c2011-07-18 04:54:35 +00002560 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
Dan Gohmand33f36e2009-05-18 15:44:58 +00002561 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
Dan Gohmanb6c773e2010-08-16 16:13:54 +00002562 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
Dan Gohmand33f36e2009-05-18 15:44:58 +00002563 "SCEVUMaxExpr operand types don't match!");
2564#endif
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002565
2566  // Sort by complexity; this groups all similar expression types together.
Dan Gohman9ba542c2009-05-07 14:39:04 +00002567 GroupByComplexity(Ops, LI);
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002568
2569 // If there are any constants, fold them together.
2570 unsigned Idx = 0;
Dan Gohmana30370b2009-05-04 22:02:23 +00002571 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002572 ++Idx;
2573 assert(Idx < Ops.size());
Dan Gohmana30370b2009-05-04 22:02:23 +00002574 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002575 // We found two constants, fold them together!
Owen Andersonedb4a702009-07-24 23:12:02 +00002576 ConstantInt *Fold = ConstantInt::get(getContext(),
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002577 APIntOps::umax(LHSC->getValue()->getValue(),
2578 RHSC->getValue()->getValue()));
2579 Ops[0] = getConstant(Fold);
2580 Ops.erase(Ops.begin()+1); // Erase the folded element
2581 if (Ops.size() == 1) return Ops[0];
2582 LHSC = cast<SCEVConstant>(Ops[0]);
2583 }
2584
Dan Gohmanf57bdb72009-06-24 14:46:22 +00002585 // If we are left with a constant minimum-int, strip it off.
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002586 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
2587 Ops.erase(Ops.begin());
2588 --Idx;
Dan Gohmanf57bdb72009-06-24 14:46:22 +00002589 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
2590 // If we have an umax with a constant maximum-int, it will always be
2591 // maximum-int.
2592 return Ops[0];
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002593 }
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002594
Dan Gohmanfe4b2912010-04-13 16:49:23 +00002595 if (Ops.size() == 1) return Ops[0];
2596 }
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002597
2598 // Find the first UMax
2599 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
2600 ++Idx;
2601
2602 // Check to see if one of the operands is a UMax. If so, expand its operands
2603 // onto our operand list, and recurse to simplify.
2604 if (Idx < Ops.size()) {
2605 bool DeletedUMax = false;
Dan Gohmana30370b2009-05-04 22:02:23 +00002606 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002607 Ops.erase(Ops.begin()+Idx);
Dan Gohmandd41bba2010-06-21 19:47:52 +00002608 Ops.append(UMax->op_begin(), UMax->op_end());
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002609 DeletedUMax = true;
2610 }
2611
2612 if (DeletedUMax)
2613 return getUMaxExpr(Ops);
2614 }
2615
2616 // Okay, check to see if the same value occurs in the operand list twice. If
2617 // so, delete one. Since we sorted the list, these values are required to
2618 // be adjacent.
2619 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
Dan Gohman7ef0dc22010-04-13 16:51:03 +00002620 // X umax Y umax Y --> X umax Y
2621 // X umax Y --> X, if X is always greater than Y
2622 if (Ops[i] == Ops[i+1] ||
2623 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
2624 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2625 --i; --e;
2626 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002627 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2628 --i; --e;
2629 }
2630
2631 if (Ops.size() == 1) return Ops[0];
2632
2633 assert(!Ops.empty() && "Reduced umax down to nothing!");
2634
2635 // Okay, it looks like we really DO need a umax expr. Check to see if we
2636 // already have one, otherwise create a new one.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002637 FoldingSetNodeID ID;
2638 ID.AddInteger(scUMaxExpr);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002639 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2640 ID.AddPointer(Ops[i]);
Craig Topper9f008862014-04-15 04:59:12 +00002641 void *IP = nullptr;
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002642 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
Dan Gohman00524492010-03-18 01:17:13 +00002643 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2644 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
Dan Gohman01c65a22010-03-18 18:49:47 +00002645 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
2646 O, Ops.size());
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002647 UniqueSCEVs.InsertNode(S, IP);
2648 return S;
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00002649}
2650
Dan Gohmanabd17092009-06-24 14:49:00 +00002651const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2652 const SCEV *RHS) {
Dan Gohman692b4682009-06-22 03:18:45 +00002653 // ~smax(~x, ~y) == smin(x, y).
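  // (getNotSCEV(V) is -1 - V, and -1 - smax(-1 - x, -1 - y) = smin(x, y),
  // because mapping V to -1 - V reverses the signed order.)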
2654 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2655}
2656
Dan Gohmanabd17092009-06-24 14:49:00 +00002657const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2658 const SCEV *RHS) {
Dan Gohman692b4682009-06-22 03:18:45 +00002659 // ~umax(~x, ~y) == umin(x, y)
2660 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2661}
2662
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002663const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
Micah Villmowcdfe20b2012-10-08 16:38:25 +00002664 // If we have DataLayout, we can bypass creating a target-independent
Dan Gohman11862a62010-04-12 23:03:26 +00002665 // constant expression and then folding it back into a ConstantInt.
2666 // This is just a compile-time optimization.
Rafael Espindola7c68beb2014-02-18 15:33:12 +00002667 if (DL)
2668 return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
Dan Gohman11862a62010-04-12 23:03:26 +00002669
Dan Gohmane5e1b7b2010-02-01 18:27:38 +00002670 Constant *C = ConstantExpr::getSizeOf(AllocTy);
2671 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
Rafael Espindola7c68beb2014-02-18 15:33:12 +00002672 if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
Dan Gohmana3b6c4b2010-05-28 16:12:08 +00002673 C = Folded;
Chris Lattner229907c2011-07-18 04:54:35 +00002674 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002675 assert(Ty == IntTy && "Effective SCEV type doesn't match");
Dan Gohmane5e1b7b2010-02-01 18:27:38 +00002676 return getTruncateOrZeroExtend(getSCEV(C), Ty);
2677}
2678
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002679const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
2680 StructType *STy,
Dan Gohmane5e1b7b2010-02-01 18:27:38 +00002681 unsigned FieldNo) {
Micah Villmowcdfe20b2012-10-08 16:38:25 +00002682 // If we have DataLayout, we can bypass creating a target-independent
Dan Gohman11862a62010-04-12 23:03:26 +00002683 // constant expression and then folding it back into a ConstantInt.
2684 // This is just a compile-time optimization.
Rafael Espindola7c68beb2014-02-18 15:33:12 +00002685 if (DL) {
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002686 return getConstant(IntTy,
Rafael Espindola7c68beb2014-02-18 15:33:12 +00002687 DL->getStructLayout(STy)->getElementOffset(FieldNo));
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002688 }
Dan Gohman11862a62010-04-12 23:03:26 +00002689
Dan Gohmancf913832010-01-28 02:15:55 +00002690 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2691 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
Rafael Espindola7c68beb2014-02-18 15:33:12 +00002692 if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
Dan Gohmana3b6c4b2010-05-28 16:12:08 +00002693 C = Folded;
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002694
Matt Arsenault4ed49b52013-10-21 18:08:09 +00002695 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
Dan Gohmancf913832010-01-28 02:15:55 +00002696 return getTruncateOrZeroExtend(getSCEV(C), Ty);
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002697}
2698
Dan Gohmanaf752342009-07-07 17:06:11 +00002699const SCEV *ScalarEvolution::getUnknown(Value *V) {
Dan Gohmanf436bac2009-06-24 00:54:57 +00002700 // Don't attempt to do anything other than create a SCEVUnknown object
2701 // here. createSCEV only calls getUnknown after checking for all other
2702 // interesting possibilities, and any other code that calls getUnknown
2703 // is doing so in order to hide a value from SCEV canonicalization.
2704
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002705 FoldingSetNodeID ID;
2706 ID.AddInteger(scUnknown);
2707 ID.AddPointer(V);
Craig Topper9f008862014-04-15 04:59:12 +00002708 void *IP = nullptr;
Dan Gohman7cac9572010-08-02 23:49:30 +00002709 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
2710 assert(cast<SCEVUnknown>(S)->getValue() == V &&
2711 "Stale SCEVUnknown in uniquing map!");
2712 return S;
2713 }
2714 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
2715 FirstUnknown);
2716 FirstUnknown = cast<SCEVUnknown>(S);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002717 UniqueSCEVs.InsertNode(S, IP);
2718 return S;
Chris Lattnerb4f681b2004-04-15 15:07:24 +00002719}
2720
Chris Lattnerd934c702004-04-02 20:23:17 +00002721//===----------------------------------------------------------------------===//
Chris Lattnerd934c702004-04-02 20:23:17 +00002722// Basic SCEV Analysis and PHI Idiom Recognition Code
2723//
2724
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002725/// isSCEVable - Test if values of the given type are analyzable within
2726/// the SCEV framework. This primarily includes integer types, and it
2727/// can optionally include pointer types if the ScalarEvolution class
2728/// has access to target-specific information.
Chris Lattner229907c2011-07-18 04:54:35 +00002729bool ScalarEvolution::isSCEVable(Type *Ty) const {
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002730 // Integers and pointers are always SCEVable.
Duncan Sands19d0b472010-02-16 11:11:14 +00002731 return Ty->isIntegerTy() || Ty->isPointerTy();
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002732}
2733
2734/// getTypeSizeInBits - Return the size in bits of the specified type,
2735/// for which isSCEVable must return true.
Chris Lattner229907c2011-07-18 04:54:35 +00002736uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002737 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2738
Micah Villmowcdfe20b2012-10-08 16:38:25 +00002739 // If we have a DataLayout, use it!
Rafael Espindola7c68beb2014-02-18 15:33:12 +00002740 if (DL)
2741 return DL->getTypeSizeInBits(Ty);
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002742
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002743 // Integer types have fixed sizes.
Duncan Sands9dff9be2010-02-15 16:12:20 +00002744 if (Ty->isIntegerTy())
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002745 return Ty->getPrimitiveSizeInBits();
2746
Micah Villmowcdfe20b2012-10-08 16:38:25 +00002747  // The only other supported type is pointer. Without DataLayout, conservatively
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002748 // assume pointers are 64-bit.
Duncan Sands19d0b472010-02-16 11:11:14 +00002749 assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002750 return 64;
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002751}
2752
2753/// getEffectiveSCEVType - Return a type with the same bitwidth as
2754/// the given type and which represents how SCEV will treat the given
2755/// type, for which isSCEVable must return true. For pointer types,
2756/// this is the pointer-sized integer type.
Chris Lattner229907c2011-07-18 04:54:35 +00002757Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002758 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2759
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002760 if (Ty->isIntegerTy()) {
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002761 return Ty;
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002762 }
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002763
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002764  // The only other supported type is pointer.
Duncan Sands19d0b472010-02-16 11:11:14 +00002765 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00002766
Rafael Espindola7c68beb2014-02-18 15:33:12 +00002767 if (DL)
2768 return DL->getIntPtrType(Ty);
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002769
Micah Villmowcdfe20b2012-10-08 16:38:25 +00002770 // Without DataLayout, conservatively assume pointers are 64-bit.
Dan Gohmanbf2a9ae2009-08-18 16:46:41 +00002771 return Type::getInt64Ty(getContext());
Dan Gohman0a40ad92009-04-16 03:18:22 +00002772}
Chris Lattnerd934c702004-04-02 20:23:17 +00002773
Dan Gohmanaf752342009-07-07 17:06:11 +00002774const SCEV *ScalarEvolution::getCouldNotCompute() {
Dan Gohmanc5c85c02009-06-27 21:21:31 +00002775 return &CouldNotCompute;
Dan Gohman31efa302009-04-18 17:58:19 +00002776}
2777
Shuxin Yangefc4c012013-07-08 17:33:13 +00002778namespace {
2779 // Helper class working with SCEVTraversal to figure out if a SCEV contains
2780 // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
2781 // is set iff if find such SCEVUnknown.
2782  // is set iff we find such a SCEVUnknown.
2783 struct FindInvalidSCEVUnknown {
2784 bool FindOne;
2785 FindInvalidSCEVUnknown() { FindOne = false; }
2786 bool follow(const SCEV *S) {
Benjamin Kramer987b8502014-02-11 19:02:55 +00002787 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
Shuxin Yangefc4c012013-07-08 17:33:13 +00002788 case scConstant:
2789 return false;
2790 case scUnknown:
Shuxin Yang23773b32013-07-12 07:25:38 +00002791 if (!cast<SCEVUnknown>(S)->getValue())
Shuxin Yangefc4c012013-07-08 17:33:13 +00002792 FindOne = true;
2793 return false;
2794 default:
2795 return true;
2796 }
2797 }
2798 bool isDone() const { return FindOne; }
2799 };
2800}
2801
2802bool ScalarEvolution::checkValidity(const SCEV *S) const {
2803 FindInvalidSCEVUnknown F;
2804 SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
2805 ST.visitAll(S);
2806
2807 return !F.FindOne;
2808}
2809
Chris Lattnerd934c702004-04-02 20:23:17 +00002810/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2811/// expression and create a new one.
Dan Gohmanaf752342009-07-07 17:06:11 +00002812const SCEV *ScalarEvolution::getSCEV(Value *V) {
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002813 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
Chris Lattnerd934c702004-04-02 20:23:17 +00002814
Shuxin Yangefc4c012013-07-08 17:33:13 +00002815 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
2816 if (I != ValueExprMap.end()) {
2817 const SCEV *S = I->second;
Shuxin Yang23773b32013-07-12 07:25:38 +00002818 if (checkValidity(S))
Shuxin Yangefc4c012013-07-08 17:33:13 +00002819 return S;
2820 else
2821 ValueExprMap.erase(I);
2822 }
Dan Gohmanaf752342009-07-07 17:06:11 +00002823 const SCEV *S = createSCEV(V);
Dan Gohmanc29eeae2010-08-16 16:31:39 +00002824
2825 // The process of creating a SCEV for V may have caused other SCEVs
2826 // to have been created, so it's necessary to insert the new entry
2827 // from scratch, rather than trying to remember the insert position
2828 // above.
Dan Gohman9bad2fb2010-08-27 18:55:03 +00002829 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
Chris Lattnerd934c702004-04-02 20:23:17 +00002830 return S;
2831}
2832
Dan Gohman0a40ad92009-04-16 03:18:22 +00002833/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2834///
Dan Gohmanaf752342009-07-07 17:06:11 +00002835const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
Dan Gohmana30370b2009-05-04 22:02:23 +00002836 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
Owen Anderson53a52212009-07-13 04:09:18 +00002837 return getConstant(
Owen Anderson487375e2009-07-29 18:55:55 +00002838 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
Dan Gohman0a40ad92009-04-16 03:18:22 +00002839
Chris Lattner229907c2011-07-18 04:54:35 +00002840 Type *Ty = V->getType();
Dan Gohmanc8e23622009-04-21 23:15:49 +00002841 Ty = getEffectiveSCEVType(Ty);
Owen Anderson542619e2009-07-13 20:58:05 +00002842 return getMulExpr(V,
Owen Anderson5a1acd92009-07-31 20:28:14 +00002843 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
Dan Gohman0a40ad92009-04-16 03:18:22 +00002844}
2845
2846/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
Dan Gohmanaf752342009-07-07 17:06:11 +00002847const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
Dan Gohmana30370b2009-05-04 22:02:23 +00002848 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
Owen Anderson542619e2009-07-13 20:58:05 +00002849 return getConstant(
Owen Anderson487375e2009-07-29 18:55:55 +00002850 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
Dan Gohman0a40ad92009-04-16 03:18:22 +00002851
Chris Lattner229907c2011-07-18 04:54:35 +00002852 Type *Ty = V->getType();
Dan Gohmanc8e23622009-04-21 23:15:49 +00002853 Ty = getEffectiveSCEVType(Ty);
Owen Anderson542619e2009-07-13 20:58:05 +00002854 const SCEV *AllOnes =
Owen Anderson5a1acd92009-07-31 20:28:14 +00002855 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
Dan Gohman0a40ad92009-04-16 03:18:22 +00002856 return getMinusSCEV(AllOnes, V);
2857}
2858
Andrew Trick8b55b732011-03-14 16:50:06 +00002859/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
Chris Lattnerfc877522011-01-09 22:26:35 +00002860const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00002861 SCEV::NoWrapFlags Flags) {
Andrew Tricka34f1b12011-03-15 01:16:14 +00002862 assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
2863
Dan Gohman46f00a22010-07-20 16:53:00 +00002864 // Fast path: X - X --> 0.
2865 if (LHS == RHS)
2866 return getConstant(LHS->getType(), 0);
2867
Dan Gohman0a40ad92009-04-16 03:18:22 +00002868 // X - Y --> X + -Y
Andrew Trick8b55b732011-03-14 16:50:06 +00002869 return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
Dan Gohman0a40ad92009-04-16 03:18:22 +00002870}
2871
2872/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2873/// input value to the specified type. If the type must be extended, it is zero
2874/// extended.
Dan Gohmanaf752342009-07-07 17:06:11 +00002875const SCEV *
Chris Lattner229907c2011-07-18 04:54:35 +00002876ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
2877 Type *SrcTy = V->getType();
Duncan Sands19d0b472010-02-16 11:11:14 +00002878 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2879 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
Dan Gohman0a40ad92009-04-16 03:18:22 +00002880 "Cannot truncate or zero extend with non-integer arguments!");
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002881 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
Dan Gohman0a40ad92009-04-16 03:18:22 +00002882 return V; // No conversion
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002883 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
Dan Gohmanc8e23622009-04-21 23:15:49 +00002884 return getTruncateExpr(V, Ty);
2885 return getZeroExtendExpr(V, Ty);
Dan Gohman0a40ad92009-04-16 03:18:22 +00002886}
2887
2888/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2889/// input value to the specified type. If the type must be extended, it is sign
2890/// extended.
Dan Gohmanaf752342009-07-07 17:06:11 +00002891const SCEV *
2892ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
Chris Lattner229907c2011-07-18 04:54:35 +00002893 Type *Ty) {
2894 Type *SrcTy = V->getType();
Duncan Sands19d0b472010-02-16 11:11:14 +00002895 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2896 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
Dan Gohman0a40ad92009-04-16 03:18:22 +00002897         "Cannot truncate or sign extend with non-integer arguments!");
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002898 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
Dan Gohman0a40ad92009-04-16 03:18:22 +00002899 return V; // No conversion
Dan Gohmanb397e1a2009-04-21 01:07:12 +00002900 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
Dan Gohmanc8e23622009-04-21 23:15:49 +00002901 return getTruncateExpr(V, Ty);
2902 return getSignExtendExpr(V, Ty);
Dan Gohman0a40ad92009-04-16 03:18:22 +00002903}
2904
Dan Gohmane712a2f2009-05-13 03:46:30 +00002905/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2906/// input value to the specified type. If the type must be extended, it is zero
2907/// extended. The conversion must not be narrowing.
Dan Gohmanaf752342009-07-07 17:06:11 +00002908const SCEV *
Chris Lattner229907c2011-07-18 04:54:35 +00002909ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
2910 Type *SrcTy = V->getType();
Duncan Sands19d0b472010-02-16 11:11:14 +00002911 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2912 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
Dan Gohmane712a2f2009-05-13 03:46:30 +00002913 "Cannot noop or zero extend with non-integer arguments!");
2914 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2915 "getNoopOrZeroExtend cannot truncate!");
2916 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2917 return V; // No conversion
2918 return getZeroExtendExpr(V, Ty);
2919}
2920
2921/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2922/// input value to the specified type. If the type must be extended, it is sign
2923/// extended. The conversion must not be narrowing.
Dan Gohmanaf752342009-07-07 17:06:11 +00002924const SCEV *
Chris Lattner229907c2011-07-18 04:54:35 +00002925ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
2926 Type *SrcTy = V->getType();
Duncan Sands19d0b472010-02-16 11:11:14 +00002927 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2928 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
Dan Gohmane712a2f2009-05-13 03:46:30 +00002929 "Cannot noop or sign extend with non-integer arguments!");
2930 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2931 "getNoopOrSignExtend cannot truncate!");
2932 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2933 return V; // No conversion
2934 return getSignExtendExpr(V, Ty);
2935}
2936
Dan Gohman8db2edc2009-06-13 15:56:47 +00002937/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2938/// the input value to the specified type. If the type must be extended,
2939/// it is extended with unspecified bits. The conversion must not be
2940/// narrowing.
Dan Gohmanaf752342009-07-07 17:06:11 +00002941const SCEV *
Chris Lattner229907c2011-07-18 04:54:35 +00002942ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
2943 Type *SrcTy = V->getType();
Duncan Sands19d0b472010-02-16 11:11:14 +00002944 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2945 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
Dan Gohman8db2edc2009-06-13 15:56:47 +00002946 "Cannot noop or any extend with non-integer arguments!");
2947 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2948 "getNoopOrAnyExtend cannot truncate!");
2949 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2950 return V; // No conversion
2951 return getAnyExtendExpr(V, Ty);
2952}
2953
Dan Gohmane712a2f2009-05-13 03:46:30 +00002954/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2955/// input value to the specified type. The conversion must not be widening.
Dan Gohmanaf752342009-07-07 17:06:11 +00002956const SCEV *
Chris Lattner229907c2011-07-18 04:54:35 +00002957ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
2958 Type *SrcTy = V->getType();
Duncan Sands19d0b472010-02-16 11:11:14 +00002959 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2960 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
Dan Gohmane712a2f2009-05-13 03:46:30 +00002961 "Cannot truncate or noop with non-integer arguments!");
2962 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2963 "getTruncateOrNoop cannot extend!");
2964 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2965 return V; // No conversion
2966 return getTruncateExpr(V, Ty);
2967}
2968
Dan Gohman96212b62009-06-22 00:31:57 +00002969/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2970/// the types using zero-extension, and then perform a umax operation
2971/// with them.
Dan Gohmanabd17092009-06-24 14:49:00 +00002972const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2973 const SCEV *RHS) {
Dan Gohmanaf752342009-07-07 17:06:11 +00002974 const SCEV *PromotedLHS = LHS;
2975 const SCEV *PromotedRHS = RHS;
Dan Gohman96212b62009-06-22 00:31:57 +00002976
2977 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2978 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2979 else
2980 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2981
2982 return getUMaxExpr(PromotedLHS, PromotedRHS);
2983}
2984
Dan Gohman2bc22302009-06-22 15:03:27 +00002985/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2986/// the types using zero-extension, and then perform a umin operation
2987/// with them.
Dan Gohmanabd17092009-06-24 14:49:00 +00002988const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2989 const SCEV *RHS) {
Dan Gohmanaf752342009-07-07 17:06:11 +00002990 const SCEV *PromotedLHS = LHS;
2991 const SCEV *PromotedRHS = RHS;
Dan Gohman2bc22302009-06-22 15:03:27 +00002992
2993 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2994 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2995 else
2996 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2997
2998 return getUMinExpr(PromotedLHS, PromotedRHS);
2999}
3000
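// --- Illustrative sketch (editor's addition, not in the original source) ---
// How the two mismatched-type helpers above might be used. Both promote the
// narrower operand with a zero-extend, so the comparison semantics are
// unsigned. Assumes a ScalarEvolution instance SE; the parameter names are
// hypothetical. Guarded with #if 0.
#if 0
static const SCEV *unsignedMinOfMixedWidths(ScalarEvolution &SE, Value *IV32,
                                            Value *Limit64) {
  const SCEV *Narrow = SE.getSCEV(IV32);    // e.g. an i32 induction variable
  const SCEV *Wide   = SE.getSCEV(Limit64); // e.g. an i64 loop limit
  // Narrow is zero-extended to i64 before the umin expression is formed.
  return SE.getUMinFromMismatchedTypes(Narrow, Wide);
}
#endif
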
Andrew Trick87716c92011-03-17 23:51:11 +00003001/// getPointerBase - Transitively follow the chain of pointer-type operands
3002/// until reaching a SCEV that does not have a single pointer operand. This
3003/// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
3004/// but corner cases do exist.
3005const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
3006 // A pointer operand may evaluate to a nonpointer expression, such as null.
3007 if (!V->getType()->isPointerTy())
3008 return V;
3009
3010 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
3011 return getPointerBase(Cast->getOperand());
3012 }
3013 else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
Craig Topper9f008862014-04-15 04:59:12 +00003014 const SCEV *PtrOp = nullptr;
Andrew Trick87716c92011-03-17 23:51:11 +00003015 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
3016 I != E; ++I) {
3017 if ((*I)->getType()->isPointerTy()) {
3018 // Cannot find the base of an expression with multiple pointer operands.
3019 if (PtrOp)
3020 return V;
3021 PtrOp = *I;
3022 }
3023 }
3024 if (!PtrOp)
3025 return V;
3026 return getPointerBase(PtrOp);
3027 }
3028 return V;
3029}
3030
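// --- Illustrative sketch (editor's addition, not in the original source) ---
// For an address SCEV such as ((4 * %i) + %base), the add has exactly one
// pointer-typed operand, so getPointerBase peels the offset and returns the
// SCEVUnknown for %base; in the corner cases described above it returns its
// argument unchanged. Assumes a ScalarEvolution instance SE; the helper name
// is hypothetical. Guarded with #if 0.
#if 0
static const SCEVUnknown *getBaseObject(ScalarEvolution &SE, Value *Addr) {
  const SCEV *Base = SE.getPointerBase(SE.getSCEV(Addr));
  return dyn_cast<SCEVUnknown>(Base); // null when no single base was found
}
#endif
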
Dan Gohman0b89dff2009-07-25 01:13:03 +00003031/// PushDefUseChildren - Push users of the given Instruction
3032/// onto the given Worklist.
3033static void
3034PushDefUseChildren(Instruction *I,
3035 SmallVectorImpl<Instruction *> &Worklist) {
3036 // Push the def-use children onto the Worklist stack.
Chandler Carruthcdf47882014-03-09 03:16:01 +00003037 for (User *U : I->users())
3038 Worklist.push_back(cast<Instruction>(U));
Dan Gohman0b89dff2009-07-25 01:13:03 +00003039}
3040
3041/// ForgetSymbolicValue - This looks up computed SCEV values for all
3042/// instructions that depend on the given instruction and removes them from
Dan Gohman9bad2fb2010-08-27 18:55:03 +00003043/// the ValueExprMap map if they reference SymName. This is used during PHI
Dan Gohman0b89dff2009-07-25 01:13:03 +00003044/// resolution.
Dan Gohmance973df2009-06-24 04:48:43 +00003045void
Dan Gohmana9c205c2010-02-25 06:57:05 +00003046ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
Dan Gohman0b89dff2009-07-25 01:13:03 +00003047 SmallVector<Instruction *, 16> Worklist;
Dan Gohmana9c205c2010-02-25 06:57:05 +00003048 PushDefUseChildren(PN, Worklist);
Chris Lattnerd934c702004-04-02 20:23:17 +00003049
Dan Gohman0b89dff2009-07-25 01:13:03 +00003050 SmallPtrSet<Instruction *, 8> Visited;
Dan Gohmana9c205c2010-02-25 06:57:05 +00003051 Visited.insert(PN);
Dan Gohman0b89dff2009-07-25 01:13:03 +00003052 while (!Worklist.empty()) {
Dan Gohmana9c205c2010-02-25 06:57:05 +00003053 Instruction *I = Worklist.pop_back_val();
Dan Gohman0b89dff2009-07-25 01:13:03 +00003054 if (!Visited.insert(I)) continue;
Chris Lattner7b0fbe72005-02-13 04:37:18 +00003055
Dan Gohman9bad2fb2010-08-27 18:55:03 +00003056 ValueExprMapType::iterator It =
Benjamin Kramere2ef47c2012-06-30 22:37:15 +00003057 ValueExprMap.find_as(static_cast<Value *>(I));
Dan Gohman9bad2fb2010-08-27 18:55:03 +00003058 if (It != ValueExprMap.end()) {
Dan Gohman761065e2010-11-17 02:44:44 +00003059 const SCEV *Old = It->second;
3060
Dan Gohman0b89dff2009-07-25 01:13:03 +00003061 // Short-circuit the def-use traversal if the symbolic name
3062 // ceases to appear in expressions.
Dan Gohman534749b2010-11-17 22:27:42 +00003063 if (Old != SymName && !hasOperand(Old, SymName))
Dan Gohman0b89dff2009-07-25 01:13:03 +00003064 continue;
Chris Lattner7b0fbe72005-02-13 04:37:18 +00003065
Dan Gohman0b89dff2009-07-25 01:13:03 +00003066 // SCEVUnknown for a PHI either means that it has an unrecognized
Dan Gohmana9c205c2010-02-25 06:57:05 +00003067 // structure, it's a PHI that's in the process of being computed
3068 // by createNodeForPHI, or it's a single-value PHI. In the first case,
3069 // additional loop trip count information isn't going to change anything.
3070 // In the second case, createNodeForPHI will perform the necessary
3071 // updates on its own when it gets to that point. In the third, we do
3072 // want to forget the SCEVUnknown.
3073 if (!isa<PHINode>(I) ||
Dan Gohman761065e2010-11-17 02:44:44 +00003074 !isa<SCEVUnknown>(Old) ||
3075 (I != PN && Old == SymName)) {
Dan Gohman7e6b3932010-11-17 23:28:48 +00003076 forgetMemoizedResults(Old);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00003077 ValueExprMap.erase(It);
Dan Gohmancc2f1eb2009-08-31 21:15:23 +00003078 }
Dan Gohman0b89dff2009-07-25 01:13:03 +00003079 }
3080
3081 PushDefUseChildren(I, Worklist);
3082 }
Chris Lattner7b0fbe72005-02-13 04:37:18 +00003083}
Chris Lattnerd934c702004-04-02 20:23:17 +00003084
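// --- Illustrative sketch (editor's addition, not in the original source) ---
// The invalidation above is a def-use DFS with three ingredients: a worklist
// seeded from the PHI's users, a Visited set so each instruction is handled
// at most once, and a pruning test (does the cached SCEV still mention
// SymName?). The skeleton below shows only that traversal shape with a
// caller-supplied predicate; it is a simplification of ForgetSymbolicName,
// not a replacement for it. Guarded with #if 0.
#if 0
template <typename PredicateT>
static void walkAndInvalidate(Instruction *Root, PredicateT StillReferences) {
  SmallVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  PushDefUseChildren(Root, Worklist);
  Visited.insert(Root);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;   // already handled
    if (!StillReferences(I)) continue;  // prune: SymName no longer appears
    // ... drop I's cached SCEV here, as the real code does ...
    PushDefUseChildren(I, Worklist);
  }
}
#endif
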
3085/// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
3086/// a loop header, making it a potential recurrence, or it doesn't.
3087///
Dan Gohmanaf752342009-07-07 17:06:11 +00003088const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
Dan Gohman6635bb22010-04-12 07:49:36 +00003089 if (const Loop *L = LI->getLoopFor(PN->getParent()))
3090 if (L->getHeader() == PN->getParent()) {
3091 // The loop may have multiple entrances or multiple exits; we can analyze
3092 // this phi as an addrec if it has a unique entry value and a unique
3093 // backedge value.
Craig Topper9f008862014-04-15 04:59:12 +00003094 Value *BEValueV = nullptr, *StartValueV = nullptr;
Dan Gohman6635bb22010-04-12 07:49:36 +00003095 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
3096 Value *V = PN->getIncomingValue(i);
3097 if (L->contains(PN->getIncomingBlock(i))) {
3098 if (!BEValueV) {
3099 BEValueV = V;
3100 } else if (BEValueV != V) {
Craig Topper9f008862014-04-15 04:59:12 +00003101 BEValueV = nullptr;
Dan Gohman6635bb22010-04-12 07:49:36 +00003102 break;
3103 }
3104 } else if (!StartValueV) {
3105 StartValueV = V;
3106 } else if (StartValueV != V) {
Craig Topper9f008862014-04-15 04:59:12 +00003107 StartValueV = nullptr;
Dan Gohman6635bb22010-04-12 07:49:36 +00003108 break;
3109 }
3110 }
3111 if (BEValueV && StartValueV) {
Chris Lattnerd934c702004-04-02 20:23:17 +00003112 // While we are analyzing this PHI node, handle its value symbolically.
Dan Gohmanaf752342009-07-07 17:06:11 +00003113 const SCEV *SymbolicName = getUnknown(PN);
Benjamin Kramere2ef47c2012-06-30 22:37:15 +00003114 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
Chris Lattnerd934c702004-04-02 20:23:17 +00003115 "PHI node already processed?");
Dan Gohman9bad2fb2010-08-27 18:55:03 +00003116 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
Chris Lattnerd934c702004-04-02 20:23:17 +00003117
3118 // Using this symbolic name for the PHI, analyze the value coming around
3119 // the back-edge.
Dan Gohman0b89dff2009-07-25 01:13:03 +00003120 const SCEV *BEValue = getSCEV(BEValueV);
Chris Lattnerd934c702004-04-02 20:23:17 +00003121
3122 // NOTE: If BEValue is loop invariant, we know that the PHI node just
3123 // has a special value for the first iteration of the loop.
3124
3125 // If the value coming around the backedge is an add with the symbolic
3126 // value we just inserted, then we found a simple induction variable!
Dan Gohmana30370b2009-05-04 22:02:23 +00003127 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00003128 // If there is a single occurrence of the symbolic value, replace it
3129 // with a recurrence.
3130 unsigned FoundIndex = Add->getNumOperands();
3131 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3132 if (Add->getOperand(i) == SymbolicName)
3133 if (FoundIndex == e) {
3134 FoundIndex = i;
3135 break;
3136 }
3137
3138 if (FoundIndex != Add->getNumOperands()) {
3139 // Create an add with everything but the specified operand.
Dan Gohmanaf752342009-07-07 17:06:11 +00003140 SmallVector<const SCEV *, 8> Ops;
Chris Lattnerd934c702004-04-02 20:23:17 +00003141 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3142 if (i != FoundIndex)
3143 Ops.push_back(Add->getOperand(i));
Dan Gohmanaf752342009-07-07 17:06:11 +00003144 const SCEV *Accum = getAddExpr(Ops);
Chris Lattnerd934c702004-04-02 20:23:17 +00003145
3146 // This is not a valid addrec if the step amount is varying each
3147 // loop iteration, but is not itself an addrec in this loop.
Dan Gohmanafd6db92010-11-17 21:23:15 +00003148 if (isLoopInvariant(Accum, L) ||
Chris Lattnerd934c702004-04-02 20:23:17 +00003149 (isa<SCEVAddRecExpr>(Accum) &&
3150 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
Andrew Trick8b55b732011-03-14 16:50:06 +00003151 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
Dan Gohman51ad99d2010-01-21 02:09:26 +00003152
3153 // If the increment doesn't overflow, then neither the addrec nor
3154 // the post-increment will overflow.
3155 if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
3156 if (OBO->hasNoUnsignedWrap())
Andrew Trick8b55b732011-03-14 16:50:06 +00003157 Flags = setFlags(Flags, SCEV::FlagNUW);
Dan Gohman51ad99d2010-01-21 02:09:26 +00003158 if (OBO->hasNoSignedWrap())
Andrew Trick8b55b732011-03-14 16:50:06 +00003159 Flags = setFlags(Flags, SCEV::FlagNSW);
Benjamin Kramer6094f302013-10-28 07:30:06 +00003160 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
Andrew Trick8b55b732011-03-14 16:50:06 +00003161 // If the increment is an inbounds GEP, then we know the address
3162 // space cannot be wrapped around. We cannot make any guarantee
3163 // about signed or unsigned overflow because pointers are
3164 // unsigned but we may have a negative index from the base
Benjamin Kramer6094f302013-10-28 07:30:06 +00003165 // pointer. We can guarantee that no unsigned wrap occurs if the
3166 // indices form a positive value.
3167 if (GEP->isInBounds()) {
Andrew Trickf6b01ff2011-03-15 00:37:00 +00003168 Flags = setFlags(Flags, SCEV::FlagNW);
Benjamin Kramer6094f302013-10-28 07:30:06 +00003169
3170 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
3171 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
3172 Flags = setFlags(Flags, SCEV::FlagNUW);
3173 }
Andrew Trick34e2f0c2013-11-06 02:08:26 +00003174 } else if (const SubOperator *OBO =
3175 dyn_cast<SubOperator>(BEValueV)) {
3176 if (OBO->hasNoUnsignedWrap())
3177 Flags = setFlags(Flags, SCEV::FlagNUW);
3178 if (OBO->hasNoSignedWrap())
3179 Flags = setFlags(Flags, SCEV::FlagNSW);
Dan Gohman51ad99d2010-01-21 02:09:26 +00003180 }
3181
Dan Gohman6635bb22010-04-12 07:49:36 +00003182 const SCEV *StartVal = getSCEV(StartValueV);
Andrew Trick8b55b732011-03-14 16:50:06 +00003183 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
Dan Gohman62ef6a72009-07-25 01:22:26 +00003184
Dan Gohman51ad99d2010-01-21 02:09:26 +00003185 // Since the no-wrap flags are on the increment, they apply to the
3186 // post-incremented value as well.
Dan Gohmanafd6db92010-11-17 21:23:15 +00003187 if (isLoopInvariant(Accum, L))
Dan Gohman51ad99d2010-01-21 02:09:26 +00003188 (void)getAddRecExpr(getAddExpr(StartVal, Accum),
Andrew Trick8b55b732011-03-14 16:50:06 +00003189 Accum, L, Flags);
Chris Lattnerd934c702004-04-02 20:23:17 +00003190
3191 // Okay, for the entire analysis of this edge we assumed the PHI
Dan Gohman0b89dff2009-07-25 01:13:03 +00003192 // to be symbolic. We now need to go back and purge all of the
3193 // entries for the scalars that use the symbolic expression.
3194 ForgetSymbolicName(PN, SymbolicName);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00003195 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
Chris Lattnerd934c702004-04-02 20:23:17 +00003196 return PHISCEV;
3197 }
3198 }
Dan Gohmana30370b2009-05-04 22:02:23 +00003199 } else if (const SCEVAddRecExpr *AddRec =
3200 dyn_cast<SCEVAddRecExpr>(BEValue)) {
Chris Lattnere8cbdbf2006-04-26 18:34:07 +00003201 // Otherwise, this could be a loop like this:
3202 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
3203 // In this case, j = {1,+,1} and BEValue is j.
3204 // Because the other in-value of i (0) fits the evolution of BEValue,
3205 // i really is an addrec evolution.
3206 if (AddRec->getLoop() == L && AddRec->isAffine()) {
Dan Gohman6635bb22010-04-12 07:49:36 +00003207 const SCEV *StartVal = getSCEV(StartValueV);
Chris Lattnere8cbdbf2006-04-26 18:34:07 +00003208
3209 // If StartVal = j.start - j.stride, we can use StartVal as the
3210 // initial step of the addrec evolution.
Dan Gohmanc8e23622009-04-21 23:15:49 +00003211 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
Dan Gohman068b7932010-04-11 23:44:58 +00003212 AddRec->getOperand(1))) {
Andrew Trick8b55b732011-03-14 16:50:06 +00003213 // FIXME: For constant StartVal, we should be able to infer
3214 // no-wrap flags.
Dan Gohmanaf752342009-07-07 17:06:11 +00003215 const SCEV *PHISCEV =
Andrew Trick8b55b732011-03-14 16:50:06 +00003216 getAddRecExpr(StartVal, AddRec->getOperand(1), L,
3217 SCEV::FlagAnyWrap);
Chris Lattnere8cbdbf2006-04-26 18:34:07 +00003218
3219 // Okay, for the entire analysis of this edge we assumed the PHI
Dan Gohman0b89dff2009-07-25 01:13:03 +00003220 // to be symbolic. We now need to go back and purge all of the
3221 // entries for the scalars that use the symbolic expression.
3222 ForgetSymbolicName(PN, SymbolicName);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00003223 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
Chris Lattnere8cbdbf2006-04-26 18:34:07 +00003224 return PHISCEV;
3225 }
3226 }
Chris Lattnerd934c702004-04-02 20:23:17 +00003227 }
Chris Lattnerd934c702004-04-02 20:23:17 +00003228 }
Dan Gohman6635bb22010-04-12 07:49:36 +00003229 }
Misha Brukman01808ca2005-04-21 21:13:18 +00003230
Dan Gohmana9c205c2010-02-25 06:57:05 +00003231 // If the PHI has a single incoming value, follow that value, unless the
3232 // PHI's incoming blocks are in a different loop, in which case doing so
3233 // risks breaking LCSSA form. Instcombine would normally zap these, but
3234 // it doesn't have DominatorTree information, so it may miss cases.
Rafael Espindola7c68beb2014-02-18 15:33:12 +00003235 if (Value *V = SimplifyInstruction(PN, DL, TLI, DT))
Duncan Sandsaef146b2010-11-18 19:59:41 +00003236 if (LI->replacementPreservesLCSSAForm(PN, V))
Dan Gohmana9c205c2010-02-25 06:57:05 +00003237 return getSCEV(V);
Duncan Sands39d771312010-11-17 20:49:12 +00003238
Chris Lattnerd934c702004-04-02 20:23:17 +00003239 // If it's not a loop phi, we can't handle it yet.
Dan Gohmanc8e23622009-04-21 23:15:49 +00003240 return getUnknown(PN);
Chris Lattnerd934c702004-04-02 20:23:17 +00003241}
3242
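// --- Illustrative sketch (editor's addition, not in the original source) ---
// What the recurrence recognition above produces for a canonical counted loop
// (the IR and the block names %entry/%loop are hypothetical):
//
//   loop:
//     %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add nuw nsw i32 %i, 1
//
// getSCEV(%i) becomes the addrec {0,+,1}<nuw><nsw><%loop>: the backedge value
// is "SymbolicName + 1", the start value is 0, and the nuw/nsw flags are
// inherited from the increment as explained above. A client only needs the
// public entry point:
#if 0
static const SCEVAddRecExpr *asAddRec(ScalarEvolution &SE, PHINode *IV) {
  return dyn_cast<SCEVAddRecExpr>(SE.getSCEV(IV)); // null if not recognized
}
#endif
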
Dan Gohmanee750d12009-05-08 20:26:55 +00003243/// createNodeForGEP - Expand GEP instructions into add and multiply
3244/// operations. This allows them to be analyzed by regular SCEV code.
3245///
Dan Gohmanb256ccf2009-12-18 02:09:29 +00003246const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
Chris Lattner229907c2011-07-18 04:54:35 +00003247 Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Dan Gohman2173bd32009-05-08 20:36:47 +00003248 Value *Base = GEP->getOperand(0);
Dan Gohman30f24fe2009-05-09 00:14:52 +00003249 // Don't attempt to analyze GEPs over unsized objects.
Matt Arsenault404c60a2013-10-21 19:43:56 +00003250 if (!Base->getType()->getPointerElementType()->isSized())
Dan Gohman30f24fe2009-05-09 00:14:52 +00003251 return getUnknown(GEP);
Matt Arsenault4c265902013-09-27 22:38:23 +00003252
3253 // Don't blindly transfer the inbounds flag from the GEP instruction to the
3254 // Add expression, because the Instruction may be guarded by control flow
3255 // and the no-overflow bits may not be valid for the expression in any
3256 // context.
3257 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3258
Dan Gohman1d2ded72010-05-03 22:09:21 +00003259 const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
Dan Gohman2173bd32009-05-08 20:36:47 +00003260 gep_type_iterator GTI = gep_type_begin(GEP);
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +00003261 for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
Dan Gohman2173bd32009-05-08 20:36:47 +00003262 E = GEP->op_end();
Dan Gohmanee750d12009-05-08 20:26:55 +00003263 I != E; ++I) {
3264 Value *Index = *I;
3265 // Compute the (potentially symbolic) offset in bytes for this index.
Chris Lattner229907c2011-07-18 04:54:35 +00003266 if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
Dan Gohmanee750d12009-05-08 20:26:55 +00003267 // For a struct, add the member offset.
Dan Gohmanee750d12009-05-08 20:26:55 +00003268 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00003269 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
Dan Gohman16206132010-06-30 07:16:37 +00003270
Dan Gohman16206132010-06-30 07:16:37 +00003271 // Add the field offset to the running total offset.
Dan Gohmanc0cca7f2010-06-30 17:27:11 +00003272 TotalOffset = getAddExpr(TotalOffset, FieldOffset);
Dan Gohmanee750d12009-05-08 20:26:55 +00003273 } else {
3274 // For an array, add the element offset, explicitly scaled.
Matt Arsenaulta90a18e2013-09-10 19:55:24 +00003275 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
Dan Gohman16206132010-06-30 07:16:37 +00003276 const SCEV *IndexS = getSCEV(Index);
Dan Gohman8b0a4192010-03-01 17:49:51 +00003277 // Getelementptr indices are signed.
Dan Gohman16206132010-06-30 07:16:37 +00003278 IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
3279
Dan Gohman16206132010-06-30 07:16:37 +00003280 // Multiply the index by the element size to compute the element offset.
Matt Arsenault4c265902013-09-27 22:38:23 +00003281 const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, Wrap);
Dan Gohman16206132010-06-30 07:16:37 +00003282
3283 // Add the element offset to the running total offset.
Dan Gohmanc0cca7f2010-06-30 17:27:11 +00003284 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
Dan Gohmanee750d12009-05-08 20:26:55 +00003285 }
3286 }
Dan Gohman16206132010-06-30 07:16:37 +00003287
3288 // Get the SCEV for the GEP base.
3289 const SCEV *BaseS = getSCEV(Base);
3290
Dan Gohman16206132010-06-30 07:16:37 +00003291 // Add the total offset from all the GEP indices to the base.
Matt Arsenault4c265902013-09-27 22:38:23 +00003292 return getAddExpr(BaseS, TotalOffset, Wrap);
Dan Gohmanee750d12009-05-08 20:26:55 +00003293}
3294
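// --- Worked example (editor's addition, not in the original source) ---
// For a GEP such as
//   %a = getelementptr inbounds [10 x i32]* %base, i64 0, i64 %i
// and a DataLayout where i32 is 4 bytes, the loop above accumulates one term
// per index: the leading constant 0 contributes nothing, and the array index
// contributes 4 * (sext %i to the pointer-sized type). The result is
//   getSCEV(%a) == ((4 * (sext %i)) + %base)
// with FlagNSW on the mul/add because the GEP is inbounds (see Wrap above).
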
Nick Lewycky3783b462007-11-22 07:59:40 +00003295/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
3296/// guaranteed to end in (at every loop iteration). It is, at the same time,
3297/// the minimum number of times S is divisible by 2. For example, given {4,+,8}
3298/// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
Dan Gohmanc702fc02009-06-19 23:29:04 +00003299uint32_t
Dan Gohmanaf752342009-07-07 17:06:11 +00003300ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
Dan Gohmana30370b2009-05-04 22:02:23 +00003301 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
Chris Lattner69ec1ec2007-11-23 22:36:49 +00003302 return C->getValue()->getValue().countTrailingZeros();
Chris Lattner49b090e2006-12-12 02:26:09 +00003303
Dan Gohmana30370b2009-05-04 22:02:23 +00003304 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
Dan Gohmanc702fc02009-06-19 23:29:04 +00003305 return std::min(GetMinTrailingZeros(T->getOperand()),
3306 (uint32_t)getTypeSizeInBits(T->getType()));
Nick Lewycky3783b462007-11-22 07:59:40 +00003307
Dan Gohmana30370b2009-05-04 22:02:23 +00003308 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
Dan Gohmanc702fc02009-06-19 23:29:04 +00003309 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3310 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3311 getTypeSizeInBits(E->getType()) : OpRes;
Nick Lewycky3783b462007-11-22 07:59:40 +00003312 }
3313
Dan Gohmana30370b2009-05-04 22:02:23 +00003314 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
Dan Gohmanc702fc02009-06-19 23:29:04 +00003315 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3316 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3317 getTypeSizeInBits(E->getType()) : OpRes;
Nick Lewycky3783b462007-11-22 07:59:40 +00003318 }
3319
Dan Gohmana30370b2009-05-04 22:02:23 +00003320 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
Nick Lewycky3783b462007-11-22 07:59:40 +00003321 // The result is the min of all operands' results.
Dan Gohmanc702fc02009-06-19 23:29:04 +00003322 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
Nick Lewycky3783b462007-11-22 07:59:40 +00003323 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
Dan Gohmanc702fc02009-06-19 23:29:04 +00003324 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
Nick Lewycky3783b462007-11-22 07:59:40 +00003325 return MinOpRes;
Chris Lattner49b090e2006-12-12 02:26:09 +00003326 }
3327
Dan Gohmana30370b2009-05-04 22:02:23 +00003328 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
Nick Lewycky3783b462007-11-22 07:59:40 +00003329 // The result is the sum of all operands' results.
Dan Gohmanc702fc02009-06-19 23:29:04 +00003330 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
3331 uint32_t BitWidth = getTypeSizeInBits(M->getType());
Nick Lewycky3783b462007-11-22 07:59:40 +00003332 for (unsigned i = 1, e = M->getNumOperands();
3333 SumOpRes != BitWidth && i != e; ++i)
Dan Gohmanc702fc02009-06-19 23:29:04 +00003334 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
Nick Lewycky3783b462007-11-22 07:59:40 +00003335 BitWidth);
3336 return SumOpRes;
Chris Lattner49b090e2006-12-12 02:26:09 +00003337 }
Nick Lewycky3783b462007-11-22 07:59:40 +00003338
Dan Gohmana30370b2009-05-04 22:02:23 +00003339 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
Nick Lewycky3783b462007-11-22 07:59:40 +00003340 // The result is the min of all operands' results.
Dan Gohmanc702fc02009-06-19 23:29:04 +00003341 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
Nick Lewycky3783b462007-11-22 07:59:40 +00003342 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
Dan Gohmanc702fc02009-06-19 23:29:04 +00003343 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
Nick Lewycky3783b462007-11-22 07:59:40 +00003344 return MinOpRes;
Chris Lattner49b090e2006-12-12 02:26:09 +00003345 }
Nick Lewycky3783b462007-11-22 07:59:40 +00003346
Dan Gohmana30370b2009-05-04 22:02:23 +00003347 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
Nick Lewyckycdb7e542007-11-25 22:41:31 +00003348 // The result is the min of all operands' results.
Dan Gohmanc702fc02009-06-19 23:29:04 +00003349 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
Nick Lewyckycdb7e542007-11-25 22:41:31 +00003350 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
Dan Gohmanc702fc02009-06-19 23:29:04 +00003351 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
Nick Lewyckycdb7e542007-11-25 22:41:31 +00003352 return MinOpRes;
3353 }
3354
Dan Gohmana30370b2009-05-04 22:02:23 +00003355 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00003356 // The result is the min of all operands' results.
Dan Gohmanc702fc02009-06-19 23:29:04 +00003357 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00003358 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
Dan Gohmanc702fc02009-06-19 23:29:04 +00003359 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00003360 return MinOpRes;
3361 }
3362
Dan Gohmanc702fc02009-06-19 23:29:04 +00003363 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3364 // For a SCEVUnknown, ask ValueTracking.
3365 unsigned BitWidth = getTypeSizeInBits(U->getType());
Dan Gohmanc702fc02009-06-19 23:29:04 +00003366 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
Rafael Espindolaba0a6ca2012-04-04 12:51:34 +00003367 ComputeMaskedBits(U->getValue(), Zeros, Ones);
Dan Gohmanc702fc02009-06-19 23:29:04 +00003368 return Zeros.countTrailingOnes();
3369 }
3370
3371 // SCEVUDivExpr and anything else not handled above: be conservative.
Nick Lewycky3783b462007-11-22 07:59:40 +00003372 return 0;
Chris Lattner49b090e2006-12-12 02:26:09 +00003373}
Chris Lattnerd934c702004-04-02 20:23:17 +00003374
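// --- Illustrative check (editor's addition, not in the original source) ---
// The {4,+,8} example from the comment above, spelled out: 4 ends in two zero
// bits, 8 ends in three, and an addrec takes the minimum over its operands,
// so every value of the recurrence is divisible by 2^2 = 4. Assumes some
// Loop *L is available; the helper name is hypothetical. Guarded with #if 0.
#if 0
static uint32_t minTrailingZerosOfExample(ScalarEvolution &SE, const Loop *L) {
  Type *I32 = Type::getInt32Ty(SE.getContext());
  const SCEV *Rec = SE.getAddRecExpr(SE.getConstant(I32, 4),
                                     SE.getConstant(I32, 8), L,
                                     SCEV::FlagAnyWrap);
  return SE.GetMinTrailingZeros(Rec); // == 2
}
#endif
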
Dan Gohmane65c9172009-07-13 21:35:55 +00003375/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
3376///
3377ConstantRange
3378ScalarEvolution::getUnsignedRange(const SCEV *S) {
Dan Gohman761065e2010-11-17 02:44:44 +00003379 // See if we've computed this range already.
3380 DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
3381 if (I != UnsignedRanges.end())
3382 return I->second;
Dan Gohmanc702fc02009-06-19 23:29:04 +00003383
3384 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
Dan Gohmaned756312010-11-17 20:23:08 +00003385 return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
Dan Gohmanc702fc02009-06-19 23:29:04 +00003386
Dan Gohman85be4332010-01-26 19:19:05 +00003387 unsigned BitWidth = getTypeSizeInBits(S->getType());
3388 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3389
3390 // If the value has known zeros, the maximum unsigned value will have those
3391 // known zeros as well.
3392 uint32_t TZ = GetMinTrailingZeros(S);
3393 if (TZ != 0)
3394 ConservativeResult =
3395 ConstantRange(APInt::getMinValue(BitWidth),
3396 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
3397
Dan Gohmane65c9172009-07-13 21:35:55 +00003398 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3399 ConstantRange X = getUnsignedRange(Add->getOperand(0));
3400 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3401 X = X.add(getUnsignedRange(Add->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003402 return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
Dan Gohmane65c9172009-07-13 21:35:55 +00003403 }
3404
3405 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3406 ConstantRange X = getUnsignedRange(Mul->getOperand(0));
3407 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3408 X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003409 return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
Dan Gohmane65c9172009-07-13 21:35:55 +00003410 }
3411
3412 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3413 ConstantRange X = getUnsignedRange(SMax->getOperand(0));
3414 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3415 X = X.smax(getUnsignedRange(SMax->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003416 return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
Dan Gohmane65c9172009-07-13 21:35:55 +00003417 }
3418
3419 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3420 ConstantRange X = getUnsignedRange(UMax->getOperand(0));
3421 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3422 X = X.umax(getUnsignedRange(UMax->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003423 return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
Dan Gohmane65c9172009-07-13 21:35:55 +00003424 }
3425
3426 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3427 ConstantRange X = getUnsignedRange(UDiv->getLHS());
3428 ConstantRange Y = getUnsignedRange(UDiv->getRHS());
Dan Gohmaned756312010-11-17 20:23:08 +00003429 return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003430 }
3431
3432 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3433 ConstantRange X = getUnsignedRange(ZExt->getOperand());
Dan Gohmaned756312010-11-17 20:23:08 +00003434 return setUnsignedRange(ZExt,
3435 ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003436 }
3437
3438 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3439 ConstantRange X = getUnsignedRange(SExt->getOperand());
Dan Gohmaned756312010-11-17 20:23:08 +00003440 return setUnsignedRange(SExt,
3441 ConservativeResult.intersectWith(X.signExtend(BitWidth)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003442 }
3443
3444 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3445 ConstantRange X = getUnsignedRange(Trunc->getOperand());
Dan Gohmaned756312010-11-17 20:23:08 +00003446 return setUnsignedRange(Trunc,
3447 ConservativeResult.intersectWith(X.truncate(BitWidth)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003448 }
3449
Dan Gohmane65c9172009-07-13 21:35:55 +00003450 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00003451 // If there's no unsigned wrap, the value will never be less than its
3452 // initial value.
Andrew Trick8b55b732011-03-14 16:50:06 +00003453 if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
Dan Gohman51ad99d2010-01-21 02:09:26 +00003454 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
Dan Gohmanebbd05f2010-04-12 23:08:18 +00003455 if (!C->getValue()->isZero())
Dan Gohmanae4a4142010-04-11 22:12:18 +00003456 ConservativeResult =
Dan Gohman9396b422010-06-30 06:58:35 +00003457 ConservativeResult.intersectWith(
3458 ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003459
3460 // TODO: non-affine addrec
Dan Gohman85be4332010-01-26 19:19:05 +00003461 if (AddRec->isAffine()) {
Chris Lattner229907c2011-07-18 04:54:35 +00003462 Type *Ty = AddRec->getType();
Dan Gohmane65c9172009-07-13 21:35:55 +00003463 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
Dan Gohman85be4332010-01-26 19:19:05 +00003464 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3465 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
Dan Gohmane65c9172009-07-13 21:35:55 +00003466 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3467
3468 const SCEV *Start = AddRec->getStart();
Dan Gohmanf76210e2010-04-12 07:39:33 +00003469 const SCEV *Step = AddRec->getStepRecurrence(*this);
Dan Gohmane65c9172009-07-13 21:35:55 +00003470
3471 ConstantRange StartRange = getUnsignedRange(Start);
Dan Gohmanf76210e2010-04-12 07:39:33 +00003472 ConstantRange StepRange = getSignedRange(Step);
3473 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3474 ConstantRange EndRange =
3475 StartRange.add(MaxBECountRange.multiply(StepRange));
3476
3477 // Check for overflow. This must be done with ConstantRange arithmetic
3478 // because we could be called from within the ScalarEvolution overflow
3479 // checking code.
3480 ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
3481 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3482 ConstantRange ExtMaxBECountRange =
3483 MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3484 ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
3485 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3486 ExtEndRange)
Dan Gohmaned756312010-11-17 20:23:08 +00003487 return setUnsignedRange(AddRec, ConservativeResult);
Dan Gohmanf76210e2010-04-12 07:39:33 +00003488
Dan Gohmane65c9172009-07-13 21:35:55 +00003489 APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
3490 EndRange.getUnsignedMin());
3491 APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
3492 EndRange.getUnsignedMax());
3493 if (Min.isMinValue() && Max.isMaxValue())
Dan Gohmaned756312010-11-17 20:23:08 +00003494 return setUnsignedRange(AddRec, ConservativeResult);
3495 return setUnsignedRange(AddRec,
3496 ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003497 }
3498 }
Dan Gohman51ad99d2010-01-21 02:09:26 +00003499
Dan Gohmaned756312010-11-17 20:23:08 +00003500 return setUnsignedRange(AddRec, ConservativeResult);
Dan Gohmanc702fc02009-06-19 23:29:04 +00003501 }
3502
3503 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3504 // For a SCEVUnknown, ask ValueTracking.
Dan Gohmanc702fc02009-06-19 23:29:04 +00003505 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
Rafael Espindola7c68beb2014-02-18 15:33:12 +00003506 ComputeMaskedBits(U->getValue(), Zeros, Ones, DL);
Dan Gohman1a7ab942009-07-20 22:34:18 +00003507 if (Ones == ~Zeros + 1)
Dan Gohmaned756312010-11-17 20:23:08 +00003508 return setUnsignedRange(U, ConservativeResult);
3509 return setUnsignedRange(U,
3510 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
Dan Gohmanc702fc02009-06-19 23:29:04 +00003511 }
3512
Dan Gohmaned756312010-11-17 20:23:08 +00003513 return setUnsignedRange(S, ConservativeResult);
Dan Gohmanc702fc02009-06-19 23:29:04 +00003514}
3515
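// --- Illustrative sketch (editor's addition, not in the original source) ---
// A typical query against the unsigned range: a value produced by
// "zext i8 %x to i32" falls in [0, 256) via the zero-extend case above, so
// its unsigned maximum is 255. Assumes a ScalarEvolution instance SE; the
// helper name is hypothetical. Guarded with #if 0.
#if 0
static bool fitsInByteUnsigned(ScalarEvolution &SE, Value *X) {
  ConstantRange CR = SE.getUnsignedRange(SE.getSCEV(X));
  return CR.getUnsignedMax().ult(256); // true for the zext-from-i8 case
}
#endif
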
Dan Gohmane65c9172009-07-13 21:35:55 +00003516/// getSignedRange - Determine the signed range for a particular SCEV.
3517///
3518ConstantRange
3519ScalarEvolution::getSignedRange(const SCEV *S) {
Dan Gohman3ac8cd62011-01-24 17:54:18 +00003520 // See if we've computed this range already.
Dan Gohman761065e2010-11-17 02:44:44 +00003521 DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
3522 if (I != SignedRanges.end())
3523 return I->second;
Dan Gohmanc702fc02009-06-19 23:29:04 +00003524
Dan Gohmane65c9172009-07-13 21:35:55 +00003525 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
Dan Gohmaned756312010-11-17 20:23:08 +00003526 return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
Dan Gohmane65c9172009-07-13 21:35:55 +00003527
Dan Gohman51aaf022010-01-26 04:40:18 +00003528 unsigned BitWidth = getTypeSizeInBits(S->getType());
3529 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3530
3531 // If the value has known zeros, the maximum signed value will have those
3532 // known zeros as well.
3533 uint32_t TZ = GetMinTrailingZeros(S);
3534 if (TZ != 0)
3535 ConservativeResult =
3536 ConstantRange(APInt::getSignedMinValue(BitWidth),
3537 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
3538
Dan Gohmane65c9172009-07-13 21:35:55 +00003539 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3540 ConstantRange X = getSignedRange(Add->getOperand(0));
3541 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3542 X = X.add(getSignedRange(Add->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003543 return setSignedRange(Add, ConservativeResult.intersectWith(X));
Dan Gohmanc702fc02009-06-19 23:29:04 +00003544 }
3545
Dan Gohmane65c9172009-07-13 21:35:55 +00003546 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3547 ConstantRange X = getSignedRange(Mul->getOperand(0));
3548 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3549 X = X.multiply(getSignedRange(Mul->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003550 return setSignedRange(Mul, ConservativeResult.intersectWith(X));
Dan Gohmanc702fc02009-06-19 23:29:04 +00003551 }
3552
Dan Gohmane65c9172009-07-13 21:35:55 +00003553 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3554 ConstantRange X = getSignedRange(SMax->getOperand(0));
3555 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3556 X = X.smax(getSignedRange(SMax->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003557 return setSignedRange(SMax, ConservativeResult.intersectWith(X));
Dan Gohmane65c9172009-07-13 21:35:55 +00003558 }
Dan Gohmand261d272009-06-24 01:05:09 +00003559
Dan Gohmane65c9172009-07-13 21:35:55 +00003560 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3561 ConstantRange X = getSignedRange(UMax->getOperand(0));
3562 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3563 X = X.umax(getSignedRange(UMax->getOperand(i)));
Dan Gohmaned756312010-11-17 20:23:08 +00003564 return setSignedRange(UMax, ConservativeResult.intersectWith(X));
Dan Gohmane65c9172009-07-13 21:35:55 +00003565 }
Dan Gohmand261d272009-06-24 01:05:09 +00003566
Dan Gohmane65c9172009-07-13 21:35:55 +00003567 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3568 ConstantRange X = getSignedRange(UDiv->getLHS());
3569 ConstantRange Y = getSignedRange(UDiv->getRHS());
Dan Gohmaned756312010-11-17 20:23:08 +00003570 return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003571 }
Dan Gohmand261d272009-06-24 01:05:09 +00003572
Dan Gohmane65c9172009-07-13 21:35:55 +00003573 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3574 ConstantRange X = getSignedRange(ZExt->getOperand());
Dan Gohmaned756312010-11-17 20:23:08 +00003575 return setSignedRange(ZExt,
3576 ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003577 }
3578
3579 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3580 ConstantRange X = getSignedRange(SExt->getOperand());
Dan Gohmaned756312010-11-17 20:23:08 +00003581 return setSignedRange(SExt,
3582 ConservativeResult.intersectWith(X.signExtend(BitWidth)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003583 }
3584
3585 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3586 ConstantRange X = getSignedRange(Trunc->getOperand());
Dan Gohmaned756312010-11-17 20:23:08 +00003587 return setSignedRange(Trunc,
3588 ConservativeResult.intersectWith(X.truncate(BitWidth)));
Dan Gohmane65c9172009-07-13 21:35:55 +00003589 }
3590
Dan Gohmane65c9172009-07-13 21:35:55 +00003591 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00003592 // If there's no signed wrap, and all the operands have the same sign or
3593 // zero, the value won't ever change sign.
Andrew Trick8b55b732011-03-14 16:50:06 +00003594 if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
Dan Gohman51ad99d2010-01-21 02:09:26 +00003595 bool AllNonNeg = true;
3596 bool AllNonPos = true;
3597 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3598 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3599 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3600 }
Dan Gohman51ad99d2010-01-21 02:09:26 +00003601 if (AllNonNeg)
Dan Gohman51aaf022010-01-26 04:40:18 +00003602 ConservativeResult = ConservativeResult.intersectWith(
3603 ConstantRange(APInt(BitWidth, 0),
3604 APInt::getSignedMinValue(BitWidth)));
Dan Gohman51ad99d2010-01-21 02:09:26 +00003605 else if (AllNonPos)
Dan Gohman51aaf022010-01-26 04:40:18 +00003606 ConservativeResult = ConservativeResult.intersectWith(
3607 ConstantRange(APInt::getSignedMinValue(BitWidth),
3608 APInt(BitWidth, 1)));
Dan Gohman51ad99d2010-01-21 02:09:26 +00003609 }
Dan Gohmane65c9172009-07-13 21:35:55 +00003610
3611 // TODO: non-affine addrec
Dan Gohman85be4332010-01-26 19:19:05 +00003612 if (AddRec->isAffine()) {
Chris Lattner229907c2011-07-18 04:54:35 +00003613 Type *Ty = AddRec->getType();
Dan Gohmane65c9172009-07-13 21:35:55 +00003614 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
Dan Gohman85be4332010-01-26 19:19:05 +00003615 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3616 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
Dan Gohmane65c9172009-07-13 21:35:55 +00003617 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3618
3619 const SCEV *Start = AddRec->getStart();
Dan Gohmanf76210e2010-04-12 07:39:33 +00003620 const SCEV *Step = AddRec->getStepRecurrence(*this);
Dan Gohmane65c9172009-07-13 21:35:55 +00003621
3622 ConstantRange StartRange = getSignedRange(Start);
Dan Gohmanf76210e2010-04-12 07:39:33 +00003623 ConstantRange StepRange = getSignedRange(Step);
3624 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3625 ConstantRange EndRange =
3626 StartRange.add(MaxBECountRange.multiply(StepRange));
3627
3628 // Check for overflow. This must be done with ConstantRange arithmetic
3629 // because we could be called from within the ScalarEvolution overflow
3630 // checking code.
3631 ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
3632 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3633 ConstantRange ExtMaxBECountRange =
3634 MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3635 ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
3636 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3637 ExtEndRange)
Dan Gohmaned756312010-11-17 20:23:08 +00003638 return setSignedRange(AddRec, ConservativeResult);
Dan Gohmanf76210e2010-04-12 07:39:33 +00003639
Dan Gohmane65c9172009-07-13 21:35:55 +00003640 APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3641 EndRange.getSignedMin());
3642 APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3643 EndRange.getSignedMax());
3644 if (Min.isMinSignedValue() && Max.isMaxSignedValue())
Dan Gohmaned756312010-11-17 20:23:08 +00003645 return setSignedRange(AddRec, ConservativeResult);
3646 return setSignedRange(AddRec,
3647 ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
Dan Gohmand261d272009-06-24 01:05:09 +00003648 }
Dan Gohmand261d272009-06-24 01:05:09 +00003649 }
Dan Gohman51ad99d2010-01-21 02:09:26 +00003650
Dan Gohmaned756312010-11-17 20:23:08 +00003651 return setSignedRange(AddRec, ConservativeResult);
Dan Gohmand261d272009-06-24 01:05:09 +00003652 }
3653
Dan Gohmanc702fc02009-06-19 23:29:04 +00003654 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3655 // For a SCEVUnknown, ask ValueTracking.
Rafael Espindola7c68beb2014-02-18 15:33:12 +00003656 if (!U->getValue()->getType()->isIntegerTy() && !DL)
Dan Gohmaned756312010-11-17 20:23:08 +00003657 return setSignedRange(U, ConservativeResult);
Rafael Espindola7c68beb2014-02-18 15:33:12 +00003658 unsigned NS = ComputeNumSignBits(U->getValue(), DL);
Hal Finkelff666bd2013-07-09 18:16:16 +00003659 if (NS <= 1)
Dan Gohmaned756312010-11-17 20:23:08 +00003660 return setSignedRange(U, ConservativeResult);
3661 return setSignedRange(U, ConservativeResult.intersectWith(
Dan Gohmane65c9172009-07-13 21:35:55 +00003662 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
Dan Gohmaned756312010-11-17 20:23:08 +00003663 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
Dan Gohmanc702fc02009-06-19 23:29:04 +00003664 }
3665
Dan Gohmaned756312010-11-17 20:23:08 +00003666 return setSignedRange(S, ConservativeResult);
Dan Gohmanc702fc02009-06-19 23:29:04 +00003667}
3668
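// --- Illustrative sketch (editor's addition, not in the original source) ---
// Companion query for the signed range: for an addrec such as {0,+,1}<nsw>,
// every operand is non-negative, so the NSW branch above intersects the
// result with the non-negative half of the signed range and the signed
// minimum is provably >= 0. This is essentially the question the existing
// isKnownNonNegative helper asks. Guarded with #if 0; the name is
// hypothetical.
#if 0
static bool provablyNonNegative(ScalarEvolution &SE, const SCEV *S) {
  return SE.getSignedRange(S).getSignedMin().isNonNegative();
}
#endif
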
Chris Lattnerd934c702004-04-02 20:23:17 +00003669/// createSCEV - We know that there is no SCEV for the specified value.
3670/// Analyze the expression.
3671///
Dan Gohmanaf752342009-07-07 17:06:11 +00003672const SCEV *ScalarEvolution::createSCEV(Value *V) {
Dan Gohmanb397e1a2009-04-21 01:07:12 +00003673 if (!isSCEVable(V->getType()))
Dan Gohmanc8e23622009-04-21 23:15:49 +00003674 return getUnknown(V);
Dan Gohman0a40ad92009-04-16 03:18:22 +00003675
Dan Gohman05e89732008-06-22 19:56:46 +00003676 unsigned Opcode = Instruction::UserOp1;
Dan Gohman69451a02010-03-09 23:46:50 +00003677 if (Instruction *I = dyn_cast<Instruction>(V)) {
Dan Gohman05e89732008-06-22 19:56:46 +00003678 Opcode = I->getOpcode();
Dan Gohman69451a02010-03-09 23:46:50 +00003679
3680 // Don't attempt to analyze instructions in blocks that aren't
3681 // reachable. Such instructions don't matter, and they aren't required
3682 // to obey basic rules for definitions dominating uses which this
3683 // analysis depends on.
3684 if (!DT->isReachableFromEntry(I->getParent()))
3685 return getUnknown(V);
3686 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
Dan Gohman05e89732008-06-22 19:56:46 +00003687 Opcode = CE->getOpcode();
Dan Gohmanf436bac2009-06-24 00:54:57 +00003688 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3689 return getConstant(CI);
3690 else if (isa<ConstantPointerNull>(V))
Dan Gohman1d2ded72010-05-03 22:09:21 +00003691 return getConstant(V->getType(), 0);
Dan Gohmanf161e06e2009-08-25 17:49:57 +00003692 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3693 return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
Dan Gohman05e89732008-06-22 19:56:46 +00003694 else
Dan Gohmanc8e23622009-04-21 23:15:49 +00003695 return getUnknown(V);
Chris Lattnera3e0bb42007-04-02 05:41:38 +00003696
Dan Gohman80ca01c2009-07-17 20:47:02 +00003697 Operator *U = cast<Operator>(V);
Dan Gohman05e89732008-06-22 19:56:46 +00003698 switch (Opcode) {
Dan Gohmane5fb1032010-08-16 16:03:49 +00003699 case Instruction::Add: {
3700 // The simple thing to do would be to just call getSCEV on both operands
3701 // and call getAddExpr with the results. However, if we're looking at a
3702 // bunch of things all added together, this can be quite inefficient,
3703 // because it leads to N-1 getAddExpr calls for N ultimate operands.
3704 // Instead, gather up all the operands and make a single getAddExpr call.
3705 // LLVM IR canonical form means we need only traverse the left operands.
Andrew Trickd25089f2011-11-29 02:16:38 +00003706 //
3707 // Don't apply this instruction's NSW or NUW flags to the new
3708 // expression. The instruction may be guarded by control flow that the
3709 // no-wrap behavior depends on. Non-control-equivalent instructions can be
3710 // mapped to the same SCEV expression, and it would be incorrect to transfer
3711 // NSW/NUW semantics to those operations.
Dan Gohmane5fb1032010-08-16 16:03:49 +00003712 SmallVector<const SCEV *, 4> AddOps;
3713 AddOps.push_back(getSCEV(U->getOperand(1)));
Dan Gohman47308d52010-08-31 22:53:17 +00003714 for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
3715 unsigned Opcode = Op->getValueID() - Value::InstructionVal;
3716 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
3717 break;
Dan Gohmane5fb1032010-08-16 16:03:49 +00003718 U = cast<Operator>(Op);
Dan Gohman47308d52010-08-31 22:53:17 +00003719 const SCEV *Op1 = getSCEV(U->getOperand(1));
3720 if (Opcode == Instruction::Sub)
3721 AddOps.push_back(getNegativeSCEV(Op1));
3722 else
3723 AddOps.push_back(Op1);
Dan Gohmane5fb1032010-08-16 16:03:49 +00003724 }
3725 AddOps.push_back(getSCEV(U->getOperand(0)));
Andrew Trickd25089f2011-11-29 02:16:38 +00003726 return getAddExpr(AddOps);
Dan Gohmane5fb1032010-08-16 16:03:49 +00003727 }
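  // Example (editor's note, not in the original source): an expression like
  // (((a - d) + b) + c) -- the left-leaning tree IR canonicalizes to -- is
  // gathered by the walk above into the single call getAddExpr({c, b, -d, a})
  // instead of three separately folded adds/subs.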
3728 case Instruction::Mul: {
Andrew Trickd25089f2011-11-29 02:16:38 +00003729 // Don't transfer NSW/NUW for the same reason as AddExpr.
Dan Gohmane5fb1032010-08-16 16:03:49 +00003730 SmallVector<const SCEV *, 4> MulOps;
3731 MulOps.push_back(getSCEV(U->getOperand(1)));
3732 for (Value *Op = U->getOperand(0);
Andrew Trick2a3b7162011-03-09 17:23:39 +00003733 Op->getValueID() == Instruction::Mul + Value::InstructionVal;
Dan Gohmane5fb1032010-08-16 16:03:49 +00003734 Op = U->getOperand(0)) {
3735 U = cast<Operator>(Op);
3736 MulOps.push_back(getSCEV(U->getOperand(1)));
3737 }
3738 MulOps.push_back(getSCEV(U->getOperand(0)));
3739 return getMulExpr(MulOps);
3740 }
Dan Gohman05e89732008-06-22 19:56:46 +00003741 case Instruction::UDiv:
Dan Gohmanc8e23622009-04-21 23:15:49 +00003742 return getUDivExpr(getSCEV(U->getOperand(0)),
3743 getSCEV(U->getOperand(1)));
Dan Gohman05e89732008-06-22 19:56:46 +00003744 case Instruction::Sub:
Dan Gohmanc8e23622009-04-21 23:15:49 +00003745 return getMinusSCEV(getSCEV(U->getOperand(0)),
3746 getSCEV(U->getOperand(1)));
Dan Gohman0ec05372009-04-21 02:26:00 +00003747 case Instruction::And:
3748 // For an expression like x&255 that merely masks off the high bits,
3749 // use zext(trunc(x)) as the SCEV expression.
3750 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
Dan Gohmandf199482009-04-25 17:05:40 +00003751 if (CI->isNullValue())
3752 return getSCEV(U->getOperand(1));
Dan Gohman05c1d372009-04-27 01:41:10 +00003753 if (CI->isAllOnesValue())
3754 return getSCEV(U->getOperand(0));
Dan Gohman0ec05372009-04-21 02:26:00 +00003755 const APInt &A = CI->getValue();
Dan Gohman1ee696d2009-06-16 19:52:01 +00003756
3757 // Instcombine's ShrinkDemandedConstant may strip bits out of
3758 // constants, obscuring what would otherwise be a low-bits mask.
3759 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3760 // knew about to reconstruct a low-bits mask value.
3761 unsigned LZ = A.countLeadingZeros();
Nick Lewycky31eaca52014-01-27 10:04:03 +00003762 unsigned TZ = A.countTrailingZeros();
Dan Gohman1ee696d2009-06-16 19:52:01 +00003763 unsigned BitWidth = A.getBitWidth();
Dan Gohman1ee696d2009-06-16 19:52:01 +00003764 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
Rafael Espindola7c68beb2014-02-18 15:33:12 +00003765 ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, DL);
Dan Gohman1ee696d2009-06-16 19:52:01 +00003766
Nick Lewycky31eaca52014-01-27 10:04:03 +00003767 APInt EffectiveMask =
3768 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
3769 if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) {
3770 const SCEV *MulCount = getConstant(
3771 ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, TZ)));
3772 return getMulExpr(
3773 getZeroExtendExpr(
3774 getTruncateExpr(
3775 getUDivExactExpr(getSCEV(U->getOperand(0)), MulCount),
3776 IntegerType::get(getContext(), BitWidth - LZ - TZ)),
3777 U->getType()),
3778 MulCount);
3779 }
Dan Gohman0ec05372009-04-21 02:26:00 +00003780 }
3781 break;
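    // Worked example (editor's note, not in the original source): for
    // "and i32 %x, 510" we get LZ = 23 and TZ = 1, so EffectiveMask is
    // 0x1FE == 510 and the test above succeeds. MulCount is 2, and the
    // expression is modeled as 2 * (zext (trunc (%x /u 2) to i8) to i32):
    // the exact divide and truncate strip the masked-off bits, and the
    // multiply reintroduces the trailing zero.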
Dan Gohman1ee696d2009-06-16 19:52:01 +00003782
Dan Gohman05e89732008-06-22 19:56:46 +00003783 case Instruction::Or:
3784 // If the RHS of the Or is a constant, we may have something like:
3785 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
3786 // optimizations will transparently handle this case.
3787 //
3788 // In order for this transformation to be safe, the LHS must be of the
3789 // form X*(2^n) and the Or constant must be less than 2^n.
3790 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
Dan Gohmanaf752342009-07-07 17:06:11 +00003791 const SCEV *LHS = getSCEV(U->getOperand(0));
Dan Gohman05e89732008-06-22 19:56:46 +00003792 const APInt &CIVal = CI->getValue();
Dan Gohmanc702fc02009-06-19 23:29:04 +00003793 if (GetMinTrailingZeros(LHS) >=
Dan Gohman36bad002009-09-17 18:05:20 +00003794 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3795 // Build a plain add SCEV.
3796 const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3797 // If the LHS of the add was an addrec and it has no-wrap flags,
3798 // transfer the no-wrap flags, since an or won't introduce a wrap.
3799 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3800 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
Andrew Trick8b55b732011-03-14 16:50:06 +00003801 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
3802 OldAR->getNoWrapFlags());
Dan Gohman36bad002009-09-17 18:05:20 +00003803 }
3804 return S;
3805 }
Chris Lattnerd934c702004-04-02 20:23:17 +00003806 }
Dan Gohman05e89732008-06-22 19:56:46 +00003807 break;
3808 case Instruction::Xor:
Dan Gohman05e89732008-06-22 19:56:46 +00003809 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
Nick Lewyckyf5c547d2008-07-07 06:15:49 +00003810 // If the RHS of the xor is a signbit, then this is just an add.
3811 // Instcombine turns add of signbit into xor as a strength reduction step.
Dan Gohman05e89732008-06-22 19:56:46 +00003812 if (CI->getValue().isSignBit())
Dan Gohmanc8e23622009-04-21 23:15:49 +00003813 return getAddExpr(getSCEV(U->getOperand(0)),
3814 getSCEV(U->getOperand(1)));
Nick Lewyckyf5c547d2008-07-07 06:15:49 +00003815
3816 // If the RHS of xor is -1, then this is a not operation.
Dan Gohmand277a1e2009-05-18 16:17:44 +00003817 if (CI->isAllOnesValue())
Dan Gohmanc8e23622009-04-21 23:15:49 +00003818 return getNotSCEV(getSCEV(U->getOperand(0)));
Dan Gohman6350296e2009-05-18 16:29:04 +00003819
3820 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3821 // This is a variant of the check for xor with -1, and it handles
3822 // the case where instcombine has trimmed non-demanded bits out
3823 // of an xor with -1.
3824 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3825 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3826 if (BO->getOpcode() == Instruction::And &&
3827 LCI->getValue() == CI->getValue())
3828 if (const SCEVZeroExtendExpr *Z =
Dan Gohmanb50f5a42009-06-17 01:22:39 +00003829 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
Chris Lattner229907c2011-07-18 04:54:35 +00003830 Type *UTy = U->getType();
Dan Gohmanaf752342009-07-07 17:06:11 +00003831 const SCEV *Z0 = Z->getOperand();
Chris Lattner229907c2011-07-18 04:54:35 +00003832 Type *Z0Ty = Z0->getType();
Dan Gohmaneddf7712009-06-18 00:00:20 +00003833 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3834
Dan Gohman8b0a4192010-03-01 17:49:51 +00003835 // If C is a low-bits mask, the zero extend is serving to
Dan Gohmaneddf7712009-06-18 00:00:20 +00003836 // mask off the high bits. Complement the operand and
3837 // re-apply the zext.
3838 if (APIntOps::isMask(Z0TySize, CI->getValue()))
3839 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3840
3841 // If C is a single bit, it may be in the sign-bit position
3842 // before the zero-extend. In this case, represent the xor
3843 // using an add, which is equivalent, and re-apply the zext.
Jay Foad583abbc2010-12-07 08:25:19 +00003844 APInt Trunc = CI->getValue().trunc(Z0TySize);
3845 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
Dan Gohmaneddf7712009-06-18 00:00:20 +00003846 Trunc.isSignBit())
3847 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3848 UTy);
Dan Gohmanb50f5a42009-06-17 01:22:39 +00003849 }
Dan Gohman05e89732008-06-22 19:56:46 +00003850 }
3851 break;
3852
3853 case Instruction::Shl:
3854 // Turn shift left of a constant amount into a multiply.
3855 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
Dan Gohmane5e1b7b2010-02-01 18:27:38 +00003856 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
Dan Gohmanacd700a2010-04-22 01:35:11 +00003857
3858 // If the shift count is not less than the bitwidth, the result of
3859 // the shift is undefined. Don't try to analyze it, because the
3860 // resolution chosen here may differ from the resolution chosen in
3861 // other parts of the compiler.
3862 if (SA->getValue().uge(BitWidth))
3863 break;
3864
Owen Andersonedb4a702009-07-24 23:12:02 +00003865 Constant *X = ConstantInt::get(getContext(),
Benjamin Kramerfc3ea6f2013-07-11 16:05:50 +00003866 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
Dan Gohmanc8e23622009-04-21 23:15:49 +00003867 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
Dan Gohman05e89732008-06-22 19:56:46 +00003868 }
3869 break;
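    // Example (editor's note, not in the original source): "shl i32 %x, 3"
    // is modeled as the SCEV (8 * %x), since shifting left by a constant k
    // is multiplication by 2^k; over-wide shift amounts were rejected above.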
3870
Nick Lewyckyf5c547d2008-07-07 06:15:49 +00003871 case Instruction::LShr:
Nick Lewycky52348302009-01-13 09:18:58 +00003872 // Turn logical shift right of a constant into a unsigned divide.
Nick Lewyckyf5c547d2008-07-07 06:15:49 +00003873 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
Dan Gohmane5e1b7b2010-02-01 18:27:38 +00003874 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
Dan Gohmanacd700a2010-04-22 01:35:11 +00003875
3876 // If the shift count is not less than the bitwidth, the result of
3877 // the shift is undefined. Don't try to analyze it, because the
3878 // resolution chosen here may differ from the resolution chosen in
3879 // other parts of the compiler.
3880 if (SA->getValue().uge(BitWidth))
3881 break;
3882
Owen Andersonedb4a702009-07-24 23:12:02 +00003883 Constant *X = ConstantInt::get(getContext(),
Benjamin Kramerfc3ea6f2013-07-11 16:05:50 +00003884 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
Dan Gohmanc8e23622009-04-21 23:15:49 +00003885 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
Nick Lewyckyf5c547d2008-07-07 06:15:49 +00003886 }
3887 break;
3888
Dan Gohman0ec05372009-04-21 02:26:00 +00003889 case Instruction::AShr:
3890 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3891 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
Dan Gohmanacd700a2010-04-22 01:35:11 +00003892 if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
Dan Gohman0ec05372009-04-21 02:26:00 +00003893 if (L->getOpcode() == Instruction::Shl &&
3894 L->getOperand(1) == U->getOperand(1)) {
Dan Gohmanacd700a2010-04-22 01:35:11 +00003895 uint64_t BitWidth = getTypeSizeInBits(U->getType());
3896
3897 // If the shift count is not less than the bitwidth, the result of
3898 // the shift is undefined. Don't try to analyze it, because the
3899 // resolution chosen here may differ from the resolution chosen in
3900 // other parts of the compiler.
3901 if (CI->getValue().uge(BitWidth))
3902 break;
3903
Dan Gohmandf199482009-04-25 17:05:40 +00003904 uint64_t Amt = BitWidth - CI->getZExtValue();
3905 if (Amt == BitWidth)
3906 return getSCEV(L->getOperand(0)); // shift by zero --> noop
Dan Gohman0ec05372009-04-21 02:26:00 +00003907 return
Dan Gohmanc8e23622009-04-21 23:15:49 +00003908 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
Dan Gohmanacd700a2010-04-22 01:35:11 +00003909 IntegerType::get(getContext(),
3910 Amt)),
3911 U->getType());
Dan Gohman0ec05372009-04-21 02:26:00 +00003912 }
3913 break;
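    // Example (editor's note, not in the original source): the pair
    // "%t = shl i32 %x, 24" followed by "ashr i32 %t, 24" is a
    // sign-extension-in-register of the low 8 bits, so with Amt = 32 - 24 = 8
    // it is modeled as sext(trunc %x to i8) to i32.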
3914
Dan Gohman05e89732008-06-22 19:56:46 +00003915 case Instruction::Trunc:
Dan Gohmanc8e23622009-04-21 23:15:49 +00003916 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
Dan Gohman05e89732008-06-22 19:56:46 +00003917
3918 case Instruction::ZExt:
Dan Gohmanc8e23622009-04-21 23:15:49 +00003919 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
Dan Gohman05e89732008-06-22 19:56:46 +00003920
3921 case Instruction::SExt:
Dan Gohmanc8e23622009-04-21 23:15:49 +00003922 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
Dan Gohman05e89732008-06-22 19:56:46 +00003923
3924 case Instruction::BitCast:
3925 // BitCasts are no-op casts so we just eliminate the cast.
Dan Gohmanb397e1a2009-04-21 01:07:12 +00003926 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
Dan Gohman05e89732008-06-22 19:56:46 +00003927 return getSCEV(U->getOperand(0));
3928 break;
3929
Dan Gohmane5e1b7b2010-02-01 18:27:38 +00003930  // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
3931 // lead to pointer expressions which cannot safely be expanded to GEPs,
3932 // because ScalarEvolution doesn't respect the GEP aliasing rules when
3933 // simplifying integer expressions.
Dan Gohman0a40ad92009-04-16 03:18:22 +00003934
Dan Gohmanee750d12009-05-08 20:26:55 +00003935 case Instruction::GetElementPtr:
Dan Gohmanb256ccf2009-12-18 02:09:29 +00003936 return createNodeForGEP(cast<GEPOperator>(U));
Dan Gohman0a40ad92009-04-16 03:18:22 +00003937
Dan Gohman05e89732008-06-22 19:56:46 +00003938 case Instruction::PHI:
3939 return createNodeForPHI(cast<PHINode>(U));
3940
3941 case Instruction::Select:
3942 // This could be a smax or umax that was lowered earlier.
3943 // Try to recover it.
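    // Illustrative example of the patterns matched below:
    //   %c = icmp sgt i32 %a, %b
    //   %s = select i1 %c, i32 %a, i32 %b
    // yields the SCEV smax(%a, %b); the same compare selecting between %a+%x
    // and %b+%x yields smax(%a, %b) + %x, and the unsigned and
    // eq/ne-against-zero forms below are handled analogously.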
3944 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3945 Value *LHS = ICI->getOperand(0);
3946 Value *RHS = ICI->getOperand(1);
3947 switch (ICI->getPredicate()) {
3948 case ICmpInst::ICMP_SLT:
3949 case ICmpInst::ICMP_SLE:
3950 std::swap(LHS, RHS);
3951 // fall through
3952 case ICmpInst::ICMP_SGT:
3953 case ICmpInst::ICMP_SGE:
Dan Gohmanf33bac32010-04-24 03:09:42 +00003954 // a >s b ? a+x : b+x -> smax(a, b)+x
3955 // a >s b ? b+x : a+x -> smin(a, b)+x
3956 if (LHS->getType() == U->getType()) {
3957 const SCEV *LS = getSCEV(LHS);
3958 const SCEV *RS = getSCEV(RHS);
3959 const SCEV *LA = getSCEV(U->getOperand(1));
3960 const SCEV *RA = getSCEV(U->getOperand(2));
3961 const SCEV *LDiff = getMinusSCEV(LA, LS);
3962 const SCEV *RDiff = getMinusSCEV(RA, RS);
3963 if (LDiff == RDiff)
3964 return getAddExpr(getSMaxExpr(LS, RS), LDiff);
3965 LDiff = getMinusSCEV(LA, RS);
3966 RDiff = getMinusSCEV(RA, LS);
3967 if (LDiff == RDiff)
3968 return getAddExpr(getSMinExpr(LS, RS), LDiff);
3969 }
Dan Gohman05e89732008-06-22 19:56:46 +00003970 break;
3971 case ICmpInst::ICMP_ULT:
3972 case ICmpInst::ICMP_ULE:
3973 std::swap(LHS, RHS);
3974 // fall through
3975 case ICmpInst::ICMP_UGT:
3976 case ICmpInst::ICMP_UGE:
Dan Gohmanf33bac32010-04-24 03:09:42 +00003977 // a >u b ? a+x : b+x -> umax(a, b)+x
3978 // a >u b ? b+x : a+x -> umin(a, b)+x
3979 if (LHS->getType() == U->getType()) {
3980 const SCEV *LS = getSCEV(LHS);
3981 const SCEV *RS = getSCEV(RHS);
3982 const SCEV *LA = getSCEV(U->getOperand(1));
3983 const SCEV *RA = getSCEV(U->getOperand(2));
3984 const SCEV *LDiff = getMinusSCEV(LA, LS);
3985 const SCEV *RDiff = getMinusSCEV(RA, RS);
3986 if (LDiff == RDiff)
3987 return getAddExpr(getUMaxExpr(LS, RS), LDiff);
3988 LDiff = getMinusSCEV(LA, RS);
3989 RDiff = getMinusSCEV(RA, LS);
3990 if (LDiff == RDiff)
3991 return getAddExpr(getUMinExpr(LS, RS), LDiff);
3992 }
Dan Gohman05e89732008-06-22 19:56:46 +00003993 break;
Dan Gohman4d3c3cf2009-06-18 20:21:07 +00003994 case ICmpInst::ICMP_NE:
Dan Gohmanf33bac32010-04-24 03:09:42 +00003995 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
3996 if (LHS->getType() == U->getType() &&
Dan Gohman4d3c3cf2009-06-18 20:21:07 +00003997 isa<ConstantInt>(RHS) &&
Dan Gohmanf33bac32010-04-24 03:09:42 +00003998 cast<ConstantInt>(RHS)->isZero()) {
3999 const SCEV *One = getConstant(LHS->getType(), 1);
4000 const SCEV *LS = getSCEV(LHS);
4001 const SCEV *LA = getSCEV(U->getOperand(1));
4002 const SCEV *RA = getSCEV(U->getOperand(2));
4003 const SCEV *LDiff = getMinusSCEV(LA, LS);
4004 const SCEV *RDiff = getMinusSCEV(RA, One);
4005 if (LDiff == RDiff)
Dan Gohmancf32f2b2010-08-13 20:17:14 +00004006 return getAddExpr(getUMaxExpr(One, LS), LDiff);
Dan Gohmanf33bac32010-04-24 03:09:42 +00004007 }
Dan Gohman4d3c3cf2009-06-18 20:21:07 +00004008 break;
4009 case ICmpInst::ICMP_EQ:
Dan Gohmanf33bac32010-04-24 03:09:42 +00004010 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
4011 if (LHS->getType() == U->getType() &&
Dan Gohman4d3c3cf2009-06-18 20:21:07 +00004012 isa<ConstantInt>(RHS) &&
Dan Gohmanf33bac32010-04-24 03:09:42 +00004013 cast<ConstantInt>(RHS)->isZero()) {
4014 const SCEV *One = getConstant(LHS->getType(), 1);
4015 const SCEV *LS = getSCEV(LHS);
4016 const SCEV *LA = getSCEV(U->getOperand(1));
4017 const SCEV *RA = getSCEV(U->getOperand(2));
4018 const SCEV *LDiff = getMinusSCEV(LA, One);
4019 const SCEV *RDiff = getMinusSCEV(RA, LS);
4020 if (LDiff == RDiff)
Dan Gohmancf32f2b2010-08-13 20:17:14 +00004021 return getAddExpr(getUMaxExpr(One, LS), LDiff);
Dan Gohmanf33bac32010-04-24 03:09:42 +00004022 }
Dan Gohman4d3c3cf2009-06-18 20:21:07 +00004023 break;
Dan Gohman05e89732008-06-22 19:56:46 +00004024 default:
4025 break;
4026 }
4027 }
4028
4029 default: // We cannot analyze this expression.
4030 break;
Chris Lattnerd934c702004-04-02 20:23:17 +00004031 }
4032
Dan Gohmanc8e23622009-04-21 23:15:49 +00004033 return getUnknown(V);
Chris Lattnerd934c702004-04-02 20:23:17 +00004034}
4035
4036
4037
4038//===----------------------------------------------------------------------===//
4039// Iteration Count Computation Code
4040//
4041
Andrew Trick2b6860f2011-08-11 23:36:16 +00004042/// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
Andrew Tricke81211f2012-01-11 06:52:55 +00004043/// normal unsigned value. Returns 0 if the trip count is unknown or not
4044/// constant. It will also return 0 if the maximum trip count is very large (>=
4045/// 2^32).
4046///
4047/// This "trip count" assumes that control exits via ExitingBlock. More
4048/// precisely, it is the number of times that control may reach ExitingBlock
4049/// before taking the branch. For loops with multiple exits, it may not be the
4050/// number of times that the loop header executes because the loop may exit
4051/// prematurely via another branch.
Andrew Trickee9143a2013-05-31 23:34:46 +00004052///
4053/// FIXME: We conservatively call getBackedgeTakenCount(L) instead of
4054/// getExitCount(L, ExitingBlock) to compute a safe trip count considering all
4055/// loop exits. getExitCount() may return an exact count for this branch
4056/// assuming no-signed-wrap. The number of well-defined iterations may actually
4057/// be higher than this trip count if this exit test is skipped and the loop
4058/// exits via a different branch. Ideally, getExitCount() would know whether it
4059/// depends on a NSW assumption, and we would only fall back to a conservative
4060/// trip count in that case.
Andrew Tricke81211f2012-01-11 06:52:55 +00004061unsigned ScalarEvolution::
Aaron Ballmand07f5512013-06-04 01:01:56 +00004062getSmallConstantTripCount(Loop *L, BasicBlock * /*ExitingBlock*/) {
Andrew Trick2b6860f2011-08-11 23:36:16 +00004063 const SCEVConstant *ExitCount =
Andrew Trickee9143a2013-05-31 23:34:46 +00004064 dyn_cast<SCEVConstant>(getBackedgeTakenCount(L));
Andrew Trick2b6860f2011-08-11 23:36:16 +00004065 if (!ExitCount)
4066 return 0;
4067
4068 ConstantInt *ExitConst = ExitCount->getValue();
4069
4070 // Guard against huge trip counts.
4071 if (ExitConst->getValue().getActiveBits() > 32)
4072 return 0;
4073
4074 // In case of integer overflow, this returns 0, which is correct.
4075 return ((unsigned)ExitConst->getZExtValue()) + 1;
4076}
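// A minimal usage sketch (illustrative only; assumes a ScalarEvolution &SE
// and a Loop *L with a unique exiting block are available to the caller):
//
//   if (unsigned TC = SE.getSmallConstantTripCount(L, L->getExitingBlock()))
//     ...  // e.g. a constant backedge-taken count of 7 yields TC == 8
//
// A result of 0 means the trip count is unknown, non-constant, or does not
// fit in 32 bits.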
4077
4078/// getSmallConstantTripMultiple - Returns the largest constant divisor of the
4079/// trip count of this loop as a normal unsigned value, if possible. This
4080/// means that the actual trip count is always a multiple of the returned
4081/// value (don't forget the trip count could very well be zero as well!).
4082///
4083/// Returns 1 if the trip count is unknown or not guaranteed to be a
4084/// multiple of a constant (which is also the case if the trip count is simply
4085/// constant; use getSmallConstantTripCount for that case). It will also return
4086/// 1 if the trip count is very large (>= 2^32).
Andrew Tricke81211f2012-01-11 06:52:55 +00004087///
4088/// As explained in the comments for getSmallConstantTripCount, this assumes
4089/// that control exits the loop via ExitingBlock.
4090unsigned ScalarEvolution::
Aaron Ballmand07f5512013-06-04 01:01:56 +00004091getSmallConstantTripMultiple(Loop *L, BasicBlock * /*ExitingBlock*/) {
Andrew Trickee9143a2013-05-31 23:34:46 +00004092 const SCEV *ExitCount = getBackedgeTakenCount(L);
Andrew Trick2b6860f2011-08-11 23:36:16 +00004093 if (ExitCount == getCouldNotCompute())
4094 return 1;
4095
4096 // Get the trip count from the BE count by adding 1.
4097 const SCEV *TCMul = getAddExpr(ExitCount,
4098 getConstant(ExitCount->getType(), 1));
4099 // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
4100 // to factor simple cases.
4101 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
4102 TCMul = Mul->getOperand(0);
4103
4104 const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
4105 if (!MulC)
4106 return 1;
4107
4108 ConstantInt *Result = MulC->getValue();
4109
Hal Finkel30bd9342012-10-24 19:46:44 +00004110 // Guard against huge trip counts (this requires checking
4111 // for zero to handle the case where the trip count == -1 and the
4112 // addition wraps).
4113 if (!Result || Result->getValue().getActiveBits() > 32 ||
4114 Result->getValue().getActiveBits() == 0)
Andrew Trick2b6860f2011-08-11 23:36:16 +00004115 return 1;
4116
4117 return (unsigned)Result->getZExtValue();
4118}
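// Illustrative example: if the backedge-taken count is (-1 + (4 * %n)), as
// for a bottom-tested loop that runs 4*%n times, the trip count folds to
// (4 * %n) and this routine returns 4; the iteration count is known to be a
// multiple of 4 even though it is not a compile-time constant.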
4119
Andrew Trick3ca3f982011-07-26 17:19:55 +00004120// getExitCount - Get the expression for the number of loop iterations for which
Andrew Trickee9143a2013-05-31 23:34:46 +00004121// this loop is guaranteed not to exit via ExitingBlock. Otherwise return
Andrew Trick3ca3f982011-07-26 17:19:55 +00004122// SCEVCouldNotCompute.
Andrew Trick77c55422011-08-02 04:23:35 +00004123const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
4124 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
Andrew Trick3ca3f982011-07-26 17:19:55 +00004125}
4126
Dan Gohman0bddac12009-02-24 18:55:53 +00004127/// getBackedgeTakenCount - If the specified loop has a predictable
4128/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
4129/// object. The backedge-taken count is the number of times the loop header
4130/// will be branched to from within the loop. This is one less than the
4131/// trip count of the loop, since it doesn't count the first iteration,
4132/// when the header is branched to from outside the loop.
4133///
4134/// Note that it is not valid to call this method on a loop without a
4135/// loop-invariant backedge-taken count (see
4136/// hasLoopInvariantBackedgeTakenCount).
4137///
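/// For example (illustrative), a bottom-tested loop equivalent to
///   i = 0; do { ... } while (++i != 8);
/// has a backedge-taken count of 7, while its trip count (the number of
/// times the body runs) is 8.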
Dan Gohmanaf752342009-07-07 17:06:11 +00004138const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
Andrew Trick3ca3f982011-07-26 17:19:55 +00004139 return getBackedgeTakenInfo(L).getExact(this);
Dan Gohman2b8da352009-04-30 20:47:05 +00004140}
4141
4142/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
4143/// return the least SCEV value that is known never to be less than the
4144/// actual backedge taken count.
Dan Gohmanaf752342009-07-07 17:06:11 +00004145const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
Andrew Trick3ca3f982011-07-26 17:19:55 +00004146 return getBackedgeTakenInfo(L).getMax(this);
Dan Gohman2b8da352009-04-30 20:47:05 +00004147}
4148
Dan Gohmandc191042009-07-08 19:23:34 +00004149/// PushLoopPHIs - Push PHI nodes in the header of the given loop
4150/// onto the given Worklist.
4151static void
4152PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
4153 BasicBlock *Header = L->getHeader();
4154
4155 // Push all Loop-header PHIs onto the Worklist stack.
4156 for (BasicBlock::iterator I = Header->begin();
4157 PHINode *PN = dyn_cast<PHINode>(I); ++I)
4158 Worklist.push_back(PN);
4159}
4160
Dan Gohman2b8da352009-04-30 20:47:05 +00004161const ScalarEvolution::BackedgeTakenInfo &
4162ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
Andrew Trick3ca3f982011-07-26 17:19:55 +00004163 // Initially insert an invalid entry for this loop. If the insertion
Dan Gohman8b0a4192010-03-01 17:49:51 +00004164 // succeeds, proceed to actually compute a backedge-taken count and
Dan Gohman76466372009-04-27 20:16:15 +00004165 // update the value. The temporary CouldNotCompute value tells SCEV
4166 // code elsewhere that it shouldn't attempt to request a new
4167 // backedge-taken count, which could result in infinite recursion.
Dan Gohman0daf6872011-05-09 18:44:09 +00004168 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
Andrew Trick3ca3f982011-07-26 17:19:55 +00004169 BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
Chris Lattnera337f5e2011-01-09 02:16:18 +00004170 if (!Pair.second)
4171 return Pair.first->second;
Dan Gohman76466372009-04-27 20:16:15 +00004172
Andrew Trick3ca3f982011-07-26 17:19:55 +00004173 // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
4174 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
4175 // must be cleared in this scope.
4176 BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
4177
4178 if (Result.getExact(this) != getCouldNotCompute()) {
4179 assert(isLoopInvariant(Result.getExact(this), L) &&
4180 isLoopInvariant(Result.getMax(this), L) &&
Chris Lattnera337f5e2011-01-09 02:16:18 +00004181 "Computed backedge-taken count isn't loop invariant for loop!");
4182 ++NumTripCountsComputed;
Andrew Trick3ca3f982011-07-26 17:19:55 +00004183 }
4184 else if (Result.getMax(this) == getCouldNotCompute() &&
4185 isa<PHINode>(L->getHeader()->begin())) {
4186 // Only count loops that have phi nodes as not being computable.
4187 ++NumTripCountsNotComputed;
Chris Lattnera337f5e2011-01-09 02:16:18 +00004188 }
Dan Gohman2b8da352009-04-30 20:47:05 +00004189
Chris Lattnera337f5e2011-01-09 02:16:18 +00004190 // Now that we know more about the trip count for this loop, forget any
4191 // existing SCEV values for PHI nodes in this loop since they are only
4192 // conservative estimates made without the benefit of trip count
4193 // information. This is similar to the code in forgetLoop, except that
4194 // it handles SCEVUnknown PHI nodes specially.
Andrew Trick3ca3f982011-07-26 17:19:55 +00004195 if (Result.hasAnyInfo()) {
Chris Lattnera337f5e2011-01-09 02:16:18 +00004196 SmallVector<Instruction *, 16> Worklist;
4197 PushLoopPHIs(L, Worklist);
Dan Gohmandc191042009-07-08 19:23:34 +00004198
Chris Lattnera337f5e2011-01-09 02:16:18 +00004199 SmallPtrSet<Instruction *, 8> Visited;
4200 while (!Worklist.empty()) {
4201 Instruction *I = Worklist.pop_back_val();
4202 if (!Visited.insert(I)) continue;
Dan Gohmandc191042009-07-08 19:23:34 +00004203
Chris Lattnera337f5e2011-01-09 02:16:18 +00004204 ValueExprMapType::iterator It =
Benjamin Kramere2ef47c2012-06-30 22:37:15 +00004205 ValueExprMap.find_as(static_cast<Value *>(I));
Chris Lattnera337f5e2011-01-09 02:16:18 +00004206 if (It != ValueExprMap.end()) {
4207 const SCEV *Old = It->second;
Dan Gohman761065e2010-11-17 02:44:44 +00004208
Chris Lattnera337f5e2011-01-09 02:16:18 +00004209 // SCEVUnknown for a PHI either means that it has an unrecognized
4210 // structure, or it's a PHI that's in the process of being computed
4211 // by createNodeForPHI. In the former case, additional loop trip
4212 // count information isn't going to change anything. In the latter
4213 // case, createNodeForPHI will perform the necessary updates on its
4214 // own when it gets to that point.
4215 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
4216 forgetMemoizedResults(Old);
4217 ValueExprMap.erase(It);
Dan Gohmandc191042009-07-08 19:23:34 +00004218 }
Chris Lattnera337f5e2011-01-09 02:16:18 +00004219 if (PHINode *PN = dyn_cast<PHINode>(I))
4220 ConstantEvolutionLoopExitValue.erase(PN);
Dan Gohmandc191042009-07-08 19:23:34 +00004221 }
Chris Lattnera337f5e2011-01-09 02:16:18 +00004222
4223 PushDefUseChildren(I, Worklist);
Dan Gohmandc191042009-07-08 19:23:34 +00004224 }
Chris Lattnerd934c702004-04-02 20:23:17 +00004225 }
Dan Gohman6acd95b2011-04-25 22:48:29 +00004226
4227 // Re-lookup the insert position, since the call to
4228 // ComputeBackedgeTakenCount above could result in a
4229 // recursive call to getBackedgeTakenInfo (on a different
4230 // loop), which would invalidate the iterator computed
4231 // earlier.
4232 return BackedgeTakenCounts.find(L)->second = Result;
Chris Lattnerd934c702004-04-02 20:23:17 +00004233}
4234
Dan Gohman880c92a2009-10-31 15:04:55 +00004235/// forgetLoop - This method should be called by the client when it has
4236/// changed a loop in a way that may affect ScalarEvolution's ability to
4237/// compute a trip count, or if the loop is deleted.
4238void ScalarEvolution::forgetLoop(const Loop *L) {
4239 // Drop any stored trip count value.
Andrew Trick3ca3f982011-07-26 17:19:55 +00004240 DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
4241 BackedgeTakenCounts.find(L);
4242 if (BTCPos != BackedgeTakenCounts.end()) {
4243 BTCPos->second.clear();
4244 BackedgeTakenCounts.erase(BTCPos);
4245 }
Dan Gohmanf1505722009-05-02 17:43:35 +00004246
Dan Gohman880c92a2009-10-31 15:04:55 +00004247 // Drop information about expressions based on loop-header PHIs.
Dan Gohman48f82222009-05-04 22:30:44 +00004248 SmallVector<Instruction *, 16> Worklist;
Dan Gohmandc191042009-07-08 19:23:34 +00004249 PushLoopPHIs(L, Worklist);
Dan Gohman48f82222009-05-04 22:30:44 +00004250
Dan Gohmandc191042009-07-08 19:23:34 +00004251 SmallPtrSet<Instruction *, 8> Visited;
Dan Gohman48f82222009-05-04 22:30:44 +00004252 while (!Worklist.empty()) {
4253 Instruction *I = Worklist.pop_back_val();
Dan Gohmandc191042009-07-08 19:23:34 +00004254 if (!Visited.insert(I)) continue;
4255
Benjamin Kramere2ef47c2012-06-30 22:37:15 +00004256 ValueExprMapType::iterator It =
4257 ValueExprMap.find_as(static_cast<Value *>(I));
Dan Gohman9bad2fb2010-08-27 18:55:03 +00004258 if (It != ValueExprMap.end()) {
Dan Gohman7e6b3932010-11-17 23:28:48 +00004259 forgetMemoizedResults(It->second);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00004260 ValueExprMap.erase(It);
Dan Gohmandc191042009-07-08 19:23:34 +00004261 if (PHINode *PN = dyn_cast<PHINode>(I))
4262 ConstantEvolutionLoopExitValue.erase(PN);
4263 }
4264
4265 PushDefUseChildren(I, Worklist);
Dan Gohman48f82222009-05-04 22:30:44 +00004266 }
Dan Gohmandcb354b2010-10-29 20:16:10 +00004267
4268 // Forget all contained loops too, to avoid dangling entries in the
4269 // ValuesAtScopes map.
4270 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4271 forgetLoop(*I);
Dan Gohman43300342009-02-17 20:49:49 +00004272}
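// A minimal usage sketch (illustrative only; "MyLoopPass" and
// rewriteExitCondition are hypothetical):
//
//   bool MyLoopPass::runOnLoop(Loop *L, LPPassManager &) {
//     ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
//     bool Changed = rewriteExitCondition(L); // invalidates cached counts
//     if (Changed)
//       SE.forgetLoop(L); // drop stale trip-count and SCEV info for L
//     return Changed;
//   }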
4273
Eric Christopheref6d5932010-07-29 01:25:38 +00004274/// forgetValue - This method should be called by the client when it has
4275/// changed a value in a way that may affect the value it computes, or which may
4276/// disconnect it from a def-use chain linking it to a loop.
4277void ScalarEvolution::forgetValue(Value *V) {
Dale Johannesen1d6827a2010-02-19 07:14:22 +00004278 Instruction *I = dyn_cast<Instruction>(V);
4279 if (!I) return;
4280
4281 // Drop information about expressions based on loop-header PHIs.
4282 SmallVector<Instruction *, 16> Worklist;
4283 Worklist.push_back(I);
4284
4285 SmallPtrSet<Instruction *, 8> Visited;
4286 while (!Worklist.empty()) {
4287 I = Worklist.pop_back_val();
4288 if (!Visited.insert(I)) continue;
4289
Benjamin Kramere2ef47c2012-06-30 22:37:15 +00004290 ValueExprMapType::iterator It =
4291 ValueExprMap.find_as(static_cast<Value *>(I));
Dan Gohman9bad2fb2010-08-27 18:55:03 +00004292 if (It != ValueExprMap.end()) {
Dan Gohman7e6b3932010-11-17 23:28:48 +00004293 forgetMemoizedResults(It->second);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00004294 ValueExprMap.erase(It);
Dale Johannesen1d6827a2010-02-19 07:14:22 +00004295 if (PHINode *PN = dyn_cast<PHINode>(I))
4296 ConstantEvolutionLoopExitValue.erase(PN);
4297 }
4298
4299 PushDefUseChildren(I, Worklist);
4300 }
4301}
4302
Andrew Trick3ca3f982011-07-26 17:19:55 +00004303/// getExact - Get the exact loop backedge taken count considering all loop
Andrew Trick90c7a102011-11-16 00:52:40 +00004304/// exits. A computable result can only be returned for loops with a single exit.
4305/// Returning the minimum taken count among all exits is incorrect because one
4306/// of the loop's exit limits may have been skipped. HowFarToZero assumes that
4307/// the limit of each loop test is never skipped. This is a valid assumption as
4308/// long as the loop exits via that test. For precise results, it is the
4309/// caller's responsibility to specify the relevant loop exit using
4310/// getExact(ExitingBlock, SE).
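/// Illustrative example: in a loop like
///   for (i = 0; ; i += 2) { if (i == n) break; if (other()) break; }
/// the limit computed for the "i == n" exit assumes the induction variable
/// really reaches n; if n is odd that test is stepped over and the loop
/// keeps running until the other exit fires, so taking the minimum of the
/// per-exit counts could under-report the true backedge-taken count.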
Andrew Trick3ca3f982011-07-26 17:19:55 +00004311const SCEV *
4312ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
4313 // If any exits were not computable, the loop is not computable.
4314 if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
4315
Andrew Trick90c7a102011-11-16 00:52:40 +00004316 // We need exactly one computable exit.
Andrew Trick77c55422011-08-02 04:23:35 +00004317 if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
Andrew Trick3ca3f982011-07-26 17:19:55 +00004318 assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
4319
Craig Topper9f008862014-04-15 04:59:12 +00004320 const SCEV *BECount = nullptr;
Andrew Trick3ca3f982011-07-26 17:19:55 +00004321 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
Craig Topper9f008862014-04-15 04:59:12 +00004322 ENT != nullptr; ENT = ENT->getNextExit()) {
Andrew Trick3ca3f982011-07-26 17:19:55 +00004323
4324 assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
4325
4326 if (!BECount)
4327 BECount = ENT->ExactNotTaken;
Andrew Trick90c7a102011-11-16 00:52:40 +00004328 else if (BECount != ENT->ExactNotTaken)
4329 return SE->getCouldNotCompute();
Andrew Trick3ca3f982011-07-26 17:19:55 +00004330 }
Andrew Trickbbb226a2011-09-02 21:20:46 +00004331 assert(BECount && "Invalid not taken count for loop exit");
Andrew Trick3ca3f982011-07-26 17:19:55 +00004332 return BECount;
4333}
4334
4335/// getExact - Get the exact not taken count for this loop exit.
4336const SCEV *
Andrew Trick77c55422011-08-02 04:23:35 +00004337ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
Andrew Trick3ca3f982011-07-26 17:19:55 +00004338 ScalarEvolution *SE) const {
4339 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
Craig Topper9f008862014-04-15 04:59:12 +00004340 ENT != nullptr; ENT = ENT->getNextExit()) {
Andrew Trick3ca3f982011-07-26 17:19:55 +00004341
Andrew Trick77c55422011-08-02 04:23:35 +00004342 if (ENT->ExitingBlock == ExitingBlock)
Andrew Trick3ca3f982011-07-26 17:19:55 +00004343 return ENT->ExactNotTaken;
4344 }
4345 return SE->getCouldNotCompute();
4346}
4347
4348/// getMax - Get the max backedge taken count for the loop.
4349const SCEV *
4350ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
4351 return Max ? Max : SE->getCouldNotCompute();
4352}
4353
Andrew Trick9093e152013-03-26 03:14:53 +00004354bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
4355 ScalarEvolution *SE) const {
4356 if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S))
4357 return true;
4358
4359 if (!ExitNotTaken.ExitingBlock)
4360 return false;
4361
4362 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
Craig Topper9f008862014-04-15 04:59:12 +00004363 ENT != nullptr; ENT = ENT->getNextExit()) {
Andrew Trick9093e152013-03-26 03:14:53 +00004364
4365 if (ENT->ExactNotTaken != SE->getCouldNotCompute()
4366 && SE->hasOperand(ENT->ExactNotTaken, S)) {
4367 return true;
4368 }
4369 }
4370 return false;
4371}
4372
Andrew Trick3ca3f982011-07-26 17:19:55 +00004373/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
4374/// computable exit into a persistent ExitNotTakenInfo array.
4375ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
4376 SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
4377 bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
4378
4379 if (!Complete)
4380 ExitNotTaken.setIncomplete();
4381
4382 unsigned NumExits = ExitCounts.size();
4383 if (NumExits == 0) return;
4384
Andrew Trick77c55422011-08-02 04:23:35 +00004385 ExitNotTaken.ExitingBlock = ExitCounts[0].first;
Andrew Trick3ca3f982011-07-26 17:19:55 +00004386 ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
4387 if (NumExits == 1) return;
4388
4389 // Handle the rare case of multiple computable exits.
4390 ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
4391
4392 ExitNotTakenInfo *PrevENT = &ExitNotTaken;
4393 for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
4394 PrevENT->setNextExit(ENT);
Andrew Trick77c55422011-08-02 04:23:35 +00004395 ENT->ExitingBlock = ExitCounts[i].first;
Andrew Trick3ca3f982011-07-26 17:19:55 +00004396 ENT->ExactNotTaken = ExitCounts[i].second;
4397 }
4398}
4399
4400/// clear - Invalidate this result and free the ExitNotTakenInfo array.
4401void ScalarEvolution::BackedgeTakenInfo::clear() {
Craig Topper9f008862014-04-15 04:59:12 +00004402 ExitNotTaken.ExitingBlock = nullptr;
4403 ExitNotTaken.ExactNotTaken = nullptr;
Andrew Trick3ca3f982011-07-26 17:19:55 +00004404 delete[] ExitNotTaken.getNextExit();
4405}
4406
Dan Gohman0bddac12009-02-24 18:55:53 +00004407/// ComputeBackedgeTakenCount - Compute the number of times the backedge
4408/// of the specified loop will execute.
Dan Gohman2b8da352009-04-30 20:47:05 +00004409ScalarEvolution::BackedgeTakenInfo
4410ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
Dan Gohmancb0efec2009-12-18 01:14:11 +00004411 SmallVector<BasicBlock *, 8> ExitingBlocks;
Dan Gohman96212b62009-06-22 00:31:57 +00004412 L->getExitingBlocks(ExitingBlocks);
Chris Lattnerd934c702004-04-02 20:23:17 +00004413
Dan Gohman96212b62009-06-22 00:31:57 +00004414 // Examine all exits and pick the most conservative values.
Dan Gohmanaf752342009-07-07 17:06:11 +00004415 const SCEV *MaxBECount = getCouldNotCompute();
Andrew Trick3ca3f982011-07-26 17:19:55 +00004416 bool CouldComputeBECount = true;
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004417 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
Craig Topper9f008862014-04-15 04:59:12 +00004418 const SCEV *LatchMaxCount = nullptr;
Andrew Trick3ca3f982011-07-26 17:19:55 +00004419 SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
Dan Gohman96212b62009-06-22 00:31:57 +00004420 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
Andrew Trick3ca3f982011-07-26 17:19:55 +00004421 ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
4422 if (EL.Exact == getCouldNotCompute())
Dan Gohman96212b62009-06-22 00:31:57 +00004423 // We couldn't compute an exact value for this exit, so
Dan Gohman8885b372009-06-22 21:10:22 +00004424 // we won't be able to compute an exact value for the loop.
Andrew Trick3ca3f982011-07-26 17:19:55 +00004425 CouldComputeBECount = false;
4426 else
4427 ExitCounts.push_back(std::make_pair(ExitingBlocks[i], EL.Exact));
4428
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004429 if (MaxBECount == getCouldNotCompute())
Andrew Trick3ca3f982011-07-26 17:19:55 +00004430 MaxBECount = EL.Max;
Andrew Trick90c7a102011-11-16 00:52:40 +00004431 else if (EL.Max != getCouldNotCompute()) {
4432 // We cannot take the "min" MaxBECount, because non-unit stride loops may
4433 // skip some loop tests. Taking the max over the exits is sufficiently
4434 // conservative. TODO: We could do better taking into consideration
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004435 // non-latch exits that dominate the latch.
4436 if (EL.MustExit && ExitingBlocks[i] == Latch)
4437 LatchMaxCount = EL.Max;
4438 else
4439 MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max);
Andrew Trick90c7a102011-11-16 00:52:40 +00004440 }
Dan Gohman96212b62009-06-22 00:31:57 +00004441 }
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004442 // Be more precise in the easy case of a loop latch that must exit.
4443 if (LatchMaxCount) {
4444 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, LatchMaxCount);
4445 }
Andrew Trick3ca3f982011-07-26 17:19:55 +00004446 return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
Dan Gohman96212b62009-06-22 00:31:57 +00004447}
4448
Andrew Trick3ca3f982011-07-26 17:19:55 +00004449/// ComputeExitLimit - Compute the number of times the backedge of the specified
4450/// loop will execute if it exits via the specified block.
4451ScalarEvolution::ExitLimit
4452ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
Dan Gohman96212b62009-06-22 00:31:57 +00004453
4454 // Okay, we've chosen an exiting block. See what condition causes us to
Benjamin Kramer5a188542014-02-11 15:44:32 +00004455 // exit at this block and remember the exit block and whether all other targets
4456 // lead to the loop header.
4457 bool MustExecuteLoopHeader = true;
Craig Topper9f008862014-04-15 04:59:12 +00004458 BasicBlock *Exit = nullptr;
Benjamin Kramer5a188542014-02-11 15:44:32 +00004459 for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock);
4460 SI != SE; ++SI)
4461 if (!L->contains(*SI)) {
4462 if (Exit) // Multiple exit successors.
4463 return getCouldNotCompute();
4464 Exit = *SI;
4465 } else if (*SI != L->getHeader()) {
4466 MustExecuteLoopHeader = false;
4467 }
Dan Gohmance973df2009-06-24 04:48:43 +00004468
Chris Lattner18954852007-01-07 02:24:26 +00004469 // At this point, we know we have a conditional branch that determines whether
4470 // the loop is exited. However, we don't know if the branch is executed each
4471 // time through the loop. If not, then the execution count of the branch will
4472 // not be equal to the trip count of the loop.
4473 //
4474 // Currently we check for this by checking to see if the Exit branch goes to
4475 // the loop header. If so, we know it will always execute the same number of
Chris Lattner5a554762007-01-14 01:24:47 +00004476 // times as the loop. We also handle the case where the exit block *is* the
Dan Gohman96212b62009-06-22 00:31:57 +00004477 // loop header. This is common for un-rotated loops.
4478 //
4479 // If both of those tests fail, walk up the unique predecessor chain to the
4480 // header, stopping if there is an edge that doesn't exit the loop. If the
4481 // header is reached, the execution count of the branch will be equal to the
4482 // trip count of the loop.
4483 //
4484 // More extensive analysis could be done to handle more cases here.
4485 //
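  // Illustrative example of the problematic shape: in
  //   for (i = 0; i != n; ++i)
  //     if (rare_condition()) { if (i == k) break; }
  // the block testing "i == k" is an exiting block, but it only runs on
  // iterations where rare_condition() holds, so the number of times its
  // branch executes need not match the loop trip count and we give up.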
Benjamin Kramer5a188542014-02-11 15:44:32 +00004486 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) {
Dan Gohman96212b62009-06-22 00:31:57 +00004487 // The simple checks failed, try climbing the unique predecessor chain
4488 // up to the header.
4489 bool Ok = false;
Benjamin Kramer5a188542014-02-11 15:44:32 +00004490 for (BasicBlock *BB = ExitingBlock; BB; ) {
Dan Gohman96212b62009-06-22 00:31:57 +00004491 BasicBlock *Pred = BB->getUniquePredecessor();
4492 if (!Pred)
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004493 return getCouldNotCompute();
Dan Gohman96212b62009-06-22 00:31:57 +00004494 TerminatorInst *PredTerm = Pred->getTerminator();
4495 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
4496 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
4497 if (PredSucc == BB)
4498 continue;
4499 // If the predecessor has a successor that isn't BB and isn't
4500 // outside the loop, assume the worst.
4501 if (L->contains(PredSucc))
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004502 return getCouldNotCompute();
Dan Gohman96212b62009-06-22 00:31:57 +00004503 }
4504 if (Pred == L->getHeader()) {
4505 Ok = true;
4506 break;
4507 }
4508 BB = Pred;
4509 }
4510 if (!Ok)
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004511 return getCouldNotCompute();
Dan Gohman96212b62009-06-22 00:31:57 +00004512 }
4513
Benjamin Kramer5a188542014-02-11 15:44:32 +00004514 TerminatorInst *Term = ExitingBlock->getTerminator();
4515 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
4516 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
4517 // Proceed to the next level to examine the exit condition expression.
4518 return ComputeExitLimitFromCond(L, BI->getCondition(), BI->getSuccessor(0),
4519 BI->getSuccessor(1),
4520 /*IsSubExpr=*/false);
4521 }
4522
4523 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term))
4524 return ComputeExitLimitFromSingleExitSwitch(L, SI, Exit,
4525 /*IsSubExpr=*/false);
4526
4527 return getCouldNotCompute();
Dan Gohman96212b62009-06-22 00:31:57 +00004528}
4529
Andrew Trick3ca3f982011-07-26 17:19:55 +00004530/// ComputeExitLimitFromCond - Compute the number of times the
Dan Gohman96212b62009-06-22 00:31:57 +00004531/// backedge of the specified loop will execute if its exit condition
4532/// were a conditional branch of ExitCond, TBB, and FBB.
Andrew Trick5b245a12013-05-31 06:43:25 +00004533///
4534/// @param IsSubExpr is true if ExitCond does not directly control the exit
4535/// branch. In this case, we cannot assume that the loop only exits when the
4536/// condition is true and cannot infer that failing to meet the condition prior
4537/// to integer wraparound results in undefined behavior.
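/// For example (illustrative), in
///   for (i = 0; i < n && i < m; ++i) ...
/// either operand of the 'and' may terminate the loop, so the exact limit is
/// the unsigned minimum of the two per-operand limits, and the conservative
/// maximum is combined the same way below.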
Andrew Trick3ca3f982011-07-26 17:19:55 +00004538ScalarEvolution::ExitLimit
4539ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
4540 Value *ExitCond,
4541 BasicBlock *TBB,
Andrew Trick5b245a12013-05-31 06:43:25 +00004542 BasicBlock *FBB,
4543 bool IsSubExpr) {
Dan Gohmanf19aeec2009-06-24 01:18:18 +00004544 // Check if the controlling expression for this loop is an And or Or.
Dan Gohman96212b62009-06-22 00:31:57 +00004545 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
4546 if (BO->getOpcode() == Instruction::And) {
4547 // Recurse on the operands of the and.
Andrew Trick5b245a12013-05-31 06:43:25 +00004548 bool EitherMayExit = L->contains(TBB);
4549 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
4550 IsSubExpr || EitherMayExit);
4551 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
4552 IsSubExpr || EitherMayExit);
Dan Gohmanaf752342009-07-07 17:06:11 +00004553 const SCEV *BECount = getCouldNotCompute();
4554 const SCEV *MaxBECount = getCouldNotCompute();
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004555 bool MustExit = false;
Andrew Trick5b245a12013-05-31 06:43:25 +00004556 if (EitherMayExit) {
Dan Gohman96212b62009-06-22 00:31:57 +00004557 // Both conditions must be true for the loop to continue executing.
4558 // Choose the less conservative count.
Andrew Trick3ca3f982011-07-26 17:19:55 +00004559 if (EL0.Exact == getCouldNotCompute() ||
4560 EL1.Exact == getCouldNotCompute())
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004561 BECount = getCouldNotCompute();
Dan Gohmaned627382009-06-22 15:09:28 +00004562 else
Andrew Trick3ca3f982011-07-26 17:19:55 +00004563 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
4564 if (EL0.Max == getCouldNotCompute())
4565 MaxBECount = EL1.Max;
4566 else if (EL1.Max == getCouldNotCompute())
4567 MaxBECount = EL0.Max;
Dan Gohmaned627382009-06-22 15:09:28 +00004568 else
Andrew Trick3ca3f982011-07-26 17:19:55 +00004569 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004570 MustExit = EL0.MustExit || EL1.MustExit;
Dan Gohman96212b62009-06-22 00:31:57 +00004571 } else {
Dan Gohmanf7495f22010-08-11 00:12:36 +00004572 // Both conditions must be true at the same time for the loop to exit.
4573 // For now, be conservative.
Dan Gohman96212b62009-06-22 00:31:57 +00004574 assert(L->contains(FBB) && "Loop block has no successor in loop!");
Andrew Trick3ca3f982011-07-26 17:19:55 +00004575 if (EL0.Max == EL1.Max)
4576 MaxBECount = EL0.Max;
4577 if (EL0.Exact == EL1.Exact)
4578 BECount = EL0.Exact;
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004579 MustExit = EL0.MustExit && EL1.MustExit;
Dan Gohman96212b62009-06-22 00:31:57 +00004580 }
4581
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004582 return ExitLimit(BECount, MaxBECount, MustExit);
Dan Gohman96212b62009-06-22 00:31:57 +00004583 }
4584 if (BO->getOpcode() == Instruction::Or) {
4585 // Recurse on the operands of the or.
Andrew Trick5b245a12013-05-31 06:43:25 +00004586 bool EitherMayExit = L->contains(FBB);
4587 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
4588 IsSubExpr || EitherMayExit);
4589 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
4590 IsSubExpr || EitherMayExit);
Dan Gohmanaf752342009-07-07 17:06:11 +00004591 const SCEV *BECount = getCouldNotCompute();
4592 const SCEV *MaxBECount = getCouldNotCompute();
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004593 bool MustExit = false;
Andrew Trick5b245a12013-05-31 06:43:25 +00004594 if (EitherMayExit) {
Dan Gohman96212b62009-06-22 00:31:57 +00004595 // Both conditions must be false for the loop to continue executing.
4596 // Choose the less conservative count.
Andrew Trick3ca3f982011-07-26 17:19:55 +00004597 if (EL0.Exact == getCouldNotCompute() ||
4598 EL1.Exact == getCouldNotCompute())
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004599 BECount = getCouldNotCompute();
Dan Gohmaned627382009-06-22 15:09:28 +00004600 else
Andrew Trick3ca3f982011-07-26 17:19:55 +00004601 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
4602 if (EL0.Max == getCouldNotCompute())
4603 MaxBECount = EL1.Max;
4604 else if (EL1.Max == getCouldNotCompute())
4605 MaxBECount = EL0.Max;
Dan Gohmaned627382009-06-22 15:09:28 +00004606 else
Andrew Trick3ca3f982011-07-26 17:19:55 +00004607 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004608 MustExit = EL0.MustExit || EL1.MustExit;
Dan Gohman96212b62009-06-22 00:31:57 +00004609 } else {
Dan Gohmanf7495f22010-08-11 00:12:36 +00004610 // Both conditions must be false at the same time for the loop to exit.
4611 // For now, be conservative.
Dan Gohman96212b62009-06-22 00:31:57 +00004612 assert(L->contains(TBB) && "Loop block has no successor in loop!");
Andrew Trick3ca3f982011-07-26 17:19:55 +00004613 if (EL0.Max == EL1.Max)
4614 MaxBECount = EL0.Max;
4615 if (EL0.Exact == EL1.Exact)
4616 BECount = EL0.Exact;
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004617 MustExit = EL0.MustExit && EL1.MustExit;
Dan Gohman96212b62009-06-22 00:31:57 +00004618 }
4619
Andrew Trickee5aa7f2014-01-15 06:42:11 +00004620 return ExitLimit(BECount, MaxBECount, MustExit);
Dan Gohman96212b62009-06-22 00:31:57 +00004621 }
4622 }
4623
4624 // With an icmp, it may be feasible to compute an exact backedge-taken count.
Dan Gohman8b0a4192010-03-01 17:49:51 +00004625 // Proceed to the next level to examine the icmp.
Dan Gohman96212b62009-06-22 00:31:57 +00004626 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
Andrew Trick5b245a12013-05-31 06:43:25 +00004627 return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, IsSubExpr);
Reid Spencer266e42b2006-12-23 06:05:41 +00004628
Dan Gohman6b1e2a82010-02-19 18:12:07 +00004629 // Check for a constant condition. These are normally stripped out by
4630 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
4631 // preserve the CFG and is temporarily leaving constant conditions
4632 // in place.
4633 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
4634 if (L->contains(FBB) == !CI->getZExtValue())
4635 // The backedge is always taken.
4636 return getCouldNotCompute();
4637 else
4638 // The backedge is never taken.
Dan Gohman1d2ded72010-05-03 22:09:21 +00004639 return getConstant(CI->getType(), 0);
Dan Gohman6b1e2a82010-02-19 18:12:07 +00004640 }
4641
Eli Friedmanebf98b02009-05-09 12:32:42 +00004642 // If it's not an integer or pointer comparison then compute it the hard way.
Andrew Trick3ca3f982011-07-26 17:19:55 +00004643 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
Dan Gohman96212b62009-06-22 00:31:57 +00004644}
4645
Andrew Trick3ca3f982011-07-26 17:19:55 +00004646/// ComputeExitLimitFromICmp - Compute the number of times the
Dan Gohman96212b62009-06-22 00:31:57 +00004647/// backedge of the specified loop will execute if its exit condition
4648/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
Andrew Trick3ca3f982011-07-26 17:19:55 +00004649ScalarEvolution::ExitLimit
4650ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
4651 ICmpInst *ExitCond,
4652 BasicBlock *TBB,
Andrew Trick5b245a12013-05-31 06:43:25 +00004653 BasicBlock *FBB,
4654 bool IsSubExpr) {
Chris Lattnerd934c702004-04-02 20:23:17 +00004655
Reid Spencer266e42b2006-12-23 06:05:41 +00004656 // If the condition was exit on true, convert the condition to exit on false
4657 ICmpInst::Predicate Cond;
Dan Gohman96212b62009-06-22 00:31:57 +00004658 if (!L->contains(FBB))
Reid Spencer266e42b2006-12-23 06:05:41 +00004659 Cond = ExitCond->getPredicate();
Chris Lattnerec901cc2004-10-12 01:49:27 +00004660 else
Reid Spencer266e42b2006-12-23 06:05:41 +00004661 Cond = ExitCond->getInversePredicate();
Chris Lattnerec901cc2004-10-12 01:49:27 +00004662
4663 // Handle common loops like: for (X = "string"; *X; ++X)
4664 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
4665 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
Andrew Trick3ca3f982011-07-26 17:19:55 +00004666 ExitLimit ItCnt =
4667 ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
Dan Gohmanba820342010-02-24 17:31:30 +00004668 if (ItCnt.hasAnyInfo())
4669 return ItCnt;
Chris Lattnerec901cc2004-10-12 01:49:27 +00004670 }
4671
Dan Gohmanaf752342009-07-07 17:06:11 +00004672 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
4673 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
Chris Lattnerd934c702004-04-02 20:23:17 +00004674
4675 // Try to evaluate any dependencies out of the loop.
Dan Gohman8ca08852009-05-24 23:25:42 +00004676 LHS = getSCEVAtScope(LHS, L);
4677 RHS = getSCEVAtScope(RHS, L);
Chris Lattnerd934c702004-04-02 20:23:17 +00004678
Dan Gohmance973df2009-06-24 04:48:43 +00004679  // At this point, we would like to compute for how many iterations of the
Reid Spencer266e42b2006-12-23 06:05:41 +00004680  // loop the predicate will return true with these inputs.
Dan Gohmanafd6db92010-11-17 21:23:15 +00004681 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
Dan Gohmandc5f5cb2008-09-16 18:52:57 +00004682 // If there is a loop-invariant, force it into the RHS.
Chris Lattnerd934c702004-04-02 20:23:17 +00004683 std::swap(LHS, RHS);
Reid Spencer266e42b2006-12-23 06:05:41 +00004684 Cond = ICmpInst::getSwappedPredicate(Cond);
Chris Lattnerd934c702004-04-02 20:23:17 +00004685 }
4686
Dan Gohman81585c12010-05-03 16:35:17 +00004687 // Simplify the operands before analyzing them.
4688 (void)SimplifyICmpOperands(Cond, LHS, RHS);
4689
Chris Lattnerd934c702004-04-02 20:23:17 +00004690 // If we have a comparison of a chrec against a constant, try to use value
4691 // ranges to answer this query.
Dan Gohmana30370b2009-05-04 22:02:23 +00004692 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
4693 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
Chris Lattnerd934c702004-04-02 20:23:17 +00004694 if (AddRec->getLoop() == L) {
Eli Friedmanebf98b02009-05-09 12:32:42 +00004695 // Form the constant range.
4696 ConstantRange CompRange(
4697 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
Misha Brukman01808ca2005-04-21 21:13:18 +00004698
Dan Gohmanaf752342009-07-07 17:06:11 +00004699 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
Eli Friedmanebf98b02009-05-09 12:32:42 +00004700 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
Chris Lattnerd934c702004-04-02 20:23:17 +00004701 }
Misha Brukman01808ca2005-04-21 21:13:18 +00004702
Chris Lattnerd934c702004-04-02 20:23:17 +00004703 switch (Cond) {
Reid Spencer266e42b2006-12-23 06:05:41 +00004704 case ICmpInst::ICMP_NE: { // while (X != Y)
Chris Lattnerd934c702004-04-02 20:23:17 +00004705 // Convert to: while (X-Y != 0)
Andrew Trick5b245a12013-05-31 06:43:25 +00004706 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, IsSubExpr);
Andrew Trick3ca3f982011-07-26 17:19:55 +00004707 if (EL.hasAnyInfo()) return EL;
Chris Lattnerd934c702004-04-02 20:23:17 +00004708 break;
Reid Spencer266e42b2006-12-23 06:05:41 +00004709 }
Dan Gohman8a8ad7d2009-08-20 16:42:55 +00004710 case ICmpInst::ICMP_EQ: { // while (X == Y)
4711 // Convert to: while (X-Y == 0)
Andrew Trick3ca3f982011-07-26 17:19:55 +00004712 ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
4713 if (EL.hasAnyInfo()) return EL;
Chris Lattnerd934c702004-04-02 20:23:17 +00004714 break;
Reid Spencer266e42b2006-12-23 06:05:41 +00004715 }
Andrew Trick34e2f0c2013-11-06 02:08:26 +00004716 case ICmpInst::ICMP_SLT:
4717 case ICmpInst::ICMP_ULT: { // while (X < Y)
4718 bool IsSigned = Cond == ICmpInst::ICMP_SLT;
4719 ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, IsSubExpr);
Andrew Trick3ca3f982011-07-26 17:19:55 +00004720 if (EL.hasAnyInfo()) return EL;
Chris Lattner587a75b2005-08-15 23:33:51 +00004721 break;
Reid Spencer266e42b2006-12-23 06:05:41 +00004722 }
Andrew Trick34e2f0c2013-11-06 02:08:26 +00004723 case ICmpInst::ICMP_SGT:
4724 case ICmpInst::ICMP_UGT: { // while (X > Y)
4725 bool IsSigned = Cond == ICmpInst::ICMP_SGT;
4726 ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, IsSubExpr);
Andrew Trick3ca3f982011-07-26 17:19:55 +00004727 if (EL.hasAnyInfo()) return EL;
Chris Lattner587a75b2005-08-15 23:33:51 +00004728 break;
Reid Spencer266e42b2006-12-23 06:05:41 +00004729 }
Chris Lattnerd934c702004-04-02 20:23:17 +00004730 default:
Chris Lattner09169212004-04-02 20:26:46 +00004731#if 0
David Greenedf1c4972009-12-23 22:18:14 +00004732 dbgs() << "ComputeBackedgeTakenCount ";
Chris Lattnerd934c702004-04-02 20:23:17 +00004733 if (ExitCond->getOperand(0)->getType()->isUnsigned())
David Greenedf1c4972009-12-23 22:18:14 +00004734 dbgs() << "[unsigned] ";
4735 dbgs() << *LHS << " "
Dan Gohmance973df2009-06-24 04:48:43 +00004736 << Instruction::getOpcodeName(Instruction::ICmp)
Reid Spencer266e42b2006-12-23 06:05:41 +00004737 << " " << *RHS << "\n";
Chris Lattner09169212004-04-02 20:26:46 +00004738#endif
Chris Lattner0defaa12004-04-03 00:43:03 +00004739 break;
Chris Lattnerd934c702004-04-02 20:23:17 +00004740 }
Andrew Trick3ca3f982011-07-26 17:19:55 +00004741 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
Chris Lattner4021d1a2004-04-17 18:36:24 +00004742}
4743
Benjamin Kramer5a188542014-02-11 15:44:32 +00004744ScalarEvolution::ExitLimit
4745ScalarEvolution::ComputeExitLimitFromSingleExitSwitch(const Loop *L,
4746 SwitchInst *Switch,
4747 BasicBlock *ExitingBlock,
4748 bool IsSubExpr) {
4749 assert(!L->contains(ExitingBlock) && "Not an exiting block!");
4750
4751 // Give up if the exit is the default dest of a switch.
4752 if (Switch->getDefaultDest() == ExitingBlock)
4753 return getCouldNotCompute();
4754
4755 assert(L->contains(Switch->getDefaultDest()) &&
4756 "Default case must not exit the loop!");
4757 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
4758 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
4759
4760 // while (X != Y) --> while (X-Y != 0)
4761 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, IsSubExpr);
4762 if (EL.hasAnyInfo())
4763 return EL;
4764
4765 return getCouldNotCompute();
4766}
4767
Chris Lattnerec901cc2004-10-12 01:49:27 +00004768static ConstantInt *
Dan Gohmana37eaf22007-10-22 18:31:58 +00004769EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
4770 ScalarEvolution &SE) {
Dan Gohmanaf752342009-07-07 17:06:11 +00004771 const SCEV *InVal = SE.getConstant(C);
4772 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
Chris Lattnerec901cc2004-10-12 01:49:27 +00004773 assert(isa<SCEVConstant>(Val) &&
4774 "Evaluation of SCEV at constant didn't fold correctly?");
4775 return cast<SCEVConstant>(Val)->getValue();
4776}
4777
Andrew Trick3ca3f982011-07-26 17:19:55 +00004778/// ComputeLoadConstantCompareExitLimit - Given an exit condition of
Dan Gohman0bddac12009-02-24 18:55:53 +00004779/// 'icmp op load X, cst', try to see if we can compute the backedge
4780/// execution count.
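/// A typical pattern (illustrative) is a scan over a constant table:
///   static const int Tbl[] = {3, 1, 4, 1, 5, 0};
///   for (i = 0; Tbl[i] != 0; ++i) ...
/// Here the load is from a constant-initialized global indexed by an affine
/// recurrence, so the exit count is found by evaluating the compare for
/// successive iteration numbers until it fails (at index 5 here).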
Andrew Trick3ca3f982011-07-26 17:19:55 +00004781ScalarEvolution::ExitLimit
4782ScalarEvolution::ComputeLoadConstantCompareExitLimit(
4783 LoadInst *LI,
4784 Constant *RHS,
4785 const Loop *L,
4786 ICmpInst::Predicate predicate) {
4787
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004788 if (LI->isVolatile()) return getCouldNotCompute();
Chris Lattnerec901cc2004-10-12 01:49:27 +00004789
4790 // Check to see if the loaded pointer is a getelementptr of a global.
Dan Gohmanba820342010-02-24 17:31:30 +00004791 // TODO: Use SCEV instead of manually grubbing with GEPs.
Chris Lattnerec901cc2004-10-12 01:49:27 +00004792 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004793 if (!GEP) return getCouldNotCompute();
Chris Lattnerec901cc2004-10-12 01:49:27 +00004794
4795 // Make sure that it is really a constant global we are gepping, with an
4796 // initializer, and make sure the first IDX is really 0.
4797 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
Dan Gohman5d5bc6d2009-08-19 18:20:44 +00004798 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
Chris Lattnerec901cc2004-10-12 01:49:27 +00004799 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
4800 !cast<Constant>(GEP->getOperand(1))->isNullValue())
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004801 return getCouldNotCompute();
Chris Lattnerec901cc2004-10-12 01:49:27 +00004802
4803 // Okay, we allow one non-constant index into the GEP instruction.
Craig Topper9f008862014-04-15 04:59:12 +00004804 Value *VarIdx = nullptr;
Chris Lattnere166a852012-01-24 05:49:24 +00004805 std::vector<Constant*> Indexes;
Chris Lattnerec901cc2004-10-12 01:49:27 +00004806 unsigned VarIdxNum = 0;
4807 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
4808 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
4809 Indexes.push_back(CI);
4810 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004811 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
Chris Lattnerec901cc2004-10-12 01:49:27 +00004812 VarIdx = GEP->getOperand(i);
4813 VarIdxNum = i-2;
Craig Topper9f008862014-04-15 04:59:12 +00004814 Indexes.push_back(nullptr);
Chris Lattnerec901cc2004-10-12 01:49:27 +00004815 }
4816
Andrew Trick7004e4b2012-03-26 22:33:59 +00004817 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
4818 if (!VarIdx)
4819 return getCouldNotCompute();
4820
Chris Lattnerec901cc2004-10-12 01:49:27 +00004821 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
4822 // Check to see if X is a loop-variant value now.
Dan Gohmanaf752342009-07-07 17:06:11 +00004823 const SCEV *Idx = getSCEV(VarIdx);
Dan Gohman8ca08852009-05-24 23:25:42 +00004824 Idx = getSCEVAtScope(Idx, L);
Chris Lattnerec901cc2004-10-12 01:49:27 +00004825
4826 // We can only recognize very limited forms of loop index expressions, in
4827 // particular, only affine AddRec's like {C1,+,C2}.
Dan Gohman48f82222009-05-04 22:30:44 +00004828 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
Dan Gohmanafd6db92010-11-17 21:23:15 +00004829 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
Chris Lattnerec901cc2004-10-12 01:49:27 +00004830 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
4831 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004832 return getCouldNotCompute();
Chris Lattnerec901cc2004-10-12 01:49:27 +00004833
4834 unsigned MaxSteps = MaxBruteForceIterations;
4835 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
Owen Andersonedb4a702009-07-24 23:12:02 +00004836 ConstantInt *ItCst = ConstantInt::get(
Owen Andersonb6b25302009-07-14 23:09:55 +00004837 cast<IntegerType>(IdxExpr->getType()), IterationNum);
Dan Gohmanc8e23622009-04-21 23:15:49 +00004838 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
Chris Lattnerec901cc2004-10-12 01:49:27 +00004839
4840 // Form the GEP offset.
4841 Indexes[VarIdxNum] = Val;
4842
Chris Lattnere166a852012-01-24 05:49:24 +00004843 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
4844 Indexes);
Craig Topper9f008862014-04-15 04:59:12 +00004845 if (!Result) break; // Cannot compute!
Chris Lattnerec901cc2004-10-12 01:49:27 +00004846
4847 // Evaluate the condition for this iteration.
Reid Spencer266e42b2006-12-23 06:05:41 +00004848 Result = ConstantExpr::getICmp(predicate, Result, RHS);
Zhou Sheng75b871f2007-01-11 12:24:14 +00004849 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
Reid Spencer983e3b32007-03-01 07:25:48 +00004850 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
Chris Lattnerec901cc2004-10-12 01:49:27 +00004851#if 0
David Greenedf1c4972009-12-23 22:18:14 +00004852 dbgs() << "\n***\n*** Computed loop count " << *ItCst
Dan Gohmane20f8242009-04-21 00:47:46 +00004853 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
4854 << "***\n";
Chris Lattnerec901cc2004-10-12 01:49:27 +00004855#endif
4856 ++NumArrayLenItCounts;
Dan Gohmanc8e23622009-04-21 23:15:49 +00004857 return getConstant(ItCst); // Found terminating iteration!
Chris Lattnerec901cc2004-10-12 01:49:27 +00004858 }
4859 }
Dan Gohmanc5c85c02009-06-27 21:21:31 +00004860 return getCouldNotCompute();
Chris Lattnerec901cc2004-10-12 01:49:27 +00004861}
4862
4863
Chris Lattnerdd730472004-04-17 22:58:41 +00004864/// CanConstantFold - Return true if we can constant fold an instruction of the
4865/// specified type, assuming that all operands were constants.
4866static bool CanConstantFold(const Instruction *I) {
Reid Spencer2341c222007-02-02 02:16:23 +00004867 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
Nick Lewyckya6674c72011-10-22 19:58:20 +00004868 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
4869 isa<LoadInst>(I))
Chris Lattnerdd730472004-04-17 22:58:41 +00004870 return true;
Misha Brukman01808ca2005-04-21 21:13:18 +00004871
Chris Lattnerdd730472004-04-17 22:58:41 +00004872 if (const CallInst *CI = dyn_cast<CallInst>(I))
4873 if (const Function *F = CI->getCalledFunction())
Dan Gohmana65951f2008-01-31 01:05:10 +00004874 return canConstantFoldCallTo(F);
Chris Lattnerdd730472004-04-17 22:58:41 +00004875 return false;
Chris Lattner4021d1a2004-04-17 18:36:24 +00004876}
4877
Andrew Trick3a86ba72011-10-05 03:25:31 +00004878/// Determine whether this instruction can constant evolve within this loop
4879/// assuming its operands can all constant evolve.
4880static bool canConstantEvolve(Instruction *I, const Loop *L) {
4881 // An instruction outside of the loop can't be derived from a loop PHI.
4882 if (!L->contains(I)) return false;
4883
4884 if (isa<PHINode>(I)) {
4885 if (L->getHeader() == I->getParent())
4886 return true;
4887 else
4888 // We don't currently keep track of the control flow needed to evaluate
4889 // PHIs, so we cannot handle PHIs inside of loops.
4890 return false;
4891 }
4892
4893 // If we won't be able to constant fold this expression even if the operands
4894 // are constants, bail early.
4895 return CanConstantFold(I);
4896}
4897
4898/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
4899/// recursing through each instruction operand until reaching a loop header phi.
4900static PHINode *
4901getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
Andrew Tricke9162f12011-10-05 05:58:49 +00004902 DenseMap<Instruction *, PHINode *> &PHIMap) {
Andrew Trick3a86ba72011-10-05 03:25:31 +00004903
4904 // Otherwise, we can evaluate this instruction if all of its operands are
4905 // constant or derived from a PHI node themselves.
Craig Topper9f008862014-04-15 04:59:12 +00004906 PHINode *PHI = nullptr;
Andrew Trick3a86ba72011-10-05 03:25:31 +00004907 for (Instruction::op_iterator OpI = UseInst->op_begin(),
4908 OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
4909
4910 if (isa<Constant>(*OpI)) continue;
4911
4912 Instruction *OpInst = dyn_cast<Instruction>(*OpI);
Craig Topper9f008862014-04-15 04:59:12 +00004913 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
Andrew Trick3a86ba72011-10-05 03:25:31 +00004914
4915 PHINode *P = dyn_cast<PHINode>(OpInst);
Andrew Trick3e8a5762011-10-05 22:06:53 +00004916 if (!P)
4917 // If this operand is already visited, reuse the prior result.
4918 // We may have P != PHI if this is the deepest point at which the
4919 // inconsistent paths meet.
4920 P = PHIMap.lookup(OpInst);
4921 if (!P) {
4922 // Recurse and memoize the results, whether a phi is found or not.
4923 // This recursive call invalidates pointers into PHIMap.
4924 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
4925 PHIMap[OpInst] = P;
Andrew Tricke9162f12011-10-05 05:58:49 +00004926 }
Craig Topper9f008862014-04-15 04:59:12 +00004927 if (!P)
4928 return nullptr; // Not evolving from PHI
4929 if (PHI && PHI != P)
4930 return nullptr; // Evolving from multiple different PHIs.
Andrew Tricke9162f12011-10-05 05:58:49 +00004931 PHI = P;
Andrew Trick3a86ba72011-10-05 03:25:31 +00004932 }
4933  // This is an expression evolving from a constant PHI!
4934 return PHI;
4935}
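
// For illustration (hypothetical IR names): suppose the loop header has a
// single PHI %iv and the walk starts at %u, where %t = mul %iv, 3 and
// %u = add %t, 7. Recursing through %u's operands above finds only constants
// and %iv, so the returned PHI is %iv. An expression mixing two different
// header PHIs, or using a value the loop does not contain, returns null.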
4936
Chris Lattnerdd730472004-04-17 22:58:41 +00004937/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4938/// in the loop that V is derived from. We allow arbitrary operations along the
4939/// in the loop that V is derived from. We allow arbitrary operations along the
4940/// way, but the operands of an operation must be either constants or values
4941/// derived from a constant PHI. If this expression does not fit with these
4942/// constraints, return null.
4942static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
Chris Lattnerdd730472004-04-17 22:58:41 +00004943 Instruction *I = dyn_cast<Instruction>(V);
Craig Topper9f008862014-04-15 04:59:12 +00004944 if (!I || !canConstantEvolve(I, L)) return nullptr;
Chris Lattnerdd730472004-04-17 22:58:41 +00004945
Anton Korobeynikov579f0712008-02-20 11:08:44 +00004946 if (PHINode *PN = dyn_cast<PHINode>(I)) {
Andrew Trick3a86ba72011-10-05 03:25:31 +00004947 return PN;
Anton Korobeynikov579f0712008-02-20 11:08:44 +00004948 }
Chris Lattnerdd730472004-04-17 22:58:41 +00004949
Andrew Trick3a86ba72011-10-05 03:25:31 +00004950 // Record non-constant instructions contained by the loop.
Andrew Tricke9162f12011-10-05 05:58:49 +00004951 DenseMap<Instruction *, PHINode *> PHIMap;
4952 return getConstantEvolvingPHIOperands(I, L, PHIMap);
Chris Lattnerdd730472004-04-17 22:58:41 +00004953}
4954
4955/// EvaluateExpression - Given an expression that passes the
4956/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI nodes
4957/// in the loop have the constant values given in Vals. If we can't fold this
4958/// expression for some reason, return null.
Andrew Trick3a86ba72011-10-05 03:25:31 +00004959static Constant *EvaluateExpression(Value *V, const Loop *L,
4960 DenseMap<Instruction *, Constant *> &Vals,
Rafael Espindola7c68beb2014-02-18 15:33:12 +00004961 const DataLayout *DL,
Chad Rosiere6de63d2011-12-01 21:29:16 +00004962 const TargetLibraryInfo *TLI) {
Andrew Tricke9162f12011-10-05 05:58:49 +00004963 // Convenient constant check, but redundant for recursive calls.
Reid Spencer30d69a52004-07-18 00:18:30 +00004964 if (Constant *C = dyn_cast<Constant>(V)) return C;
Nick Lewyckya6674c72011-10-22 19:58:20 +00004965 Instruction *I = dyn_cast<Instruction>(V);
Craig Topper9f008862014-04-15 04:59:12 +00004966 if (!I) return nullptr;
Andrew Trick3a86ba72011-10-05 03:25:31 +00004967
Andrew Trick3a86ba72011-10-05 03:25:31 +00004968 if (Constant *C = Vals.lookup(I)) return C;
4969
Nick Lewyckya6674c72011-10-22 19:58:20 +00004970 // An instruction inside the loop depends on a value outside the loop that we
4971 // weren't given a mapping for, or a value such as a call inside the loop.
Craig Topper9f008862014-04-15 04:59:12 +00004972 if (!canConstantEvolve(I, L)) return nullptr;
Nick Lewyckya6674c72011-10-22 19:58:20 +00004973
4974 // An unmapped PHI can be due to a branch or another loop inside this loop,
4975 // or due to this not being the initial iteration through a loop where we
4976 // couldn't compute the evolution of this particular PHI last time.
Craig Topper9f008862014-04-15 04:59:12 +00004977 if (isa<PHINode>(I)) return nullptr;
Chris Lattnerdd730472004-04-17 22:58:41 +00004978
Dan Gohmanf820bd32010-06-22 13:15:46 +00004979 std::vector<Constant*> Operands(I->getNumOperands());
Chris Lattnerdd730472004-04-17 22:58:41 +00004980
4981 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Andrew Tricke9162f12011-10-05 05:58:49 +00004982 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
4983 if (!Operand) {
Nick Lewyckya447e0f32011-10-14 09:38:46 +00004984 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
Craig Topper9f008862014-04-15 04:59:12 +00004985 if (!Operands[i]) return nullptr;
Andrew Tricke9162f12011-10-05 05:58:49 +00004986 continue;
4987 }
Rafael Espindola7c68beb2014-02-18 15:33:12 +00004988 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
Andrew Tricke9162f12011-10-05 05:58:49 +00004989 Vals[Operand] = C;
Craig Topper9f008862014-04-15 04:59:12 +00004990 if (!C) return nullptr;
Andrew Tricke9162f12011-10-05 05:58:49 +00004991 Operands[i] = C;
Chris Lattnerdd730472004-04-17 22:58:41 +00004992 }
4993
Nick Lewyckya6674c72011-10-22 19:58:20 +00004994 if (CmpInst *CI = dyn_cast<CmpInst>(I))
Chris Lattnercdfb80d2009-11-09 23:06:58 +00004995 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
Rafael Espindola7c68beb2014-02-18 15:33:12 +00004996 Operands[1], DL, TLI);
Nick Lewyckya6674c72011-10-22 19:58:20 +00004997 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
4998 if (!LI->isVolatile())
Rafael Espindola7c68beb2014-02-18 15:33:12 +00004999 return ConstantFoldLoadFromConstPtr(Operands[0], DL);
Nick Lewyckya6674c72011-10-22 19:58:20 +00005000 }
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005001 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
Chad Rosiere6de63d2011-12-01 21:29:16 +00005002 TLI);
Chris Lattnerdd730472004-04-17 22:58:41 +00005003}
5004
5005/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
5006/// in the header of its containing loop, that the loop executes a
5007/// constant number of times, and that the PHI node is just a recurrence
5008/// involving constants, fold it.
Dan Gohmance973df2009-06-24 04:48:43 +00005009Constant *
5010ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
Dan Gohmancb0efec2009-12-18 01:14:11 +00005011 const APInt &BEs,
Dan Gohmance973df2009-06-24 04:48:43 +00005012 const Loop *L) {
Dan Gohman0daf6872011-05-09 18:44:09 +00005013 DenseMap<PHINode*, Constant*>::const_iterator I =
Chris Lattnerdd730472004-04-17 22:58:41 +00005014 ConstantEvolutionLoopExitValue.find(PN);
5015 if (I != ConstantEvolutionLoopExitValue.end())
5016 return I->second;
5017
Dan Gohman4ce1fb12010-04-08 23:03:40 +00005018 if (BEs.ugt(MaxBruteForceIterations))
Craig Topper9f008862014-04-15 04:59:12 +00005019 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
Chris Lattnerdd730472004-04-17 22:58:41 +00005020
5021 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
5022
Andrew Trick3a86ba72011-10-05 03:25:31 +00005023 DenseMap<Instruction *, Constant *> CurrentIterVals;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005024 BasicBlock *Header = L->getHeader();
5025 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
Andrew Trick3a86ba72011-10-05 03:25:31 +00005026
Chris Lattnerdd730472004-04-17 22:58:41 +00005027 // Since the loop is canonicalized, the PHI node must have two entries. One
5028 // entry must be a constant (coming in from outside of the loop), and the
5029 // second must be derived from the same PHI.
5030 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
Craig Topper9f008862014-04-15 04:59:12 +00005031 PHINode *PHI = nullptr;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005032 for (BasicBlock::iterator I = Header->begin();
5033 (PHI = dyn_cast<PHINode>(I)); ++I) {
5034 Constant *StartCST =
5035 dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
Craig Topper9f008862014-04-15 04:59:12 +00005036 if (!StartCST) continue;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005037 CurrentIterVals[PHI] = StartCST;
5038 }
5039 if (!CurrentIterVals.count(PN))
Craig Topper9f008862014-04-15 04:59:12 +00005040 return RetVal = nullptr;
Chris Lattnerdd730472004-04-17 22:58:41 +00005041
5042 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
Chris Lattnerdd730472004-04-17 22:58:41 +00005043
5044 // Execute the loop symbolically to determine the exit value.
Dan Gohman0bddac12009-02-24 18:55:53 +00005045 if (BEs.getActiveBits() >= 32)
Craig Topper9f008862014-04-15 04:59:12 +00005046 return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!
Chris Lattnerdd730472004-04-17 22:58:41 +00005047
Dan Gohman0bddac12009-02-24 18:55:53 +00005048 unsigned NumIterations = BEs.getZExtValue(); // must be in range
Reid Spencer983e3b32007-03-01 07:25:48 +00005049 unsigned IterationNum = 0;
Andrew Trick3a86ba72011-10-05 03:25:31 +00005050 for (; ; ++IterationNum) {
Chris Lattnerdd730472004-04-17 22:58:41 +00005051 if (IterationNum == NumIterations)
Andrew Trick3a86ba72011-10-05 03:25:31 +00005052 return RetVal = CurrentIterVals[PN]; // Got exit value!
Chris Lattnerdd730472004-04-17 22:58:41 +00005053
Nick Lewyckya6674c72011-10-22 19:58:20 +00005054 // Compute the value of the PHIs for the next iteration.
Andrew Trick3a86ba72011-10-05 03:25:31 +00005055 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
Nick Lewyckya6674c72011-10-22 19:58:20 +00005056 DenseMap<Instruction *, Constant *> NextIterVals;
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005057 Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
Chad Rosiere6de63d2011-12-01 21:29:16 +00005058 TLI);
Craig Topper9f008862014-04-15 04:59:12 +00005059 if (!NextPHI)
5060 return nullptr; // Couldn't evaluate!
Andrew Trick3a86ba72011-10-05 03:25:31 +00005061 NextIterVals[PN] = NextPHI;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005062
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005063 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
5064
Nick Lewyckya6674c72011-10-22 19:58:20 +00005065 // Also evaluate the other PHI nodes. However, we don't get to stop if we
5066 // cease to be able to evaluate one of them or if they stop evolving,
5067 // because that doesn't necessarily prevent us from computing PN.
Nick Lewyckyd48ab842011-11-12 03:09:12 +00005068 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005069 for (DenseMap<Instruction *, Constant *>::const_iterator
5070 I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
5071 PHINode *PHI = dyn_cast<PHINode>(I->first);
Nick Lewycky8e904de2011-10-24 05:51:01 +00005072 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
Nick Lewyckyd48ab842011-11-12 03:09:12 +00005073 PHIsToCompute.push_back(std::make_pair(PHI, I->second));
5074 }
5075 // We use two distinct loops because EvaluateExpression may invalidate any
5076 // iterators into CurrentIterVals.
5077 for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
5078 I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
5079 PHINode *PHI = I->first;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005080 Constant *&NextPHI = NextIterVals[PHI];
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005081 if (!NextPHI) { // Not already computed.
5082 Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005083 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005084 }
5085 if (NextPHI != I->second)
5086 StoppedEvolving = false;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005087 }
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005088
5089 // If all entries in CurrentIterVals == NextIterVals then we can stop
5090 // iterating, the loop can't continue to change.
5091 if (StoppedEvolving)
5092 return RetVal = CurrentIterVals[PN];
5093
Andrew Trick3a86ba72011-10-05 03:25:31 +00005094 CurrentIterVals.swap(NextIterVals);
Chris Lattnerdd730472004-04-17 22:58:41 +00005095 }
5096}
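
// A small worked example (hypothetical values): assume the header PHI starts
// at 3 and is multiplied by 2 along the backedge, and the backedge-taken
// count BEs is 3. The map above evolves the PHI through 3 -> 6 -> 12 -> 24,
// so the routine returns the constant 24 as the loop-exit value of the PHI.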
5097
Andrew Trick3ca3f982011-07-26 17:19:55 +00005098/// ComputeExitCountExhaustively - If the loop is known to execute a
Chris Lattner4021d1a2004-04-17 18:36:24 +00005099/// constant number of times (the condition evolves only from constants),
5100/// try to evaluate a few iterations of the loop until the exit
5101/// condition gets a value of ExitWhen (true or false). If we cannot
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005102/// evaluate the trip count of the loop, return getCouldNotCompute().
Nick Lewyckya6674c72011-10-22 19:58:20 +00005103const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
5104 Value *Cond,
5105 bool ExitWhen) {
Chris Lattner4021d1a2004-04-17 18:36:24 +00005106 PHINode *PN = getConstantEvolvingPHI(Cond, L);
Craig Topper9f008862014-04-15 04:59:12 +00005107 if (!PN) return getCouldNotCompute();
Chris Lattner4021d1a2004-04-17 18:36:24 +00005108
Dan Gohman866971e2010-06-19 14:17:24 +00005109 // If the loop is canonicalized, the PHI will have exactly two entries.
5110 // That's the only form we support here.
5111 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
5112
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005113 DenseMap<Instruction *, Constant *> CurrentIterVals;
5114 BasicBlock *Header = L->getHeader();
5115 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
5116
Dan Gohman866971e2010-06-19 14:17:24 +00005117 // One entry must be a constant (coming in from outside of the loop), and the
Chris Lattner4021d1a2004-04-17 18:36:24 +00005118 // second must be derived from the same PHI.
5119 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
Craig Topper9f008862014-04-15 04:59:12 +00005120 PHINode *PHI = nullptr;
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005121 for (BasicBlock::iterator I = Header->begin();
5122 (PHI = dyn_cast<PHINode>(I)); ++I) {
5123 Constant *StartCST =
5124 dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
Craig Topper9f008862014-04-15 04:59:12 +00005125 if (!StartCST) continue;
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005126 CurrentIterVals[PHI] = StartCST;
5127 }
5128 if (!CurrentIterVals.count(PN))
5129 return getCouldNotCompute();
Chris Lattner4021d1a2004-04-17 18:36:24 +00005130
5131  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
5132 // the loop symbolically to determine when the condition gets a value of
5133 // "ExitWhen".
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005134
Andrew Trick90c7a102011-11-16 00:52:40 +00005135 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005136 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
Zhou Sheng75b871f2007-01-11 12:24:14 +00005137 ConstantInt *CondVal =
Chad Rosiere6de63d2011-12-01 21:29:16 +00005138 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005139 DL, TLI));
Chris Lattnerdd730472004-04-17 22:58:41 +00005140
Zhou Sheng75b871f2007-01-11 12:24:14 +00005141 // Couldn't symbolically evaluate.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005142 if (!CondVal) return getCouldNotCompute();
Zhou Sheng75b871f2007-01-11 12:24:14 +00005143
Reid Spencer983e3b32007-03-01 07:25:48 +00005144 if (CondVal->getValue() == uint64_t(ExitWhen)) {
Chris Lattner4021d1a2004-04-17 18:36:24 +00005145 ++NumBruteForceTripCountsComputed;
Owen Anderson55f1c092009-08-13 21:58:54 +00005146 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
Chris Lattner4021d1a2004-04-17 18:36:24 +00005147 }
Misha Brukman01808ca2005-04-21 21:13:18 +00005148
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005149 // Update all the PHI nodes for the next iteration.
5150 DenseMap<Instruction *, Constant *> NextIterVals;
Nick Lewyckyd48ab842011-11-12 03:09:12 +00005151
5152 // Create a list of which PHIs we need to compute. We want to do this before
5153 // calling EvaluateExpression on them because that may invalidate iterators
5154 // into CurrentIterVals.
5155 SmallVector<PHINode *, 8> PHIsToCompute;
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005156 for (DenseMap<Instruction *, Constant *>::const_iterator
5157 I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
5158 PHINode *PHI = dyn_cast<PHINode>(I->first);
5159 if (!PHI || PHI->getParent() != Header) continue;
Nick Lewyckyd48ab842011-11-12 03:09:12 +00005160 PHIsToCompute.push_back(PHI);
5161 }
5162 for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
5163 E = PHIsToCompute.end(); I != E; ++I) {
5164 PHINode *PHI = *I;
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005165 Constant *&NextPHI = NextIterVals[PHI];
5166 if (NextPHI) continue; // Already computed!
5167
5168 Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005169 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
Duncan Sandsa370f3e2011-10-25 12:28:52 +00005170 }
5171 CurrentIterVals.swap(NextIterVals);
Chris Lattner4021d1a2004-04-17 18:36:24 +00005172 }
5173
5174 // Too many iterations were needed to evaluate.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005175 return getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00005176}
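
// A small worked example (hypothetical values): assume the header PHI starts
// at 0 and is incremented by 3 along the backedge, Cond is "PHI == 9", and
// ExitWhen is true. The condition evaluates to false for PHI = 0, 3 and 6 and
// to true for PHI = 9, so the loop above returns the constant 3 as the exit
// count.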
5177
Dan Gohman237d9e52009-09-03 15:00:26 +00005178/// getSCEVAtScope - Return a SCEV expression for the specified value
Dan Gohmanb81f47d2009-05-08 20:38:54 +00005179/// at the specified scope in the program. The L value specifies a loop
5180/// nest at which to evaluate the expression: null means the top-level scope,
5181/// and a non-null loop means the scope immediately inside of that loop.
5182///
5183/// This method can be used to compute the exit value for a variable defined
5184/// in a loop by querying what the value will hold in the parent loop.
5185///
Dan Gohman8ca08852009-05-24 23:25:42 +00005186/// In the case that a relevant loop exit value cannot be computed, the
5187/// original value V is returned.
Dan Gohmanaf752342009-07-07 17:06:11 +00005188const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
Dan Gohmancc2f1eb2009-08-31 21:15:23 +00005189 // Check to see if we've folded this expression at this loop before.
Wan Xiaofeib2c8cdc2013-11-12 09:40:41 +00005190 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = ValuesAtScopes[V];
5191 for (unsigned u = 0; u < Values.size(); u++) {
5192 if (Values[u].first == L)
5193 return Values[u].second ? Values[u].second : V;
5194 }
Craig Topper9f008862014-04-15 04:59:12 +00005195 Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr)));
Dan Gohmancc2f1eb2009-08-31 21:15:23 +00005196 // Otherwise compute it.
5197 const SCEV *C = computeSCEVAtScope(V, L);
Wan Xiaofeib2c8cdc2013-11-12 09:40:41 +00005198 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
5199 for (unsigned u = Values2.size(); u > 0; u--) {
5200 if (Values2[u - 1].first == L) {
5201 Values2[u - 1].second = C;
5202 break;
5203 }
5204 }
Dan Gohmancc2f1eb2009-08-31 21:15:23 +00005205 return C;
5206}
5207
Nick Lewyckya6674c72011-10-22 19:58:20 +00005208/// This builds up a Constant using the ConstantExpr interface. That way, we
5209/// will return Constants for objects which aren't represented by a
5210/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
5211/// Returns NULL if the SCEV isn't representable as a Constant.
5212static Constant *BuildConstantFromSCEV(const SCEV *V) {
Benjamin Kramer987b8502014-02-11 19:02:55 +00005213 switch (static_cast<SCEVTypes>(V->getSCEVType())) {
Nick Lewyckya6674c72011-10-22 19:58:20 +00005214 case scCouldNotCompute:
5215 case scAddRecExpr:
5216 break;
5217 case scConstant:
5218 return cast<SCEVConstant>(V)->getValue();
5219 case scUnknown:
5220 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
5221 case scSignExtend: {
5222 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
5223 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
5224 return ConstantExpr::getSExt(CastOp, SS->getType());
5225 break;
5226 }
5227 case scZeroExtend: {
5228 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
5229 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
5230 return ConstantExpr::getZExt(CastOp, SZ->getType());
5231 break;
5232 }
5233 case scTruncate: {
5234 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
5235 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
5236 return ConstantExpr::getTrunc(CastOp, ST->getType());
5237 break;
5238 }
5239 case scAddExpr: {
5240 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
5241 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
Matt Arsenaultbe18b8a2013-10-21 18:41:10 +00005242 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
5243 unsigned AS = PTy->getAddressSpace();
5244 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
5245 C = ConstantExpr::getBitCast(C, DestPtrTy);
5246 }
Nick Lewyckya6674c72011-10-22 19:58:20 +00005247 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
5248 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
Craig Topper9f008862014-04-15 04:59:12 +00005249 if (!C2) return nullptr;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005250
5251 // First pointer!
5252 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
Matt Arsenaultbe18b8a2013-10-21 18:41:10 +00005253 unsigned AS = C2->getType()->getPointerAddressSpace();
Nick Lewyckya6674c72011-10-22 19:58:20 +00005254 std::swap(C, C2);
Matt Arsenaultbe18b8a2013-10-21 18:41:10 +00005255 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
Nick Lewyckya6674c72011-10-22 19:58:20 +00005256 // The offsets have been converted to bytes. We can add bytes to an
5257 // i8* by GEP with the byte count in the first index.
Matt Arsenaultbe18b8a2013-10-21 18:41:10 +00005258 C = ConstantExpr::getBitCast(C, DestPtrTy);
Nick Lewyckya6674c72011-10-22 19:58:20 +00005259 }
5260
5261 // Don't bother trying to sum two pointers. We probably can't
5262 // statically compute a load that results from it anyway.
5263 if (C2->getType()->isPointerTy())
Craig Topper9f008862014-04-15 04:59:12 +00005264 return nullptr;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005265
Matt Arsenaultbe18b8a2013-10-21 18:41:10 +00005266 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
5267 if (PTy->getElementType()->isStructTy())
Nick Lewyckya6674c72011-10-22 19:58:20 +00005268 C2 = ConstantExpr::getIntegerCast(
5269 C2, Type::getInt32Ty(C->getContext()), true);
5270 C = ConstantExpr::getGetElementPtr(C, C2);
5271 } else
5272 C = ConstantExpr::getAdd(C, C2);
5273 }
5274 return C;
5275 }
5276 break;
5277 }
5278 case scMulExpr: {
5279 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
5280 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
5281 // Don't bother with pointers at all.
Craig Topper9f008862014-04-15 04:59:12 +00005282 if (C->getType()->isPointerTy()) return nullptr;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005283 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
5284 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
Craig Topper9f008862014-04-15 04:59:12 +00005285 if (!C2 || C2->getType()->isPointerTy()) return nullptr;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005286 C = ConstantExpr::getMul(C, C2);
5287 }
5288 return C;
5289 }
5290 break;
5291 }
5292 case scUDivExpr: {
5293 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
5294 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
5295 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
5296 if (LHS->getType() == RHS->getType())
5297 return ConstantExpr::getUDiv(LHS, RHS);
5298 break;
5299 }
Benjamin Kramer987b8502014-02-11 19:02:55 +00005300 case scSMaxExpr:
5301 case scUMaxExpr:
5302 break; // TODO: smax, umax.
Nick Lewyckya6674c72011-10-22 19:58:20 +00005303 }
Craig Topper9f008862014-04-15 04:59:12 +00005304 return nullptr;
Nick Lewyckya6674c72011-10-22 19:58:20 +00005305}
5306
Dan Gohmancc2f1eb2009-08-31 21:15:23 +00005307const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
Chris Lattnerdd730472004-04-17 22:58:41 +00005308 if (isa<SCEVConstant>(V)) return V;
Misha Brukman01808ca2005-04-21 21:13:18 +00005309
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00005310 // If this instruction is evolved from a constant-evolving PHI, compute the
Chris Lattnerdd730472004-04-17 22:58:41 +00005311 // exit value from the loop without using SCEVs.
Dan Gohmana30370b2009-05-04 22:02:23 +00005312 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
Chris Lattnerdd730472004-04-17 22:58:41 +00005313 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
Dan Gohmanc8e23622009-04-21 23:15:49 +00005314 const Loop *LI = (*this->LI)[I->getParent()];
Chris Lattnerdd730472004-04-17 22:58:41 +00005315 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
5316 if (PHINode *PN = dyn_cast<PHINode>(I))
5317 if (PN->getParent() == LI->getHeader()) {
5318 // Okay, there is no closed form solution for the PHI node. Check
Dan Gohman0bddac12009-02-24 18:55:53 +00005319 // to see if the loop that contains it has a known backedge-taken
5320 // count. If so, we may be able to force computation of the exit
5321 // value.
Dan Gohmanaf752342009-07-07 17:06:11 +00005322 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
Dan Gohmana30370b2009-05-04 22:02:23 +00005323 if (const SCEVConstant *BTCC =
Dan Gohman0bddac12009-02-24 18:55:53 +00005324 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
Chris Lattnerdd730472004-04-17 22:58:41 +00005325 // Okay, we know how many times the containing loop executes. If
5326 // this is a constant evolving PHI node, get the final value at
5327 // the specified iteration number.
5328 Constant *RV = getConstantEvolutionLoopExitValue(PN,
Dan Gohman0bddac12009-02-24 18:55:53 +00005329 BTCC->getValue()->getValue(),
Chris Lattnerdd730472004-04-17 22:58:41 +00005330 LI);
Dan Gohman9d203c62009-06-29 21:31:18 +00005331 if (RV) return getSCEV(RV);
Chris Lattnerdd730472004-04-17 22:58:41 +00005332 }
5333 }
5334
Reid Spencere6328ca2006-12-04 21:33:23 +00005335 // Okay, this is an expression that we cannot symbolically evaluate
Chris Lattnerdd730472004-04-17 22:58:41 +00005336 // into a SCEV. Check to see if it's possible to symbolically evaluate
Reid Spencere6328ca2006-12-04 21:33:23 +00005337 // the arguments into constants, and if so, try to constant propagate the
Chris Lattnerdd730472004-04-17 22:58:41 +00005338 // result. This is particularly useful for computing loop exit values.
5339 if (CanConstantFold(I)) {
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005340 SmallVector<Constant *, 4> Operands;
5341 bool MadeImprovement = false;
Chris Lattnerdd730472004-04-17 22:58:41 +00005342 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
5343 Value *Op = I->getOperand(i);
5344 if (Constant *C = dyn_cast<Constant>(Op)) {
5345 Operands.push_back(C);
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005346 continue;
Chris Lattnerdd730472004-04-17 22:58:41 +00005347 }
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005348
5349 // If any of the operands is non-constant and if they are
5350 // non-integer and non-pointer, don't even try to analyze them
5351 // with scev techniques.
5352 if (!isSCEVable(Op->getType()))
5353 return V;
5354
5355 const SCEV *OrigV = getSCEV(Op);
5356 const SCEV *OpV = getSCEVAtScope(OrigV, L);
5357 MadeImprovement |= OrigV != OpV;
5358
Nick Lewyckya6674c72011-10-22 19:58:20 +00005359 Constant *C = BuildConstantFromSCEV(OpV);
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005360 if (!C) return V;
5361 if (C->getType() != Op->getType())
5362 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
5363 Op->getType(),
5364 false),
5365 C, Op->getType());
5366 Operands.push_back(C);
Chris Lattnerdd730472004-04-17 22:58:41 +00005367 }
Dan Gohmance973df2009-06-24 04:48:43 +00005368
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005369 // Check to see if getSCEVAtScope actually made an improvement.
5370 if (MadeImprovement) {
Craig Topper9f008862014-04-15 04:59:12 +00005371 Constant *C = nullptr;
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005372 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
5373 C = ConstantFoldCompareInstOperands(CI->getPredicate(),
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005374 Operands[0], Operands[1], DL,
Chad Rosier43a33062011-12-02 01:26:24 +00005375 TLI);
Nick Lewyckya6674c72011-10-22 19:58:20 +00005376 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
5377 if (!LI->isVolatile())
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005378 C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
Nick Lewyckya6674c72011-10-22 19:58:20 +00005379 } else
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005380 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
Rafael Espindola7c68beb2014-02-18 15:33:12 +00005381 Operands, DL, TLI);
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005382 if (!C) return V;
Dan Gohman4aad7502010-02-24 19:31:47 +00005383 return getSCEV(C);
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005384 }
Chris Lattnerdd730472004-04-17 22:58:41 +00005385 }
5386 }
5387
5388 // This is some other type of SCEVUnknown, just return it.
5389 return V;
5390 }
5391
Dan Gohmana30370b2009-05-04 22:02:23 +00005392 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00005393 // Avoid performing the look-up in the common case where the specified
5394 // expression has no loop-variant portions.
5395 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
Dan Gohmanaf752342009-07-07 17:06:11 +00005396 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
Chris Lattnerd934c702004-04-02 20:23:17 +00005397 if (OpAtScope != Comm->getOperand(i)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00005398 // Okay, at least one of these operands is loop variant but might be
5399 // foldable. Build a new instance of the folded commutative expression.
Dan Gohmance973df2009-06-24 04:48:43 +00005400 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
5401 Comm->op_begin()+i);
Chris Lattnerd934c702004-04-02 20:23:17 +00005402 NewOps.push_back(OpAtScope);
5403
5404 for (++i; i != e; ++i) {
5405 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
Chris Lattnerd934c702004-04-02 20:23:17 +00005406 NewOps.push_back(OpAtScope);
5407 }
5408 if (isa<SCEVAddExpr>(Comm))
Dan Gohmanc8e23622009-04-21 23:15:49 +00005409 return getAddExpr(NewOps);
Nick Lewyckycdb7e542007-11-25 22:41:31 +00005410 if (isa<SCEVMulExpr>(Comm))
Dan Gohmanc8e23622009-04-21 23:15:49 +00005411 return getMulExpr(NewOps);
Nick Lewyckycdb7e542007-11-25 22:41:31 +00005412 if (isa<SCEVSMaxExpr>(Comm))
Dan Gohmanc8e23622009-04-21 23:15:49 +00005413 return getSMaxExpr(NewOps);
Nick Lewycky1c44ebc2008-02-20 06:48:22 +00005414 if (isa<SCEVUMaxExpr>(Comm))
Dan Gohmanc8e23622009-04-21 23:15:49 +00005415 return getUMaxExpr(NewOps);
Torok Edwinfbcc6632009-07-14 16:55:14 +00005416 llvm_unreachable("Unknown commutative SCEV type!");
Chris Lattnerd934c702004-04-02 20:23:17 +00005417 }
5418 }
5419 // If we got here, all operands are loop invariant.
5420 return Comm;
5421 }
5422
Dan Gohmana30370b2009-05-04 22:02:23 +00005423 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
Dan Gohmanaf752342009-07-07 17:06:11 +00005424 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
5425 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
Nick Lewycky52348302009-01-13 09:18:58 +00005426 if (LHS == Div->getLHS() && RHS == Div->getRHS())
5427 return Div; // must be loop invariant
Dan Gohmanc8e23622009-04-21 23:15:49 +00005428 return getUDivExpr(LHS, RHS);
Chris Lattnerd934c702004-04-02 20:23:17 +00005429 }
5430
5431 // If this is a loop recurrence for a loop that does not contain L, then we
5432 // are dealing with the final value computed by the loop.
Dan Gohmana30370b2009-05-04 22:02:23 +00005433 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005434 // First, attempt to evaluate each operand.
5435 // Avoid performing the look-up in the common case where the specified
5436 // expression has no loop-variant portions.
5437 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
5438 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
5439 if (OpAtScope == AddRec->getOperand(i))
5440 continue;
5441
5442 // Okay, at least one of these operands is loop variant but might be
5443 // foldable. Build a new instance of the folded commutative expression.
5444 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
5445 AddRec->op_begin()+i);
5446 NewOps.push_back(OpAtScope);
5447 for (++i; i != e; ++i)
5448 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
5449
Andrew Trick759ba082011-04-27 01:21:25 +00005450 const SCEV *FoldedRec =
Andrew Trick8b55b732011-03-14 16:50:06 +00005451 getAddRecExpr(NewOps, AddRec->getLoop(),
Andrew Trick759ba082011-04-27 01:21:25 +00005452 AddRec->getNoWrapFlags(SCEV::FlagNW));
5453 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
Andrew Trick01eff822011-04-27 05:42:17 +00005454 // The addrec may be folded to a nonrecurrence, for example, if the
5455 // induction variable is multiplied by zero after constant folding. Go
5456 // ahead and return the folded value.
Andrew Trick759ba082011-04-27 01:21:25 +00005457 if (!AddRec)
5458 return FoldedRec;
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005459 break;
5460 }
5461
5462 // If the scope is outside the addrec's loop, evaluate it by using the
5463 // loop exit value of the addrec.
5464 if (!AddRec->getLoop()->contains(L)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00005465 // To evaluate this recurrence, we need to know how many times the AddRec
5466 // loop iterates. Compute this now.
Dan Gohmanaf752342009-07-07 17:06:11 +00005467 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005468 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
Misha Brukman01808ca2005-04-21 21:13:18 +00005469
Eli Friedman61f67622008-08-04 23:49:06 +00005470 // Then, evaluate the AddRec.
Dan Gohmanc8e23622009-04-21 23:15:49 +00005471 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
Chris Lattnerd934c702004-04-02 20:23:17 +00005472 }
Dan Gohmanae36b1e2010-06-29 23:43:06 +00005473
Dan Gohman8ca08852009-05-24 23:25:42 +00005474 return AddRec;
Chris Lattnerd934c702004-04-02 20:23:17 +00005475 }
5476
Dan Gohmana30370b2009-05-04 22:02:23 +00005477 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
Dan Gohmanaf752342009-07-07 17:06:11 +00005478 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
Dan Gohman0098d012009-04-29 22:29:01 +00005479 if (Op == Cast->getOperand())
5480 return Cast; // must be loop invariant
5481 return getZeroExtendExpr(Op, Cast->getType());
5482 }
5483
Dan Gohmana30370b2009-05-04 22:02:23 +00005484 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
Dan Gohmanaf752342009-07-07 17:06:11 +00005485 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
Dan Gohman0098d012009-04-29 22:29:01 +00005486 if (Op == Cast->getOperand())
5487 return Cast; // must be loop invariant
5488 return getSignExtendExpr(Op, Cast->getType());
5489 }
5490
Dan Gohmana30370b2009-05-04 22:02:23 +00005491 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
Dan Gohmanaf752342009-07-07 17:06:11 +00005492 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
Dan Gohman0098d012009-04-29 22:29:01 +00005493 if (Op == Cast->getOperand())
5494 return Cast; // must be loop invariant
5495 return getTruncateExpr(Op, Cast->getType());
5496 }
5497
Torok Edwinfbcc6632009-07-14 16:55:14 +00005498 llvm_unreachable("Unknown SCEV type!");
Chris Lattnerd934c702004-04-02 20:23:17 +00005499}
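
// As an illustrative example of the addrec case above (hypothetical values):
// querying the addrec {0,+,2}<L> at a scope outside of L, when the
// backedge-taken count of L is known to be the constant 9, yields
// evaluateAtIteration(9), i.e. the constant 18.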
5500
Dan Gohmanb81f47d2009-05-08 20:38:54 +00005501/// getSCEVAtScope - This is a convenience function which does
5502/// getSCEVAtScope(getSCEV(V), L).
Dan Gohmanaf752342009-07-07 17:06:11 +00005503const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
Dan Gohmanc8e23622009-04-21 23:15:49 +00005504 return getSCEVAtScope(getSCEV(V), L);
5505}
5506
Wojciech Matyjewiczf0d21cd2008-07-20 15:55:14 +00005507/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
5508/// following equation:
5509///
5510/// A * X = B (mod N)
5511///
5512/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
5513/// A and B isn't important.
5514///
5515/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
Dan Gohmanaf752342009-07-07 17:06:11 +00005516static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
Wojciech Matyjewiczf0d21cd2008-07-20 15:55:14 +00005517 ScalarEvolution &SE) {
5518 uint32_t BW = A.getBitWidth();
5519 assert(BW == B.getBitWidth() && "Bit widths must be the same.");
5520 assert(A != 0 && "A must be non-zero.");
5521
5522 // 1. D = gcd(A, N)
5523 //
5524 // The gcd of A and N may have only one prime factor: 2. The number of
5525 // trailing zeros in A is its multiplicity
5526 uint32_t Mult2 = A.countTrailingZeros();
5527 // D = 2^Mult2
5528
5529 // 2. Check if B is divisible by D.
5530 //
5531 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
5532 // is not less than multiplicity of this prime factor for D.
5533 if (B.countTrailingZeros() < Mult2)
Dan Gohman31efa302009-04-18 17:58:19 +00005534 return SE.getCouldNotCompute();
Wojciech Matyjewiczf0d21cd2008-07-20 15:55:14 +00005535
5536 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
5537 // modulo (N / D).
5538 //
5539 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
5540 // bit width during computations.
5541 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
5542 APInt Mod(BW + 1, 0);
Jay Foad25a5e4c2010-12-01 08:53:58 +00005543 Mod.setBit(BW - Mult2); // Mod = N / D
Wojciech Matyjewiczf0d21cd2008-07-20 15:55:14 +00005544 APInt I = AD.multiplicativeInverse(Mod);
5545
5546 // 4. Compute the minimum unsigned root of the equation:
5547 // I * (B / D) mod (N / D)
5548 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
5549
5550 // The result is guaranteed to be less than 2^BW so we may truncate it to BW
5551 // bits.
5552 return SE.getConstant(Result.trunc(BW));
5553}
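
// A worked example with hypothetical 4-bit values: A = 4, B = 12, so N = 16.
// Step 1: Mult2 = 2, i.e. D = 4. Step 2: B has 2 trailing zeros, so B is
// divisible by D. Step 3: AD = 1 and Mod = N/D = 4, so I = 1. Step 4: the
// minimum unsigned root is (1 * 3) mod 4 = 3, and indeed 4 * 3 = 12 (mod 16),
// with the other solutions 7, 11 and 15 all larger.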
Chris Lattnerd934c702004-04-02 20:23:17 +00005554
5555/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
5556/// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
5557/// might be the same) or two SCEVCouldNotCompute objects.
5558///
Dan Gohmanaf752342009-07-07 17:06:11 +00005559static std::pair<const SCEV *,const SCEV *>
Dan Gohmana37eaf22007-10-22 18:31:58 +00005560SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
Chris Lattnerd934c702004-04-02 20:23:17 +00005561 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
Dan Gohman48f82222009-05-04 22:30:44 +00005562 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
5563 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
5564 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
Misha Brukman01808ca2005-04-21 21:13:18 +00005565
Chris Lattnerd934c702004-04-02 20:23:17 +00005566 // We currently can only solve this if the coefficients are constants.
Reid Spencer983e3b32007-03-01 07:25:48 +00005567 if (!LC || !MC || !NC) {
Dan Gohman48f82222009-05-04 22:30:44 +00005568 const SCEV *CNC = SE.getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00005569 return std::make_pair(CNC, CNC);
5570 }
5571
Reid Spencer983e3b32007-03-01 07:25:48 +00005572 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
Chris Lattnercad61e82007-04-15 19:52:49 +00005573 const APInt &L = LC->getValue()->getValue();
5574 const APInt &M = MC->getValue()->getValue();
5575 const APInt &N = NC->getValue()->getValue();
Reid Spencer983e3b32007-03-01 07:25:48 +00005576 APInt Two(BitWidth, 2);
5577 APInt Four(BitWidth, 4);
Misha Brukman01808ca2005-04-21 21:13:18 +00005578
Dan Gohmance973df2009-06-24 04:48:43 +00005579 {
Reid Spencer983e3b32007-03-01 07:25:48 +00005580 using namespace APIntOps;
Zhou Sheng2852d992007-04-07 17:48:27 +00005581 const APInt& C = L;
Reid Spencer983e3b32007-03-01 07:25:48 +00005582 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
5583 // The B coefficient is M-N/2
5584 APInt B(M);
5585 B -= sdiv(N,Two);
Misha Brukman01808ca2005-04-21 21:13:18 +00005586
Reid Spencer983e3b32007-03-01 07:25:48 +00005587 // The A coefficient is N/2
Zhou Sheng2852d992007-04-07 17:48:27 +00005588 APInt A(N.sdiv(Two));
Chris Lattnerd934c702004-04-02 20:23:17 +00005589
Reid Spencer983e3b32007-03-01 07:25:48 +00005590 // Compute the B^2-4ac term.
5591 APInt SqrtTerm(B);
5592 SqrtTerm *= B;
5593 SqrtTerm -= Four * (A * C);
Chris Lattnerd934c702004-04-02 20:23:17 +00005594
Nick Lewyckyfb780832012-08-01 09:14:36 +00005595 if (SqrtTerm.isNegative()) {
5596 // The loop is provably infinite.
5597 const SCEV *CNC = SE.getCouldNotCompute();
5598 return std::make_pair(CNC, CNC);
5599 }
5600
Reid Spencer983e3b32007-03-01 07:25:48 +00005601 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
5602 // integer value or else APInt::sqrt() will assert.
5603 APInt SqrtVal(SqrtTerm.sqrt());
Misha Brukman01808ca2005-04-21 21:13:18 +00005604
Dan Gohmance973df2009-06-24 04:48:43 +00005605 // Compute the two solutions for the quadratic formula.
Reid Spencer983e3b32007-03-01 07:25:48 +00005606 // The divisions must be performed as signed divisions.
5607 APInt NegB(-B);
Nick Lewycky31555522011-10-03 07:10:45 +00005608 APInt TwoA(A << 1);
Nick Lewycky7b14e202008-11-03 02:43:49 +00005609 if (TwoA.isMinValue()) {
Dan Gohman48f82222009-05-04 22:30:44 +00005610 const SCEV *CNC = SE.getCouldNotCompute();
Nick Lewycky7b14e202008-11-03 02:43:49 +00005611 return std::make_pair(CNC, CNC);
5612 }
5613
Owen Anderson47db9412009-07-22 00:24:57 +00005614 LLVMContext &Context = SE.getContext();
Owen Andersonf1f17432009-07-06 22:37:39 +00005615
5616 ConstantInt *Solution1 =
Owen Andersonedb4a702009-07-24 23:12:02 +00005617 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
Owen Andersonf1f17432009-07-06 22:37:39 +00005618 ConstantInt *Solution2 =
Owen Andersonedb4a702009-07-24 23:12:02 +00005619 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
Misha Brukman01808ca2005-04-21 21:13:18 +00005620
Dan Gohmance973df2009-06-24 04:48:43 +00005621 return std::make_pair(SE.getConstant(Solution1),
Dan Gohmana37eaf22007-10-22 18:31:58 +00005622 SE.getConstant(Solution2));
Nick Lewycky31555522011-10-03 07:10:45 +00005623  } // end of the scope using APIntOps
Chris Lattnerd934c702004-04-02 20:23:17 +00005624}
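
// A worked example with hypothetical values: for the chrec {6,+,-4,+,2}, the
// polynomial coefficients above are C = 6, B = -4 - 2/2 = -5 and A = 2/2 = 1.
// The discriminant is 25 - 24 = 1, so the two roots are (5 +/- 1) / 2, i.e.
// 3 and 2; the chrec evaluates to (i-2)*(i-3) and is indeed zero at both.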
5625
5626/// HowFarToZero - Return the number of times a backedge comparing the specified
Dan Gohman4c720c02009-06-06 14:37:11 +00005627/// value to zero will execute. If not computable, return CouldNotCompute.
Andrew Trick8b55b732011-03-14 16:50:06 +00005628///
5629/// This is only used for loops with a "x != y" exit test. The exit condition is
5630/// now expressed as a single expression, V = x-y. So the exit test is
5631/// effectively V != 0. We know and take advantage of the fact that this
5632/// expression is only used in a comparison-by-zero context.
Andrew Trick3ca3f982011-07-26 17:19:55 +00005633ScalarEvolution::ExitLimit
Andrew Trick5b245a12013-05-31 06:43:25 +00005634ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
Chris Lattnerd934c702004-04-02 20:23:17 +00005635 // If the value is a constant
Dan Gohmana30370b2009-05-04 22:02:23 +00005636 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00005637 // If the value is already zero, the branch will execute zero times.
Reid Spencer2e54a152007-03-02 00:28:52 +00005638 if (C->getValue()->isZero()) return C;
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005639 return getCouldNotCompute(); // Otherwise it will loop infinitely.
Chris Lattnerd934c702004-04-02 20:23:17 +00005640 }
5641
Dan Gohman48f82222009-05-04 22:30:44 +00005642 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
Chris Lattnerd934c702004-04-02 20:23:17 +00005643 if (!AddRec || AddRec->getLoop() != L)
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005644 return getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00005645
Chris Lattnerdff679f2011-01-09 22:39:48 +00005646 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
5647 // the quadratic equation to solve it.
5648 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
5649 std::pair<const SCEV *,const SCEV *> Roots =
5650 SolveQuadraticEquation(AddRec, *this);
Dan Gohman48f82222009-05-04 22:30:44 +00005651 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
5652 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
Chris Lattnerdff679f2011-01-09 22:39:48 +00005653 if (R1 && R2) {
Chris Lattner09169212004-04-02 20:26:46 +00005654#if 0
David Greenedf1c4972009-12-23 22:18:14 +00005655 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
Dan Gohmane20f8242009-04-21 00:47:46 +00005656 << " sol#2: " << *R2 << "\n";
Chris Lattner09169212004-04-02 20:26:46 +00005657#endif
Chris Lattnerd934c702004-04-02 20:23:17 +00005658 // Pick the smallest positive root value.
Zhou Sheng75b871f2007-01-11 12:24:14 +00005659 if (ConstantInt *CB =
Chris Lattner28f140a2011-01-09 22:58:47 +00005660 dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
5661 R1->getValue(),
5662 R2->getValue()))) {
Reid Spencercddc9df2007-01-12 04:24:46 +00005663 if (CB->getZExtValue() == false)
Chris Lattnerd934c702004-04-02 20:23:17 +00005664 std::swap(R1, R2); // R1 is the minimum root now.
Andrew Trick2a3b7162011-03-09 17:23:39 +00005665
Chris Lattnerd934c702004-04-02 20:23:17 +00005666 // We can only use this value if the chrec ends up with an exact zero
5667 // value at this index. When solving for "X*X != 5", for example, we
5668 // should not accept a root of 2.
Dan Gohmanaf752342009-07-07 17:06:11 +00005669 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
Dan Gohmanbe928e32008-06-18 16:23:07 +00005670 if (Val->isZero())
5671 return R1; // We found a quadratic root!
Chris Lattnerd934c702004-04-02 20:23:17 +00005672 }
5673 }
Chris Lattnerdff679f2011-01-09 22:39:48 +00005674 return getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00005675 }
Misha Brukman01808ca2005-04-21 21:13:18 +00005676
Chris Lattnerdff679f2011-01-09 22:39:48 +00005677 // Otherwise we can only handle this if it is affine.
5678 if (!AddRec->isAffine())
5679 return getCouldNotCompute();
5680
5681 // If this is an affine expression, the execution count of this branch is
5682 // the minimum unsigned root of the following equation:
5683 //
5684 // Start + Step*N = 0 (mod 2^BW)
5685 //
5686 // equivalent to:
5687 //
5688 // Step*N = -Start (mod 2^BW)
5689 //
5690 // where BW is the common bit width of Start and Step.
5691
5692 // Get the initial value for the loop.
5693 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
5694 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
5695
5696 // For now we handle only constant steps.
Andrew Trick8b55b732011-03-14 16:50:06 +00005697 //
5698 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
5699 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
5700  // to 0; it must be counting down to equal 0. Consequently, N = Start / -Step.
5701 // We have not yet seen any such cases.
Chris Lattnerdff679f2011-01-09 22:39:48 +00005702 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
Craig Topper9f008862014-04-15 04:59:12 +00005703 if (!StepC || StepC->getValue()->equalsInt(0))
Chris Lattnerdff679f2011-01-09 22:39:48 +00005704 return getCouldNotCompute();
5705
Andrew Trick8b55b732011-03-14 16:50:06 +00005706 // For positive steps (counting up until unsigned overflow):
5707 // N = -Start/Step (as unsigned)
5708 // For negative steps (counting down to zero):
5709 // N = Start/-Step
5710 // First compute the unsigned distance from zero in the direction of Step.
Andrew Trickf1781db2011-03-14 17:28:02 +00005711 bool CountDown = StepC->getValue()->getValue().isNegative();
5712 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
Andrew Trick8b55b732011-03-14 16:50:06 +00005713
5714 // Handle unitary steps, which cannot wraparound.
Andrew Trickf1781db2011-03-14 17:28:02 +00005715 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
5716 // N = Distance (as unsigned)
Nick Lewycky31555522011-10-03 07:10:45 +00005717 if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
5718 ConstantRange CR = getUnsignedRange(Start);
5719 const SCEV *MaxBECount;
5720 if (!CountDown && CR.getUnsignedMin().isMinValue())
5721 // When counting up, the worst starting value is 1, not 0.
5722 MaxBECount = CR.getUnsignedMax().isMinValue()
5723 ? getConstant(APInt::getMinValue(CR.getBitWidth()))
5724 : getConstant(APInt::getMaxValue(CR.getBitWidth()));
5725 else
5726 MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
5727 : -CR.getUnsignedMin());
Andrew Trickee5aa7f2014-01-15 06:42:11 +00005728 return ExitLimit(Distance, MaxBECount, /*MustExit=*/true);
Nick Lewycky31555522011-10-03 07:10:45 +00005729 }
Andrew Trick2a3b7162011-03-09 17:23:39 +00005730
Andrew Trickf1781db2011-03-14 17:28:02 +00005731  // If the recurrence is known not to wrap around, unsigned divide computes the
Andrew Trick5b245a12013-05-31 06:43:25 +00005732 // back edge count. (Ideally we would have an "isexact" bit for udiv). We know
5733 // that the value will either become zero (and thus the loop terminates), that
5734 // the loop will terminate through some other exit condition first, or that
5735 // the loop has undefined behavior. This means we can't "miss" the exit
Andrew Trickee5aa7f2014-01-15 06:42:11 +00005736 // value, even with nonunit stride, and exit later via the same branch. Note
5737  // that we can skip this exit if the loop later exits via a different
5738 // branch. Hence MustExit=false.
Andrew Trickf1781db2011-03-14 17:28:02 +00005739 //
Andrew Trick5b245a12013-05-31 06:43:25 +00005740 // This is only valid for expressions that directly compute the loop exit. It
5741 // is invalid for subexpressions in which the loop may exit through this
5742 // branch even if this subexpression is false. In that case, the trip count
5743 // computed by this udiv could be smaller than the number of well-defined
5744 // iterations.
Andrew Trickee5aa7f2014-01-15 06:42:11 +00005745 if (!IsSubExpr && AddRec->getNoWrapFlags(SCEV::FlagNW)) {
5746 const SCEV *Exact =
5747 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
5748 return ExitLimit(Exact, Exact, /*MustExit=*/false);
5749 }
Benjamin Kramere75eaca2014-03-25 16:25:12 +00005750
5751  // If Step is a power of two that evenly divides Start, we know that the loop
5752 // will always terminate. Start may not be a constant so we just have the
5753 // number of trailing zeros available. This is safe even in presence of
5754 // overflow as the recurrence will overflow to exactly 0.
5755 const APInt &StepV = StepC->getValue()->getValue();
5756 if (StepV.isPowerOf2() &&
5757 GetMinTrailingZeros(getNegativeSCEV(Start)) >= StepV.countTrailingZeros())
5758 return getUDivExactExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
5759
Chris Lattnerdff679f2011-01-09 22:39:48 +00005760 // Then, try to solve the above equation provided that Start is constant.
5761 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
5762 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
5763 -StartC->getValue()->getValue(),
5764 *this);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005765 return getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00005766}
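
// As an illustrative example of the paths above (hypothetical addrec): for
// {8,+,-2}<nw> with IsSubExpr == false, Step is negative so CountDown is true
// and Distance is the start value 8. The no-wrap path then returns the exact
// count 8 /u 2 = 4, matching the sequence 8, 6, 4, 2, 0, which reaches zero
// at iteration 4.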
5767
5768/// HowFarToNonZero - Return the number of times a backedge checking the
5769/// specified value for nonzero will execute. If not computable, return
Dan Gohman4c720c02009-06-06 14:37:11 +00005770/// CouldNotCompute
Andrew Trick3ca3f982011-07-26 17:19:55 +00005771ScalarEvolution::ExitLimit
Dan Gohmanba820342010-02-24 17:31:30 +00005772ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
Chris Lattnerd934c702004-04-02 20:23:17 +00005773 // Loops that look like: while (X == 0) are very strange indeed. We don't
5774 // handle them yet except for the trivial case. This could be expanded in the
5775 // future as needed.
Misha Brukman01808ca2005-04-21 21:13:18 +00005776
Chris Lattnerd934c702004-04-02 20:23:17 +00005777 // If the value is a constant, check to see if it is known to be non-zero
5778 // already. If so, the backedge will execute zero times.
Dan Gohmana30370b2009-05-04 22:02:23 +00005779 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
Nick Lewycky5a3db142008-02-21 09:14:53 +00005780 if (!C->getValue()->isNullValue())
Dan Gohman1d2ded72010-05-03 22:09:21 +00005781 return getConstant(C->getType(), 0);
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005782 return getCouldNotCompute(); // Otherwise it will loop infinitely.
Chris Lattnerd934c702004-04-02 20:23:17 +00005783 }
Misha Brukman01808ca2005-04-21 21:13:18 +00005784
Chris Lattnerd934c702004-04-02 20:23:17 +00005785 // We could implement others, but I really doubt anyone writes loops like
5786 // this, and if they did, they would already be constant folded.
Dan Gohmanc5c85c02009-06-27 21:21:31 +00005787 return getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00005788}
5789
Dan Gohmanf9081a22008-09-15 22:18:04 +00005790/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
5791/// (which may not be an immediate predecessor) which has exactly one
5792/// successor from which BB is reachable, along with that successor block, or
5793/// a pair of null blocks if no such block is found.
5794///
Dan Gohman4e3c1132010-04-15 16:19:08 +00005795std::pair<BasicBlock *, BasicBlock *>
Dan Gohmanc8e23622009-04-21 23:15:49 +00005796ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
Dan Gohmanfa066ef2009-04-30 20:48:53 +00005797 // If the block has a unique predecessor, then there is no path from the
5798 // predecessor to the block that does not go through the direct edge
5799 // from the predecessor to the block.
Dan Gohmanf9081a22008-09-15 22:18:04 +00005800 if (BasicBlock *Pred = BB->getSinglePredecessor())
Dan Gohman4e3c1132010-04-15 16:19:08 +00005801 return std::make_pair(Pred, BB);
Dan Gohmanf9081a22008-09-15 22:18:04 +00005802
5803 // A loop's header is defined to be a block that dominates the loop.
Dan Gohman8c77f1a2009-05-18 15:36:09 +00005804 // If the header has a unique predecessor outside the loop, it must be
5805 // a block that has exactly one successor that can reach the loop.
Dan Gohmanc8e23622009-04-21 23:15:49 +00005806 if (Loop *L = LI->getLoopFor(BB))
Dan Gohman75c6b0b2010-06-22 23:43:28 +00005807 return std::make_pair(L->getLoopPredecessor(), L->getHeader());
Dan Gohmanf9081a22008-09-15 22:18:04 +00005808
Dan Gohman4e3c1132010-04-15 16:19:08 +00005809 return std::pair<BasicBlock *, BasicBlock *>();
Dan Gohmanf9081a22008-09-15 22:18:04 +00005810}
5811
Dan Gohman450f4e02009-06-20 00:35:32 +00005812/// HasSameValue - SCEV structural equivalence is usually sufficient for
5813/// testing whether two expressions are equal; however, for the purposes of
5814/// looking for a condition guarding a loop, it can be useful to be a little
5815/// more general, since a front-end may have replicated the controlling
5816/// expression.
5817///
Dan Gohmanaf752342009-07-07 17:06:11 +00005818static bool HasSameValue(const SCEV *A, const SCEV *B) {
Dan Gohman450f4e02009-06-20 00:35:32 +00005819 // Quick check to see if they are the same SCEV.
5820 if (A == B) return true;
5821
5822 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
5823 // two different instructions with the same value. Check for this case.
5824 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
5825 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
5826 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
5827 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
Dan Gohman2d085562009-08-25 17:56:57 +00005828 if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
Dan Gohman450f4e02009-06-20 00:35:32 +00005829 return true;
5830
5831 // Otherwise assume they may have a different value.
5832 return false;
5833}
5834
Dan Gohman48ff3cf2010-04-24 01:28:42 +00005835/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
Sylvestre Ledru91ce36c2012-09-27 10:14:43 +00005836/// predicate Pred. Return true iff any changes were made.
Dan Gohman48ff3cf2010-04-24 01:28:42 +00005837///
5838bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
Benjamin Kramer50b26eb2012-05-30 18:32:23 +00005839 const SCEV *&LHS, const SCEV *&RHS,
5840 unsigned Depth) {
Dan Gohman48ff3cf2010-04-24 01:28:42 +00005841 bool Changed = false;
5842
Benjamin Kramer50b26eb2012-05-30 18:32:23 +00005843 // If we hit the max recursion limit, bail out.
5844 if (Depth >= 3)
5845 return false;
5846
Dan Gohman48ff3cf2010-04-24 01:28:42 +00005847 // Canonicalize a constant to the right side.
5848 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
5849 // Check for both operands constant.
5850 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
5851 if (ConstantExpr::getICmp(Pred,
5852 LHSC->getValue(),
5853 RHSC->getValue())->isNullValue())
5854 goto trivially_false;
5855 else
5856 goto trivially_true;
5857 }
5858 // Otherwise swap the operands to put the constant on the right.
5859 std::swap(LHS, RHS);
5860 Pred = ICmpInst::getSwappedPredicate(Pred);
5861 Changed = true;
5862 }
5863
5864 // If we're comparing an addrec with a value which is loop-invariant in the
Dan Gohmandf564ca2010-05-03 17:00:11 +00005865 // addrec's loop, put the addrec on the left. Also make a dominance check,
5866 // as both operands could be addrecs loop-invariant in each other's loop.
5867 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
5868 const Loop *L = AR->getLoop();
Dan Gohman20d9ce22010-11-17 21:41:58 +00005869 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
Dan Gohman48ff3cf2010-04-24 01:28:42 +00005870 std::swap(LHS, RHS);
5871 Pred = ICmpInst::getSwappedPredicate(Pred);
5872 Changed = true;
5873 }
Dan Gohmandf564ca2010-05-03 17:00:11 +00005874 }
Dan Gohman48ff3cf2010-04-24 01:28:42 +00005875
5876 // If there's a constant operand, canonicalize comparisons with boundary
5877 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
5878 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
5879 const APInt &RA = RC->getValue()->getValue();
5880 switch (Pred) {
5881 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5882 case ICmpInst::ICMP_EQ:
5883 case ICmpInst::ICMP_NE:
Benjamin Kramer50b26eb2012-05-30 18:32:23 +00005884 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
5885 if (!RA)
5886 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
5887 if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
Benjamin Kramer406a2db2012-05-30 18:42:43 +00005888 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
5889 ME->getOperand(0)->isAllOnesValue()) {
Benjamin Kramer50b26eb2012-05-30 18:32:23 +00005890 RHS = AE->getOperand(1);
5891 LHS = ME->getOperand(1);
5892 Changed = true;
5893 }
Dan Gohman48ff3cf2010-04-24 01:28:42 +00005894 break;
5895 case ICmpInst::ICMP_UGE:
5896 if ((RA - 1).isMinValue()) {
5897 Pred = ICmpInst::ICMP_NE;
5898 RHS = getConstant(RA - 1);
5899 Changed = true;
5900 break;
5901 }
5902 if (RA.isMaxValue()) {
5903 Pred = ICmpInst::ICMP_EQ;
5904 Changed = true;
5905 break;
5906 }
5907 if (RA.isMinValue()) goto trivially_true;
5908
5909 Pred = ICmpInst::ICMP_UGT;
5910 RHS = getConstant(RA - 1);
5911 Changed = true;
5912 break;
5913 case ICmpInst::ICMP_ULE:
5914 if ((RA + 1).isMaxValue()) {
5915 Pred = ICmpInst::ICMP_NE;
5916 RHS = getConstant(RA + 1);
5917 Changed = true;
5918 break;
5919 }
5920 if (RA.isMinValue()) {
5921 Pred = ICmpInst::ICMP_EQ;
5922 Changed = true;
5923 break;
5924 }
5925 if (RA.isMaxValue()) goto trivially_true;
5926
5927 Pred = ICmpInst::ICMP_ULT;
5928 RHS = getConstant(RA + 1);
5929 Changed = true;
5930 break;
5931 case ICmpInst::ICMP_SGE:
5932 if ((RA - 1).isMinSignedValue()) {
5933 Pred = ICmpInst::ICMP_NE;
5934 RHS = getConstant(RA - 1);
5935 Changed = true;
5936 break;
5937 }
5938 if (RA.isMaxSignedValue()) {
5939 Pred = ICmpInst::ICMP_EQ;
5940 Changed = true;
5941 break;
5942 }
5943 if (RA.isMinSignedValue()) goto trivially_true;
5944
5945 Pred = ICmpInst::ICMP_SGT;
5946 RHS = getConstant(RA - 1);
5947 Changed = true;
5948 break;
5949 case ICmpInst::ICMP_SLE:
5950 if ((RA + 1).isMaxSignedValue()) {
5951 Pred = ICmpInst::ICMP_NE;
5952 RHS = getConstant(RA + 1);
5953 Changed = true;
5954 break;
5955 }
5956 if (RA.isMinSignedValue()) {
5957 Pred = ICmpInst::ICMP_EQ;
5958 Changed = true;
5959 break;
5960 }
5961 if (RA.isMaxSignedValue()) goto trivially_true;
5962
5963 Pred = ICmpInst::ICMP_SLT;
5964 RHS = getConstant(RA + 1);
5965 Changed = true;
5966 break;
5967 case ICmpInst::ICMP_UGT:
5968 if (RA.isMinValue()) {
5969 Pred = ICmpInst::ICMP_NE;
5970 Changed = true;
5971 break;
5972 }
5973 if ((RA + 1).isMaxValue()) {
5974 Pred = ICmpInst::ICMP_EQ;
5975 RHS = getConstant(RA + 1);
5976 Changed = true;
5977 break;
5978 }
5979 if (RA.isMaxValue()) goto trivially_false;
5980 break;
5981 case ICmpInst::ICMP_ULT:
5982 if (RA.isMaxValue()) {
5983 Pred = ICmpInst::ICMP_NE;
5984 Changed = true;
5985 break;
5986 }
5987 if ((RA - 1).isMinValue()) {
5988 Pred = ICmpInst::ICMP_EQ;
5989 RHS = getConstant(RA - 1);
5990 Changed = true;
5991 break;
5992 }
5993 if (RA.isMinValue()) goto trivially_false;
5994 break;
5995 case ICmpInst::ICMP_SGT:
5996 if (RA.isMinSignedValue()) {
5997 Pred = ICmpInst::ICMP_NE;
5998 Changed = true;
5999 break;
6000 }
6001 if ((RA + 1).isMaxSignedValue()) {
6002 Pred = ICmpInst::ICMP_EQ;
6003 RHS = getConstant(RA + 1);
6004 Changed = true;
6005 break;
6006 }
6007 if (RA.isMaxSignedValue()) goto trivially_false;
6008 break;
6009 case ICmpInst::ICMP_SLT:
6010 if (RA.isMaxSignedValue()) {
6011 Pred = ICmpInst::ICMP_NE;
6012 Changed = true;
6013 break;
6014 }
6015 if ((RA - 1).isMinSignedValue()) {
6016 Pred = ICmpInst::ICMP_EQ;
6017 RHS = getConstant(RA - 1);
6018 Changed = true;
6019 break;
6020 }
6021 if (RA.isMinSignedValue()) goto trivially_false;
6022 break;
6023 }
6024 }
6025
6026 // Check for obvious equality.
6027 if (HasSameValue(LHS, RHS)) {
6028 if (ICmpInst::isTrueWhenEqual(Pred))
6029 goto trivially_true;
6030 if (ICmpInst::isFalseWhenEqual(Pred))
6031 goto trivially_false;
6032 }
6033
Dan Gohman81585c12010-05-03 16:35:17 +00006034 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
6035 // adding or subtracting 1 from one of the operands.
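  // For example, when the signed range of RHS is known not to contain
  // INT_MAX, (LHS s<= RHS) is rewritten as (LHS s< RHS + 1) using an nsw add.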
6036 switch (Pred) {
6037 case ICmpInst::ICMP_SLE:
6038 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
6039 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006040 SCEV::FlagNSW);
Dan Gohman81585c12010-05-03 16:35:17 +00006041 Pred = ICmpInst::ICMP_SLT;
6042 Changed = true;
6043 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
Dan Gohman267700c2010-05-03 20:23:47 +00006044 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006045 SCEV::FlagNSW);
Dan Gohman81585c12010-05-03 16:35:17 +00006046 Pred = ICmpInst::ICMP_SLT;
6047 Changed = true;
6048 }
6049 break;
6050 case ICmpInst::ICMP_SGE:
6051 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
Dan Gohman267700c2010-05-03 20:23:47 +00006052 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006053 SCEV::FlagNSW);
Dan Gohman81585c12010-05-03 16:35:17 +00006054 Pred = ICmpInst::ICMP_SGT;
6055 Changed = true;
6056 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
6057 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006058 SCEV::FlagNSW);
Dan Gohman81585c12010-05-03 16:35:17 +00006059 Pred = ICmpInst::ICMP_SGT;
6060 Changed = true;
6061 }
6062 break;
6063 case ICmpInst::ICMP_ULE:
6064 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
Dan Gohman267700c2010-05-03 20:23:47 +00006065 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006066 SCEV::FlagNUW);
Dan Gohman81585c12010-05-03 16:35:17 +00006067 Pred = ICmpInst::ICMP_ULT;
6068 Changed = true;
6069 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
Dan Gohman267700c2010-05-03 20:23:47 +00006070 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006071 SCEV::FlagNUW);
Dan Gohman81585c12010-05-03 16:35:17 +00006072 Pred = ICmpInst::ICMP_ULT;
6073 Changed = true;
6074 }
6075 break;
6076 case ICmpInst::ICMP_UGE:
6077 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
Dan Gohman267700c2010-05-03 20:23:47 +00006078 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006079 SCEV::FlagNUW);
Dan Gohman81585c12010-05-03 16:35:17 +00006080 Pred = ICmpInst::ICMP_UGT;
6081 Changed = true;
6082 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
Dan Gohman267700c2010-05-03 20:23:47 +00006083 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
Andrew Trick8b55b732011-03-14 16:50:06 +00006084 SCEV::FlagNUW);
Dan Gohman81585c12010-05-03 16:35:17 +00006085 Pred = ICmpInst::ICMP_UGT;
6086 Changed = true;
6087 }
6088 break;
6089 default:
6090 break;
6091 }
6092
Dan Gohman48ff3cf2010-04-24 01:28:42 +00006093 // TODO: More simplifications are possible here.
6094
Benjamin Kramer50b26eb2012-05-30 18:32:23 +00006095 // Recursively simplify until we either hit a recursion limit or nothing
6096 // changes.
6097 if (Changed)
6098 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
6099
Dan Gohman48ff3cf2010-04-24 01:28:42 +00006100 return Changed;
6101
6102trivially_true:
6103 // Return 0 == 0.
Benjamin Kramerddd1b7b2010-11-20 18:43:35 +00006104 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
Dan Gohman48ff3cf2010-04-24 01:28:42 +00006105 Pred = ICmpInst::ICMP_EQ;
6106 return true;
6107
6108trivially_false:
6109 // Return 0 != 0.
Benjamin Kramerddd1b7b2010-11-20 18:43:35 +00006110 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
Dan Gohman48ff3cf2010-04-24 01:28:42 +00006111 Pred = ICmpInst::ICMP_NE;
6112 return true;
6113}
6114
Dan Gohmane65c9172009-07-13 21:35:55 +00006115bool ScalarEvolution::isKnownNegative(const SCEV *S) {
6116 return getSignedRange(S).getSignedMax().isNegative();
6117}
6118
6119bool ScalarEvolution::isKnownPositive(const SCEV *S) {
6120 return getSignedRange(S).getSignedMin().isStrictlyPositive();
6121}
6122
6123bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
6124 return !getSignedRange(S).getSignedMin().isNegative();
6125}
6126
6127bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
6128 return !getSignedRange(S).getSignedMax().isStrictlyPositive();
6129}
6130
6131bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
6132 return isKnownNegative(S) || isKnownPositive(S);
6133}
6134
6135bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
6136 const SCEV *LHS, const SCEV *RHS) {
Dan Gohman36cce7e2010-04-24 01:38:36 +00006137 // Canonicalize the inputs first.
6138 (void)SimplifyICmpOperands(Pred, LHS, RHS);
6139
Dan Gohman07591692010-04-11 22:16:48 +00006140 // If LHS or RHS is an addrec, check to see if the condition is true in
6141 // every iteration of the loop.
6142 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
6143 if (isLoopEntryGuardedByCond(
6144 AR->getLoop(), Pred, AR->getStart(), RHS) &&
6145 isLoopBackedgeGuardedByCond(
Dan Gohman70a3b122010-05-04 01:12:27 +00006146 AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
Dan Gohman07591692010-04-11 22:16:48 +00006147 return true;
6148 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
6149 if (isLoopEntryGuardedByCond(
6150 AR->getLoop(), Pred, LHS, AR->getStart()) &&
6151 isLoopBackedgeGuardedByCond(
Dan Gohman70a3b122010-05-04 01:12:27 +00006152 AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
Dan Gohman07591692010-04-11 22:16:48 +00006153 return true;
Dan Gohmane65c9172009-07-13 21:35:55 +00006154
Dan Gohman07591692010-04-11 22:16:48 +00006155 // Otherwise see what can be done with known constant ranges.
6156 return isKnownPredicateWithRanges(Pred, LHS, RHS);
6157}
6158
6159bool
6160ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
6161 const SCEV *LHS, const SCEV *RHS) {
Dan Gohmane65c9172009-07-13 21:35:55 +00006162 if (HasSameValue(LHS, RHS))
6163 return ICmpInst::isTrueWhenEqual(Pred);
6164
Dan Gohman07591692010-04-11 22:16:48 +00006165 // This code is split out from isKnownPredicate because it is called from
6166 // within isLoopEntryGuardedByCond.
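  // Sketch: if the unsigned range of LHS is [0, 10) and the unsigned range of
  // RHS is [10, 20), then LHS u< RHS is known to hold because
  // umax(LHS) = 9 < 10 = umin(RHS).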
Dan Gohmane65c9172009-07-13 21:35:55 +00006167 switch (Pred) {
6168 default:
Dan Gohman8c129d72009-07-16 17:34:36 +00006169 llvm_unreachable("Unexpected ICmpInst::Predicate value!");
Dan Gohmane65c9172009-07-13 21:35:55 +00006170 case ICmpInst::ICMP_SGT:
Dan Gohmane65c9172009-07-13 21:35:55 +00006171 std::swap(LHS, RHS);
6172 case ICmpInst::ICMP_SLT: {
6173 ConstantRange LHSRange = getSignedRange(LHS);
6174 ConstantRange RHSRange = getSignedRange(RHS);
6175 if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
6176 return true;
6177 if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
6178 return false;
Dan Gohmane65c9172009-07-13 21:35:55 +00006179 break;
6180 }
6181 case ICmpInst::ICMP_SGE:
Dan Gohmane65c9172009-07-13 21:35:55 +00006182 std::swap(LHS, RHS);
6183 case ICmpInst::ICMP_SLE: {
6184 ConstantRange LHSRange = getSignedRange(LHS);
6185 ConstantRange RHSRange = getSignedRange(RHS);
6186 if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
6187 return true;
6188 if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
6189 return false;
Dan Gohmane65c9172009-07-13 21:35:55 +00006190 break;
6191 }
6192 case ICmpInst::ICMP_UGT:
Dan Gohmane65c9172009-07-13 21:35:55 +00006193 std::swap(LHS, RHS);
6194 case ICmpInst::ICMP_ULT: {
6195 ConstantRange LHSRange = getUnsignedRange(LHS);
6196 ConstantRange RHSRange = getUnsignedRange(RHS);
6197 if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
6198 return true;
6199 if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
6200 return false;
Dan Gohmane65c9172009-07-13 21:35:55 +00006201 break;
6202 }
6203 case ICmpInst::ICMP_UGE:
Dan Gohmane65c9172009-07-13 21:35:55 +00006204 std::swap(LHS, RHS);
6205 case ICmpInst::ICMP_ULE: {
6206 ConstantRange LHSRange = getUnsignedRange(LHS);
6207 ConstantRange RHSRange = getUnsignedRange(RHS);
6208 if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
6209 return true;
6210 if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
6211 return false;
Dan Gohmane65c9172009-07-13 21:35:55 +00006212 break;
6213 }
6214 case ICmpInst::ICMP_NE: {
6215 if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
6216 return true;
6217 if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
6218 return true;
6219
6220 const SCEV *Diff = getMinusSCEV(LHS, RHS);
6221 if (isKnownNonZero(Diff))
6222 return true;
6223 break;
6224 }
6225 case ICmpInst::ICMP_EQ:
Dan Gohman34392622009-07-20 23:54:43 +00006226 // The check at the top of the function catches the case where
6227 // the values are known to be equal.
Dan Gohmane65c9172009-07-13 21:35:55 +00006228 break;
6229 }
6230 return false;
6231}
6232
6233/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
6234/// protected by a conditional between LHS and RHS. This is used to
6235/// eliminate casts.
6236bool
6237ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
6238 ICmpInst::Predicate Pred,
6239 const SCEV *LHS, const SCEV *RHS) {
6240 // Interpret a null as meaning no loop, where there is obviously no guard
6241 // (interprocedural conditions notwithstanding).
6242 if (!L) return true;
6243
6244 BasicBlock *Latch = L->getLoopLatch();
6245 if (!Latch)
6246 return false;
6247
6248 BranchInst *LoopContinuePredicate =
6249 dyn_cast<BranchInst>(Latch->getTerminator());
6250 if (!LoopContinuePredicate ||
6251 LoopContinuePredicate->isUnconditional())
6252 return false;
6253
Dan Gohmane18c2d62010-08-10 23:46:30 +00006254 return isImpliedCond(Pred, LHS, RHS,
6255 LoopContinuePredicate->getCondition(),
Dan Gohman430f0cc2009-07-21 23:03:19 +00006256 LoopContinuePredicate->getSuccessor(0) != L->getHeader());
Dan Gohmane65c9172009-07-13 21:35:55 +00006257}
6258
Dan Gohmanb50349a2010-04-11 19:27:13 +00006259/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
Dan Gohmane65c9172009-07-13 21:35:55 +00006260/// by a conditional between LHS and RHS. This is used to help avoid max
6261/// expressions in loop trip counts, and to eliminate casts.
6262bool
Dan Gohmanb50349a2010-04-11 19:27:13 +00006263ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
6264 ICmpInst::Predicate Pred,
6265 const SCEV *LHS, const SCEV *RHS) {
Dan Gohman9cf09f82009-05-18 16:03:58 +00006266 // Interpret a null as meaning no loop, where there is obviously no guard
6267 // (interprocedural conditions notwithstanding).
6268 if (!L) return false;
6269
Dan Gohman8c77f1a2009-05-18 15:36:09 +00006270 // Starting at the loop predecessor, climb up the predecessor chain, as long
6271 // as there are predecessors that can be found that have unique successors
Dan Gohmanf9081a22008-09-15 22:18:04 +00006272 // leading to the original header.
Dan Gohman4e3c1132010-04-15 16:19:08 +00006273 for (std::pair<BasicBlock *, BasicBlock *>
Dan Gohman75c6b0b2010-06-22 23:43:28 +00006274 Pair(L->getLoopPredecessor(), L->getHeader());
Dan Gohman4e3c1132010-04-15 16:19:08 +00006275 Pair.first;
6276 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
Dan Gohman2a62fd92008-08-12 20:17:31 +00006277
6278 BranchInst *LoopEntryPredicate =
Dan Gohman4e3c1132010-04-15 16:19:08 +00006279 dyn_cast<BranchInst>(Pair.first->getTerminator());
Dan Gohman2a62fd92008-08-12 20:17:31 +00006280 if (!LoopEntryPredicate ||
6281 LoopEntryPredicate->isUnconditional())
6282 continue;
6283
Dan Gohmane18c2d62010-08-10 23:46:30 +00006284 if (isImpliedCond(Pred, LHS, RHS,
6285 LoopEntryPredicate->getCondition(),
Dan Gohman4e3c1132010-04-15 16:19:08 +00006286 LoopEntryPredicate->getSuccessor(0) != Pair.second))
Dan Gohman2a62fd92008-08-12 20:17:31 +00006287 return true;
Nick Lewyckyb5688cc2008-07-12 07:41:32 +00006288 }
6289
Dan Gohman2a62fd92008-08-12 20:17:31 +00006290 return false;
Nick Lewyckyb5688cc2008-07-12 07:41:32 +00006291}
6292
Andrew Trick7fa4e0f2012-05-19 00:48:25 +00006293/// RAII wrapper to prevent recursive application of isImpliedCond.
6294/// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
6295/// currently evaluating isImpliedCond.
6296struct MarkPendingLoopPredicate {
6297 Value *Cond;
6298 DenseSet<Value*> &LoopPreds;
6299 bool Pending;
6300
6301 MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
6302 : Cond(C), LoopPreds(LP) {
6303 Pending = !LoopPreds.insert(Cond).second;
6304 }
6305 ~MarkPendingLoopPredicate() {
6306 if (!Pending)
6307 LoopPreds.erase(Cond);
6308 }
6309};
6310
Dan Gohman430f0cc2009-07-21 23:03:19 +00006311/// isImpliedCond - Test whether the condition described by Pred, LHS,
6312/// and RHS is true whenever the given Cond value evaluates to true.
Dan Gohmane18c2d62010-08-10 23:46:30 +00006313bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
Dan Gohman430f0cc2009-07-21 23:03:19 +00006314 const SCEV *LHS, const SCEV *RHS,
Dan Gohmane18c2d62010-08-10 23:46:30 +00006315 Value *FoundCondValue,
Dan Gohman430f0cc2009-07-21 23:03:19 +00006316 bool Inverse) {
Andrew Trick7fa4e0f2012-05-19 00:48:25 +00006317 MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
6318 if (Mark.Pending)
6319 return false;
6320
Dan Gohman8b0a4192010-03-01 17:49:51 +00006321 // Recursively handle And and Or conditions.
Dan Gohmane18c2d62010-08-10 23:46:30 +00006322 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
Dan Gohmanf19aeec2009-06-24 01:18:18 +00006323 if (BO->getOpcode() == Instruction::And) {
6324 if (!Inverse)
Dan Gohmane18c2d62010-08-10 23:46:30 +00006325 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6326 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
Dan Gohmanf19aeec2009-06-24 01:18:18 +00006327 } else if (BO->getOpcode() == Instruction::Or) {
6328 if (Inverse)
Dan Gohmane18c2d62010-08-10 23:46:30 +00006329 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6330 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
Dan Gohmanf19aeec2009-06-24 01:18:18 +00006331 }
6332 }
6333
Dan Gohmane18c2d62010-08-10 23:46:30 +00006334 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
Dan Gohmanf19aeec2009-06-24 01:18:18 +00006335 if (!ICI) return false;
6336
Dan Gohmane65c9172009-07-13 21:35:55 +00006337 // Bail if the ICmp's operands' types are wider than the needed type
6338 // before attempting to call getSCEV on them. This avoids infinite
6339 // recursion, since the analysis of widening casts can require loop
6340 // exit condition information for overflow checking, which would
6341 // lead back here.
6342 if (getTypeSizeInBits(LHS->getType()) <
Dan Gohman430f0cc2009-07-21 23:03:19 +00006343 getTypeSizeInBits(ICI->getOperand(0)->getType()))
Dan Gohmane65c9172009-07-13 21:35:55 +00006344 return false;
6345
Andrew Trickfa594032012-11-29 18:35:13 +00006346 // We have found a conditional branch that dominates the loop or controls the
6347 // loop latch. Check to see if it is the comparison we are looking for.
Dan Gohman430f0cc2009-07-21 23:03:19 +00006348 ICmpInst::Predicate FoundPred;
6349 if (Inverse)
6350 FoundPred = ICI->getInversePredicate();
6351 else
6352 FoundPred = ICI->getPredicate();
6353
6354 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
6355 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
Dan Gohmane65c9172009-07-13 21:35:55 +00006356
6357 // Balance the types. The case where FoundLHS' type is wider than
6358 // LHS' type is checked for above.
6359 if (getTypeSizeInBits(LHS->getType()) >
6360 getTypeSizeInBits(FoundLHS->getType())) {
Stepan Dyatkovskiy431993b2014-01-09 12:26:12 +00006361 if (CmpInst::isSigned(FoundPred)) {
Dan Gohmane65c9172009-07-13 21:35:55 +00006362 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
6363 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
6364 } else {
6365 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
6366 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
6367 }
6368 }
6369
Dan Gohman430f0cc2009-07-21 23:03:19 +00006370 // Canonicalize the query to match the way instcombine will have
6371 // canonicalized the comparison.
Dan Gohman3673aa12010-04-24 01:34:53 +00006372 if (SimplifyICmpOperands(Pred, LHS, RHS))
6373 if (LHS == RHS)
Dan Gohmanb5025c72010-05-03 18:00:24 +00006374 return CmpInst::isTrueWhenEqual(Pred);
Benjamin Kramerba11a982012-11-29 19:07:57 +00006375 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
6376 if (FoundLHS == FoundRHS)
6377 return CmpInst::isFalseWhenEqual(FoundPred);
Dan Gohman430f0cc2009-07-21 23:03:19 +00006378
6379 // Check to see if we can make the LHS or RHS match.
6380 if (LHS == FoundRHS || RHS == FoundLHS) {
6381 if (isa<SCEVConstant>(RHS)) {
6382 std::swap(FoundLHS, FoundRHS);
6383 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
6384 } else {
6385 std::swap(LHS, RHS);
6386 Pred = ICmpInst::getSwappedPredicate(Pred);
6387 }
6388 }
6389
6390 // Check whether the found predicate is the same as the desired predicate.
6391 if (FoundPred == Pred)
6392 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
6393
6394 // Check whether swapping the found predicate makes it the same as the
6395 // desired predicate.
6396 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
6397 if (isa<SCEVConstant>(RHS))
6398 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
6399 else
6400 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
6401 RHS, LHS, FoundLHS, FoundRHS);
6402 }
6403
6404 // Check whether the actual condition is beyond sufficient.
6405 if (FoundPred == ICmpInst::ICMP_EQ)
6406 if (ICmpInst::isTrueWhenEqual(Pred))
6407 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
6408 return true;
6409 if (Pred == ICmpInst::ICMP_NE)
6410 if (!ICmpInst::isTrueWhenEqual(FoundPred))
6411 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
6412 return true;
6413
6414 // Otherwise assume the worst.
6415 return false;
Dan Gohmane65c9172009-07-13 21:35:55 +00006416}
6417
Dan Gohman430f0cc2009-07-21 23:03:19 +00006418/// isImpliedCondOperands - Test whether the condition described by Pred,
Dan Gohman8b0a4192010-03-01 17:49:51 +00006419/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
Dan Gohman430f0cc2009-07-21 23:03:19 +00006420/// and FoundRHS is true.
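/// For illustration with hypothetical values: the desired fact (%a s< %b) is
/// implied by a known (%x s< %y) whenever %a s<= %x and %b s>= %y, which is
/// exactly what the helper below checks operand-wise using known ranges.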
6421bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
6422 const SCEV *LHS, const SCEV *RHS,
6423 const SCEV *FoundLHS,
6424 const SCEV *FoundRHS) {
6425 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
6426 FoundLHS, FoundRHS) ||
6427 // ~x < ~y --> x > y
6428 isImpliedCondOperandsHelper(Pred, LHS, RHS,
6429 getNotSCEV(FoundRHS),
6430 getNotSCEV(FoundLHS));
6431}
6432
6433/// isImpliedCondOperandsHelper - Test whether the condition described by
Dan Gohman8b0a4192010-03-01 17:49:51 +00006434/// Pred, LHS, and RHS is true whenever the condition described by Pred,
Dan Gohman430f0cc2009-07-21 23:03:19 +00006435/// FoundLHS, and FoundRHS is true.
Dan Gohmane65c9172009-07-13 21:35:55 +00006436bool
Dan Gohman430f0cc2009-07-21 23:03:19 +00006437ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
6438 const SCEV *LHS, const SCEV *RHS,
6439 const SCEV *FoundLHS,
6440 const SCEV *FoundRHS) {
Dan Gohmane65c9172009-07-13 21:35:55 +00006441 switch (Pred) {
Dan Gohman8c129d72009-07-16 17:34:36 +00006442 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6443 case ICmpInst::ICMP_EQ:
6444 case ICmpInst::ICMP_NE:
6445 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
6446 return true;
6447 break;
Dan Gohmane65c9172009-07-13 21:35:55 +00006448 case ICmpInst::ICMP_SLT:
Dan Gohman8c129d72009-07-16 17:34:36 +00006449 case ICmpInst::ICMP_SLE:
Dan Gohman07591692010-04-11 22:16:48 +00006450 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
6451 isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
Dan Gohmane65c9172009-07-13 21:35:55 +00006452 return true;
6453 break;
6454 case ICmpInst::ICMP_SGT:
Dan Gohman8c129d72009-07-16 17:34:36 +00006455 case ICmpInst::ICMP_SGE:
Dan Gohman07591692010-04-11 22:16:48 +00006456 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
6457 isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
Dan Gohmane65c9172009-07-13 21:35:55 +00006458 return true;
6459 break;
6460 case ICmpInst::ICMP_ULT:
Dan Gohman8c129d72009-07-16 17:34:36 +00006461 case ICmpInst::ICMP_ULE:
Dan Gohman07591692010-04-11 22:16:48 +00006462 if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
6463 isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
Dan Gohmane65c9172009-07-13 21:35:55 +00006464 return true;
6465 break;
6466 case ICmpInst::ICMP_UGT:
Dan Gohman8c129d72009-07-16 17:34:36 +00006467 case ICmpInst::ICMP_UGE:
Dan Gohman07591692010-04-11 22:16:48 +00006468 if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
6469 isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
Dan Gohmane65c9172009-07-13 21:35:55 +00006470 return true;
6471 break;
6472 }
6473
6474 return false;
Dan Gohmanf19aeec2009-06-24 01:18:18 +00006475}
6476
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006477// Verify whether a linear IV with a positive stride can overflow in a
6478// less-than comparison, given the invariant term of the comparison, the
6479// stride, and the NSW/NUW flags known for the recurrence.
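// Unsigned sketch with an 8-bit IV: if RHS is known to be at most 250 and the
// stride at most 10, then 250 + (10 - 1) = 259 > 255, so the IV may step past
// the unsigned maximum and overflow must be reported as possible.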
6480bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
6481 bool IsSigned, bool NoWrap) {
6482 if (NoWrap) return false;
Dan Gohman51aaf022010-01-26 04:40:18 +00006483
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006484 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
6485 const SCEV *One = getConstant(Stride->getType(), 1);
Andrew Trick2afa3252011-03-09 17:29:58 +00006486
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006487 if (IsSigned) {
6488 APInt MaxRHS = getSignedRange(RHS).getSignedMax();
6489 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
6490 APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
6491 .getSignedMax();
Andrew Trick2afa3252011-03-09 17:29:58 +00006492
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006493 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
6494 return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
Dan Gohman36bad002009-09-17 18:05:20 +00006495 }
Dan Gohman01048422009-06-21 23:46:38 +00006496
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006497 APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
6498 APInt MaxValue = APInt::getMaxValue(BitWidth);
6499 APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
6500 .getUnsignedMax();
6501
6502 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
6503 return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
6504}
6505
6506// Verify whether a linear IV with a negative stride can overflow in a
6507// greater-than comparison, given the invariant term of the comparison,
6508// the stride, and the NSW/NUW flags known for the recurrence.
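// Mirror-image unsigned sketch: if RHS may be as small as 3 while the stride
// may be as large as 10, then 3 - (10 - 1) wraps below zero, so overflow must
// be reported as possible.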
6509bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
6510 bool IsSigned, bool NoWrap) {
6511 if (NoWrap) return false;
6512
6513 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
6514 const SCEV *One = getConstant(Stride->getType(), 1);
6515
6516 if (IsSigned) {
6517 APInt MinRHS = getSignedRange(RHS).getSignedMin();
6518 APInt MinValue = APInt::getSignedMinValue(BitWidth);
6519 APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
6520 .getSignedMax();
6521
6522 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
6523 return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
6524 }
6525
6526 APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
6527 APInt MinValue = APInt::getMinValue(BitWidth);
6528 APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
6529 .getUnsignedMax();
6530
6531 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
6532 return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
6533}
6534
6535// Compute the backedge taken count given the interval difference (Delta), the
6536// stride, and whether the comparison includes equality.
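// Sketch: without equality the result is ceil(Delta / Step), computed as
// (Delta + Step - 1) /u Step; e.g. Delta = 10, Step = 3 gives (10 + 2) /u 3 = 4.
// With equality, one extra iteration is counted whenever Step divides Delta.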
6537const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
6538 bool Equality) {
6539 const SCEV *One = getConstant(Step->getType(), 1);
6540 Delta = Equality ? getAddExpr(Delta, Step)
6541 : getAddExpr(Delta, getMinusSCEV(Step, One));
6542 return getUDivExpr(Delta, Step);
Dan Gohman01048422009-06-21 23:46:38 +00006543}
6544
Chris Lattner587a75b2005-08-15 23:33:51 +00006545/// HowManyLessThans - Return the number of times a backedge containing the
6546/// specified less-than comparison will execute. If not computable, return
Dan Gohman4c720c02009-06-06 14:37:11 +00006547/// CouldNotCompute.
Andrew Trick5b245a12013-05-31 06:43:25 +00006548///
6549/// @param IsSubExpr is true when the LHS < RHS condition does not directly
6550/// control the branch. In this case, we can only compute an iteration count for
6551/// a subexpression that cannot overflow before evaluating true.
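/// Sketch (hypothetical %n), assuming the overflow checks below succeed: for
/// IV = {0,+,4} compared unsigned-less-than against a loop-invariant %n, the
/// backedge-taken count is computeBECount(%n - 0, 4, false) = (%n + 3) /u 4.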
Andrew Trick3ca3f982011-07-26 17:19:55 +00006552ScalarEvolution::ExitLimit
Dan Gohmance973df2009-06-24 04:48:43 +00006553ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006554 const Loop *L, bool IsSigned,
Andrew Trick5b245a12013-05-31 06:43:25 +00006555 bool IsSubExpr) {
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006556 // We handle only IV < Invariant
6557 if (!isLoopInvariant(RHS, L))
Dan Gohmanc5c85c02009-06-27 21:21:31 +00006558 return getCouldNotCompute();
Chris Lattner587a75b2005-08-15 23:33:51 +00006559
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006560 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
Dan Gohman2b8da352009-04-30 20:47:05 +00006561
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006562 // Avoid weird loops
6563 if (!IV || IV->getLoop() != L || !IV->isAffine())
6564 return getCouldNotCompute();
Chris Lattner587a75b2005-08-15 23:33:51 +00006565
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006566 bool NoWrap = !IsSubExpr &&
6567 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
Wojciech Matyjewicz35545fd2008-02-13 11:51:34 +00006568
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006569 const SCEV *Stride = IV->getStepRecurrence(*this);
Wojciech Matyjewicz35545fd2008-02-13 11:51:34 +00006570
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006571 // Avoid negative or zero stride values
6572 if (!isKnownPositive(Stride))
6573 return getCouldNotCompute();
Dan Gohman2b8da352009-04-30 20:47:05 +00006574
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006575 // Avoid proven overflow cases: this will ensure that the backedge taken count
6576 // will not generate any unsigned overflow. Relaxed no-overflow conditions
6577 // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
6578 // behavior, such as signed overflow in C.
6579 if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
6580 return getCouldNotCompute();
Dan Gohman2b8da352009-04-30 20:47:05 +00006581
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006582 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
6583 : ICmpInst::ICMP_ULT;
6584 const SCEV *Start = IV->getStart();
6585 const SCEV *End = RHS;
6586 if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
6587 End = IsSigned ? getSMaxExpr(RHS, Start)
6588 : getUMaxExpr(RHS, Start);
Dan Gohman51aaf022010-01-26 04:40:18 +00006589
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006590 const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
Dan Gohman2b8da352009-04-30 20:47:05 +00006591
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006592 APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
6593 : getUnsignedRange(Start).getUnsignedMin();
Andrew Trick2afa3252011-03-09 17:29:58 +00006594
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006595 APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
6596 : getUnsignedRange(Stride).getUnsignedMin();
Dan Gohman2b8da352009-04-30 20:47:05 +00006597
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006598 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
6599 APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
6600 : APInt::getMaxValue(BitWidth) - (MinStride - 1);
Chris Lattner587a75b2005-08-15 23:33:51 +00006601
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006602 // Although End can be a MAX expression, we estimate MaxEnd considering only
6603 // the case End = RHS. This is safe because in the other case (End - Start)
6604 // is zero, leading to a zero maximum backedge taken count.
6605 APInt MaxEnd =
6606 IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
6607 : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
6608
Arnaud A. de Grandmaison75c9e6d2014-03-15 22:13:15 +00006609 const SCEV *MaxBECount;
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006610 if (isa<SCEVConstant>(BECount))
6611 MaxBECount = BECount;
6612 else
6613 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
6614 getConstant(MinStride), false);
6615
6616 if (isa<SCEVCouldNotCompute>(MaxBECount))
6617 MaxBECount = BECount;
6618
Andrew Trickee5aa7f2014-01-15 06:42:11 +00006619 return ExitLimit(BECount, MaxBECount, /*MustExit=*/true);
Andrew Trick34e2f0c2013-11-06 02:08:26 +00006620}
6621
6622ScalarEvolution::ExitLimit
6623ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
6624 const Loop *L, bool IsSigned,
6625 bool IsSubExpr) {
6626 // We handle only IV > Invariant
6627 if (!isLoopInvariant(RHS, L))
6628 return getCouldNotCompute();
6629
6630 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
6631
6632 // Avoid weird loops
6633 if (!IV || IV->getLoop() != L || !IV->isAffine())
6634 return getCouldNotCompute();
6635
6636 bool NoWrap = !IsSubExpr &&
6637 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
6638
6639 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
6640
6641 // Avoid negative or zero stride values
6642 if (!isKnownPositive(Stride))
6643 return getCouldNotCompute();
6644
6645 // Avoid proven overflow cases: this will ensure that the backedge taken count
6646 // will not generate any unsigned overflow. Relaxed no-overflow conditions
6647 // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
6648 // behavior, such as signed overflow in C.
6649 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
6650 return getCouldNotCompute();
6651
6652 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
6653 : ICmpInst::ICMP_UGT;
6654
6655 const SCEV *Start = IV->getStart();
6656 const SCEV *End = RHS;
6657 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
6658 End = IsSigned ? getSMinExpr(RHS, Start)
6659 : getUMinExpr(RHS, Start);
6660
6661 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
6662
6663 APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
6664 : getUnsignedRange(Start).getUnsignedMax();
6665
6666 APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
6667 : getUnsignedRange(Stride).getUnsignedMin();
6668
6669 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
6670 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
6671 : APInt::getMinValue(BitWidth) + (MinStride - 1);
6672
6673 // Although End can be a MIN expression, we estimate MinEnd considering only
6674 // the case End = RHS. This is safe because in the other case (Start - End)
6675 // is zero, leading to a zero maximum backedge taken count.
6676 APInt MinEnd =
6677 IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
6678 : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);
6679
6680
6681 const SCEV *MaxBECount = getCouldNotCompute();
6682 if (isa<SCEVConstant>(BECount))
6683 MaxBECount = BECount;
6684 else
6685 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
6686 getConstant(MinStride), false);
6687
6688 if (isa<SCEVCouldNotCompute>(MaxBECount))
6689 MaxBECount = BECount;
6690
Andrew Trickee5aa7f2014-01-15 06:42:11 +00006691 return ExitLimit(BECount, MaxBECount, /*MustExit=*/true);
Chris Lattner587a75b2005-08-15 23:33:51 +00006692}
6693
Chris Lattnerd934c702004-04-02 20:23:17 +00006694/// getNumIterationsInRange - Return the number of iterations of this loop that
6695/// produce values in the specified constant range. Another way of looking at
6696/// this is that it returns the first iteration number where the value is not in
6697/// the range, thus computing the exit count. If the iteration count can't
6698/// be computed, an instance of SCEVCouldNotCompute is returned.
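/// Affine sketch: for {0,+,3} and Range = [0, 10), End is 9 and the exit value
/// is (9 + 3) /u 3 = 4; iteration 3 still produces 9, which is in the range,
/// while iteration 4 produces 12, which is not.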
Dan Gohmanaf752342009-07-07 17:06:11 +00006699const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
Dan Gohmance973df2009-06-24 04:48:43 +00006700 ScalarEvolution &SE) const {
Chris Lattnerd934c702004-04-02 20:23:17 +00006701 if (Range.isFullSet()) // Infinite loop.
Dan Gohman31efa302009-04-18 17:58:19 +00006702 return SE.getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00006703
6704 // If the start is a non-zero constant, shift the range to simplify things.
Dan Gohmana30370b2009-05-04 22:02:23 +00006705 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
Reid Spencer2e54a152007-03-02 00:28:52 +00006706 if (!SC->getValue()->isZero()) {
Dan Gohmanaf752342009-07-07 17:06:11 +00006707 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
Dan Gohman1d2ded72010-05-03 22:09:21 +00006708 Operands[0] = SE.getConstant(SC->getType(), 0);
Andrew Trick8b55b732011-03-14 16:50:06 +00006709 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
Andrew Trickf6b01ff2011-03-15 00:37:00 +00006710 getNoWrapFlags(FlagNW));
Dan Gohmana30370b2009-05-04 22:02:23 +00006711 if (const SCEVAddRecExpr *ShiftedAddRec =
6712 dyn_cast<SCEVAddRecExpr>(Shifted))
Chris Lattnerd934c702004-04-02 20:23:17 +00006713 return ShiftedAddRec->getNumIterationsInRange(
Dan Gohmana37eaf22007-10-22 18:31:58 +00006714 Range.subtract(SC->getValue()->getValue()), SE);
Chris Lattnerd934c702004-04-02 20:23:17 +00006715 // This is strange and shouldn't happen.
Dan Gohman31efa302009-04-18 17:58:19 +00006716 return SE.getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00006717 }
6718
6719 // The only time we can solve this is when we have all constant indices.
6720 // Otherwise, we cannot determine the overflow conditions.
6721 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6722 if (!isa<SCEVConstant>(getOperand(i)))
Dan Gohman31efa302009-04-18 17:58:19 +00006723 return SE.getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00006724
6725
6726 // Okay at this point we know that all elements of the chrec are constants and
6727 // that the start element is zero.
6728
6729 // First check to see if the range contains zero. If not, the first
6730 // iteration exits.
Dan Gohmanb397e1a2009-04-21 01:07:12 +00006731 unsigned BitWidth = SE.getTypeSizeInBits(getType());
Dan Gohman0a40ad92009-04-16 03:18:22 +00006732 if (!Range.contains(APInt(BitWidth, 0)))
Dan Gohman1d2ded72010-05-03 22:09:21 +00006733 return SE.getConstant(getType(), 0);
Misha Brukman01808ca2005-04-21 21:13:18 +00006734
Chris Lattnerd934c702004-04-02 20:23:17 +00006735 if (isAffine()) {
6736 // If this is an affine expression then we have this situation:
6737 // Solve {0,+,A} in Range === Ax in Range
6738
Nick Lewycky52460262007-07-16 02:08:00 +00006739 // We know that zero is in the range. If A is positive then we know that
6740 // the upper value of the range must be the first possible exit value.
6741 // If A is negative then the lower of the range is the last possible loop
6742 // value. Also note that we already checked for a full range.
Dan Gohman0a40ad92009-04-16 03:18:22 +00006743 APInt One(BitWidth,1);
Nick Lewycky52460262007-07-16 02:08:00 +00006744 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
6745 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
Chris Lattnerd934c702004-04-02 20:23:17 +00006746
Nick Lewycky52460262007-07-16 02:08:00 +00006747 // The exit value should be (End+A)/A.
Nick Lewycky39349612007-09-27 14:12:54 +00006748 APInt ExitVal = (End + A).udiv(A);
Owen Andersonedb4a702009-07-24 23:12:02 +00006749 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
Chris Lattnerd934c702004-04-02 20:23:17 +00006750
6751 // Evaluate at the exit value. If we really did fall out of the valid
6752 // range, then we computed our trip count, otherwise wrap around or other
6753 // things must have happened.
Dan Gohmana37eaf22007-10-22 18:31:58 +00006754 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
Reid Spencer6a440332007-03-01 07:54:15 +00006755 if (Range.contains(Val->getValue()))
Dan Gohman31efa302009-04-18 17:58:19 +00006756 return SE.getCouldNotCompute(); // Something strange happened
Chris Lattnerd934c702004-04-02 20:23:17 +00006757
6758 // Ensure that the previous value is in the range. This is a sanity check.
Reid Spencer3a7e9d82007-02-28 19:57:34 +00006759 assert(Range.contains(
Dan Gohmance973df2009-06-24 04:48:43 +00006760 EvaluateConstantChrecAtConstant(this,
Owen Andersonedb4a702009-07-24 23:12:02 +00006761 ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
Chris Lattnerd934c702004-04-02 20:23:17 +00006762 "Linear scev computation is off in a bad way!");
Dan Gohmana37eaf22007-10-22 18:31:58 +00006763 return SE.getConstant(ExitValue);
Chris Lattnerd934c702004-04-02 20:23:17 +00006764 } else if (isQuadratic()) {
6765 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
6766 // quadratic equation to solve it. To do this, we must frame our problem in
6767 // terms of figuring out when zero is crossed, instead of when
6768 // Range.getUpper() is crossed.
Dan Gohmanaf752342009-07-07 17:06:11 +00006769 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
Dan Gohmana37eaf22007-10-22 18:31:58 +00006770 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
Andrew Trick8b55b732011-03-14 16:50:06 +00006771 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
6772 // getNoWrapFlags(FlagNW)
6773 FlagAnyWrap);
Chris Lattnerd934c702004-04-02 20:23:17 +00006774
6775 // Next, solve the constructed addrec
Dan Gohmanaf752342009-07-07 17:06:11 +00006776 std::pair<const SCEV *,const SCEV *> Roots =
Dan Gohmana37eaf22007-10-22 18:31:58 +00006777 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
Dan Gohman48f82222009-05-04 22:30:44 +00006778 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
6779 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
Chris Lattnerd934c702004-04-02 20:23:17 +00006780 if (R1) {
6781 // Pick the smallest positive root value.
Zhou Sheng75b871f2007-01-11 12:24:14 +00006782 if (ConstantInt *CB =
Owen Anderson487375e2009-07-29 18:55:55 +00006783 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
Owen Andersonf1f17432009-07-06 22:37:39 +00006784 R1->getValue(), R2->getValue()))) {
Reid Spencercddc9df2007-01-12 04:24:46 +00006785 if (!CB->getZExtValue())
Chris Lattnerd934c702004-04-02 20:23:17 +00006786 std::swap(R1, R2); // R1 is the minimum root now.
Misha Brukman01808ca2005-04-21 21:13:18 +00006787
Chris Lattnerd934c702004-04-02 20:23:17 +00006788 // Make sure the root is not off by one. The returned iteration should
6789 // not be in the range, but the previous one should be. When solving
6790 // for "X*X < 5", for example, we should not return a root of 2.
6791 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
Dan Gohmana37eaf22007-10-22 18:31:58 +00006792 R1->getValue(),
6793 SE);
Reid Spencer6a440332007-03-01 07:54:15 +00006794 if (Range.contains(R1Val->getValue())) {
Chris Lattnerd934c702004-04-02 20:23:17 +00006795 // The next iteration must be out of the range...
Owen Andersonf1f17432009-07-06 22:37:39 +00006796 ConstantInt *NextVal =
Owen Andersonedb4a702009-07-24 23:12:02 +00006797 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
Misha Brukman01808ca2005-04-21 21:13:18 +00006798
Dan Gohmana37eaf22007-10-22 18:31:58 +00006799 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
Reid Spencer6a440332007-03-01 07:54:15 +00006800 if (!Range.contains(R1Val->getValue()))
Dan Gohmana37eaf22007-10-22 18:31:58 +00006801 return SE.getConstant(NextVal);
Dan Gohman31efa302009-04-18 17:58:19 +00006802 return SE.getCouldNotCompute(); // Something strange happened
Chris Lattnerd934c702004-04-02 20:23:17 +00006803 }
Misha Brukman01808ca2005-04-21 21:13:18 +00006804
Chris Lattnerd934c702004-04-02 20:23:17 +00006805 // If R1 was not in the range, then it is a good return value. Make
6806 // sure that R1-1 WAS in the range though, just in case.
Owen Andersonf1f17432009-07-06 22:37:39 +00006807 ConstantInt *NextVal =
Owen Andersonedb4a702009-07-24 23:12:02 +00006808 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
Dan Gohmana37eaf22007-10-22 18:31:58 +00006809 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
Reid Spencer6a440332007-03-01 07:54:15 +00006810 if (Range.contains(R1Val->getValue()))
Chris Lattnerd934c702004-04-02 20:23:17 +00006811 return R1;
Dan Gohman31efa302009-04-18 17:58:19 +00006812 return SE.getCouldNotCompute(); // Something strange happened
Chris Lattnerd934c702004-04-02 20:23:17 +00006813 }
6814 }
6815 }
6816
Dan Gohman31efa302009-04-18 17:58:19 +00006817 return SE.getCouldNotCompute();
Chris Lattnerd934c702004-04-02 20:23:17 +00006818}
6819
Sebastian Popc62c6792013-11-12 22:47:20 +00006820static const APInt srem(const SCEVConstant *C1, const SCEVConstant *C2) {
6821 APInt A = C1->getValue()->getValue();
6822 APInt B = C2->getValue()->getValue();
6823 uint32_t ABW = A.getBitWidth();
6824 uint32_t BBW = B.getBitWidth();
6825
6826 if (ABW > BBW)
Benjamin Kramer5f2768c2013-11-16 16:25:41 +00006827 B = B.sext(ABW);
Sebastian Popc62c6792013-11-12 22:47:20 +00006828 else if (ABW < BBW)
Benjamin Kramer5f2768c2013-11-16 16:25:41 +00006829 A = A.sext(BBW);
Sebastian Popc62c6792013-11-12 22:47:20 +00006830
6831 return APIntOps::srem(A, B);
6832}
6833
6834static const APInt sdiv(const SCEVConstant *C1, const SCEVConstant *C2) {
6835 APInt A = C1->getValue()->getValue();
6836 APInt B = C2->getValue()->getValue();
6837 uint32_t ABW = A.getBitWidth();
6838 uint32_t BBW = B.getBitWidth();
6839
6840 if (ABW > BBW)
Benjamin Kramer5f2768c2013-11-16 16:25:41 +00006841 B = B.sext(ABW);
Sebastian Popc62c6792013-11-12 22:47:20 +00006842 else if (ABW < BBW)
Benjamin Kramer5f2768c2013-11-16 16:25:41 +00006843 A = A.sext(BBW);
Sebastian Popc62c6792013-11-12 22:47:20 +00006844
6845 return APIntOps::sdiv(A, B);
6846}
6847
6848namespace {
6849struct SCEVGCD : public SCEVVisitor<SCEVGCD, const SCEV *> {
6850public:
6851 // Pattern match Step into Start. When Step is a multiply expression, find
6852 // the largest subexpression of Step that appears in Start. When Start is an
6853 // add expression, try to match Step in the subexpressions of Start; non-
6854 // matching subexpressions are returned under Remainder.
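  // Sketch with a hypothetical %m: for Start = (-4 + (3 * %m)) and Step = %m,
  // the matched GCD is %m and the Remainder is -4.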
6855 static const SCEV *findGCD(ScalarEvolution &SE, const SCEV *Start,
6856 const SCEV *Step, const SCEV **Remainder) {
6857 assert(Remainder && "Remainder should not be NULL");
6858 SCEVGCD R(SE, Step, SE.getConstant(Step->getType(), 0));
6859 const SCEV *Res = R.visit(Start);
6860 *Remainder = R.Remainder;
6861 return Res;
6862 }
6863
6864 SCEVGCD(ScalarEvolution &S, const SCEV *G, const SCEV *R)
6865 : SE(S), GCD(G), Remainder(R) {
6866 Zero = SE.getConstant(GCD->getType(), 0);
6867 One = SE.getConstant(GCD->getType(), 1);
6868 }
6869
6870 const SCEV *visitConstant(const SCEVConstant *Constant) {
6871 if (GCD == Constant || Constant == Zero)
6872 return GCD;
6873
6874 if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD)) {
6875 const SCEV *Res = SE.getConstant(gcd(Constant, CGCD));
6876 if (Res != One)
6877 return Res;
6878
6879 Remainder = SE.getConstant(srem(Constant, CGCD));
6880 Constant = cast<SCEVConstant>(SE.getMinusSCEV(Constant, Remainder));
6881 Res = SE.getConstant(gcd(Constant, CGCD));
6882 return Res;
6883 }
6884
6885 // When GCD is not a constant, it could be that the GCD is an Add, Mul,
6886 // AddRec, etc., in which case we want to find out how many times the
6887 // Constant divides the GCD: we then return that as the new GCD.
6888 const SCEV *Rem = Zero;
6889 const SCEV *Res = findGCD(SE, GCD, Constant, &Rem);
6890
6891 if (Res == One || Rem != Zero) {
6892 Remainder = Constant;
6893 return One;
6894 }
6895
6896 assert(isa<SCEVConstant>(Res) && "Res should be a constant");
6897 Remainder = SE.getConstant(srem(Constant, cast<SCEVConstant>(Res)));
6898 return Res;
6899 }
6900
6901 const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
6902 if (GCD != Expr)
6903 Remainder = Expr;
6904 return GCD;
6905 }
6906
6907 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
6908 if (GCD != Expr)
6909 Remainder = Expr;
6910 return GCD;
6911 }
6912
6913 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
6914 if (GCD != Expr)
6915 Remainder = Expr;
6916 return GCD;
6917 }
6918
6919 const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
6920 if (GCD == Expr)
6921 return GCD;
6922
6923 for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6924 const SCEV *Rem = Zero;
6925 const SCEV *Res = findGCD(SE, Expr->getOperand(e - 1 - i), GCD, &Rem);
6926
6927 // FIXME: There may be ambiguous situations: for instance,
6928 // GCD(-4 + (3 * %m), 2 * %m) where 2 divides -4 and %m divides (3 * %m).
6929 // The order in which the AddExpr is traversed computes a different GCD
6930 // and Remainder.
6931 if (Res != One)
6932 GCD = Res;
6933 if (Rem != Zero)
6934 Remainder = SE.getAddExpr(Remainder, Rem);
6935 }
6936
6937 return GCD;
6938 }
6939
6940 const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
6941 if (GCD == Expr)
6942 return GCD;
6943
6944 for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6945 if (Expr->getOperand(i) == GCD)
6946 return GCD;
6947 }
6948
6949 // If we have not returned yet, it means that GCD is not part of Expr.
6950 const SCEV *PartialGCD = One;
6951 for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6952 const SCEV *Rem = Zero;
6953 const SCEV *Res = findGCD(SE, Expr->getOperand(i), GCD, &Rem);
6954 if (Rem != Zero)
6955 // GCD does not divide Expr->getOperand(i).
6956 continue;
6957
6958 if (Res == GCD)
6959 return GCD;
6960 PartialGCD = SE.getMulExpr(PartialGCD, Res);
6961 if (PartialGCD == GCD)
6962 return GCD;
6963 }
6964
6965 if (PartialGCD != One)
6966 return PartialGCD;
6967
Sebastian Popb5b84e02014-04-08 21:21:05 +00006968 // Failed to find a PartialGCD: set the Remainder to the full expression,
6969 // and return the GCD.
Sebastian Popc62c6792013-11-12 22:47:20 +00006970 Remainder = Expr;
6971 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(GCD);
6972 if (!Mul)
Sebastian Popb5b84e02014-04-08 21:21:05 +00006973 return GCD;
Sebastian Popc62c6792013-11-12 22:47:20 +00006974
6975 // When the GCD is a multiply expression, try to decompose it:
6976 // this occurs when Step does not divide the Start expression
6977 // as in: {(-4 + (3 * %m)),+,(2 * %m)}
6978 for (int i = 0, e = Mul->getNumOperands(); i < e; ++i) {
6979 const SCEV *Rem = Zero;
6980 const SCEV *Res = findGCD(SE, Expr, Mul->getOperand(i), &Rem);
6981 if (Rem == Zero) {
6982 Remainder = Rem;
6983 return Res;
6984 }
6985 }
6986
Sebastian Popb5b84e02014-04-08 21:21:05 +00006987 return GCD;
Sebastian Popc62c6792013-11-12 22:47:20 +00006988 }
6989
6990 const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
6991 if (GCD != Expr)
6992 Remainder = Expr;
6993 return GCD;
6994 }
6995
6996 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
6997 if (GCD == Expr)
6998 return GCD;
6999
7000 if (!Expr->isAffine()) {
7001 Remainder = Expr;
7002 return GCD;
7003 }
7004
7005 const SCEV *Rem = Zero;
7006 const SCEV *Res = findGCD(SE, Expr->getOperand(0), GCD, &Rem);
Sebastian Pop9738e832014-04-08 21:21:10 +00007007 if (Res == One || Res->isAllOnesValue()) {
7008 Remainder = Expr;
7009 return GCD;
7010 }
7011
Sebastian Popc62c6792013-11-12 22:47:20 +00007012 if (Rem != Zero)
7013 Remainder = SE.getAddExpr(Remainder, Rem);
7014
7015 Rem = Zero;
7016 Res = findGCD(SE, Expr->getOperand(1), Res, &Rem);
Sebastian Pop9738e832014-04-08 21:21:10 +00007017 if (Rem != Zero || Res == One || Res->isAllOnesValue()) {
Sebastian Popc62c6792013-11-12 22:47:20 +00007018 Remainder = Expr;
7019 return GCD;
7020 }
7021
7022 return Res;
7023 }
7024
7025 const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
7026 if (GCD != Expr)
7027 Remainder = Expr;
7028 return GCD;
7029 }
7030
7031 const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
7032 if (GCD != Expr)
7033 Remainder = Expr;
7034 return GCD;
7035 }
7036
7037 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
7038 if (GCD != Expr)
7039 Remainder = Expr;
7040 return GCD;
7041 }
7042
7043 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
7044 return One;
7045 }
7046
7047private:
7048 ScalarEvolution &SE;
7049 const SCEV *GCD, *Remainder, *Zero, *One;
7050};
7051
7052struct SCEVDivision : public SCEVVisitor<SCEVDivision, const SCEV *> {
7053public:
7054 // Remove from Start all multiples of Step.
7055 static const SCEV *divide(ScalarEvolution &SE, const SCEV *Start,
7056 const SCEV *Step) {
7057 SCEVDivision D(SE, Step);
7058 const SCEV *Rem = D.Zero;
7059 (void)Rem;
7060 // The division is guaranteed to succeed: Step should divide Start with no
7061 // remainder.
7062 assert(Step == SCEVGCD::findGCD(SE, Start, Step, &Rem) && Rem == D.Zero &&
7063 "Step should divide Start with no remainder.");
7064 return D.visit(Start);
7065 }
7066
7067 SCEVDivision(ScalarEvolution &S, const SCEV *G) : SE(S), GCD(G) {
7068 Zero = SE.getConstant(GCD->getType(), 0);
7069 One = SE.getConstant(GCD->getType(), 1);
7070 }
7071
7072 const SCEV *visitConstant(const SCEVConstant *Constant) {
7073 if (GCD == Constant)
7074 return One;
7075
7076 if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD))
7077 return SE.getConstant(sdiv(Constant, CGCD));
7078 return Constant;
7079 }
7080
7081 const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
7082 if (GCD == Expr)
7083 return One;
7084 return Expr;
7085 }
7086
7087 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
7088 if (GCD == Expr)
7089 return One;
7090 return Expr;
7091 }
7092
7093 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
7094 if (GCD == Expr)
7095 return One;
7096 return Expr;
7097 }
7098
7099 const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
7100 if (GCD == Expr)
7101 return One;
7102
7103 SmallVector<const SCEV *, 2> Operands;
7104 for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
7105 Operands.push_back(divide(SE, Expr->getOperand(i), GCD));
7106
7107 if (Operands.size() == 1)
7108 return Operands[0];
7109 return SE.getAddExpr(Operands);
7110 }
7111
7112 const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
7113 if (GCD == Expr)
7114 return One;
7115
7116 bool FoundGCDTerm = false;
7117 for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
7118 if (Expr->getOperand(i) == GCD)
7119 FoundGCDTerm = true;
7120
7121 SmallVector<const SCEV *, 2> Operands;
7122 if (FoundGCDTerm) {
7123 FoundGCDTerm = false;
7124 for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
7125 if (FoundGCDTerm)
7126 Operands.push_back(Expr->getOperand(i));
7127 else if (Expr->getOperand(i) == GCD)
7128 FoundGCDTerm = true;
7129 else
7130 Operands.push_back(Expr->getOperand(i));
7131 }
7132 } else {
Sebastian Popc62c6792013-11-12 22:47:20 +00007133 const SCEV *PartialGCD = One;
7134 for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
7135 if (PartialGCD == GCD) {
7136 Operands.push_back(Expr->getOperand(i));
7137 continue;
7138 }
7139
7140 const SCEV *Rem = Zero;
7141 const SCEV *Res = SCEVGCD::findGCD(SE, Expr->getOperand(i), GCD, &Rem);
7142 if (Rem == Zero) {
7143 PartialGCD = SE.getMulExpr(PartialGCD, Res);
Sebastian Popb2fdacf2014-04-08 21:21:13 +00007144 Operands.push_back(divide(SE, Expr->getOperand(i), Res));
Sebastian Popc62c6792013-11-12 22:47:20 +00007145 } else {
7146 Operands.push_back(Expr->getOperand(i));
7147 }
7148 }
7149 }
7150
7151 if (Operands.size() == 1)
7152 return Operands[0];
7153 return SE.getMulExpr(Operands);
7154 }
7155
7156 const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
7157 if (GCD == Expr)
7158 return One;
7159 return Expr;
7160 }
7161
7162 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
7163 if (GCD == Expr)
7164 return One;
7165
7166 assert(Expr->isAffine() && "Expr should be affine");
7167
7168 const SCEV *Start = divide(SE, Expr->getStart(), GCD);
7169 const SCEV *Step = divide(SE, Expr->getStepRecurrence(SE), GCD);
7170
7171 return SE.getAddRecExpr(Start, Step, Expr->getLoop(),
7172 Expr->getNoWrapFlags());
7173 }
7174
7175 const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
7176 if (GCD == Expr)
7177 return One;
7178 return Expr;
7179 }
7180
7181 const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
7182 if (GCD == Expr)
7183 return One;
7184 return Expr;
7185 }
7186
7187 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
7188 if (GCD == Expr)
7189 return One;
7190 return Expr;
7191 }
7192
7193 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
7194 return Expr;
7195 }
7196
7197private:
7198 ScalarEvolution &SE;
7199 const SCEV *GCD, *Zero, *One;
7200};
7201}
7202
7203/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
7204/// sizes of an array access. Returns the remainder of the delinearization that
Sebastian Pop7ee14722013-11-13 22:37:58 +00007205/// is the start offset of the array. The SCEV->delinearize algorithm computes
7206/// the multiples of SCEV coefficients: that is, a pattern matching of sub-
7207/// expressions in the stride and base of a SCEV corresponding to the
7208/// computation of a GCD (greatest common divisor) of base and stride. When
7209/// SCEV->delinearize fails, it returns the SCEV unchanged.
7210///
7211/// For example: when analyzing the memory access A[i][j][k] in this loop nest
7212///
7213/// void foo(long n, long m, long o, double A[n][m][o]) {
7214///
7215/// for (long i = 0; i < n; i++)
7216/// for (long j = 0; j < m; j++)
7217/// for (long k = 0; k < o; k++)
7218/// A[i][j][k] = 1.0;
7219/// }
7220///
7221/// the delinearization input is the following AddRec SCEV:
7222///
7223/// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
7224///
7225/// From this SCEV, we are able to say that the base offset of the access is %A
7226/// because it appears as an offset that does not divide any of the strides in
7227/// because it appears as an offset that is not divisible by any of the strides
7228/// of the loops:
7229/// CHECK: Base offset: %A
7230///
7231/// and then SCEV->delinearize determines the size of some of the dimensions of
7232/// the array, as these are the multiples relating the strides of the nested loops:
7233///
7234/// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
7235///
7236/// Note that the outermost dimension remains of UnknownSize because there are
7237/// no strides that would help identify the size of that dimension: when
7238/// the array has been statically allocated, one could compute the size of that
7239/// dimension by dividing the overall size of the array by the size of the known
7240/// dimensions: %m * %o * 8.
7241///
7242/// Finally, delinearize provides the access functions for the array reference
7243/// that corresponds to A[i][j][k] in the above C testcase:
7244///
7245/// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
7246///
7247/// The testcases check the output of a function pass,
7248/// DelinearizationPass, which walks through all loads and stores of a function,
7249/// asking for the SCEV of the memory access with respect to all enclosing
7250/// loops, calling SCEV->delinearize on that and printing the results.
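///
/// As a rough sketch of the recursion on the example above (assuming findGCD
/// behaves as described): the outermost call sees Step = 8 and finds GCD = 8
/// with Remainder = %A, leaving the quotient
/// {{0,+,(%m * %o)}<%for.i>,+,%o}<%for.j>; the next call peels off the factor
/// %o, leaving {0,+,%m}<%for.i>; the innermost call records %m. Unwinding the
/// recursion yields Subscripts = [{0,+,1}<%for.i>, {0,+,1}<%for.j>,
/// {0,+,1}<%for.k>] and Sizes = [%m, %o, 8], and %A is returned as the base
/// offset.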
7251
Sebastian Popc62c6792013-11-12 22:47:20 +00007252const SCEV *
7253SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
7254 SmallVectorImpl<const SCEV *> &Subscripts,
7255 SmallVectorImpl<const SCEV *> &Sizes) const {
Sebastian Pop7ee14722013-11-13 22:37:58 +00007256 // Early exit in case this SCEV is not an affine multivariate function.
Sebastian Popc62c6792013-11-12 22:47:20 +00007257 if (!this->isAffine())
7258 return this;
7259
7260 const SCEV *Start = this->getStart();
7261 const SCEV *Step = this->getStepRecurrence(SE);
Sebastian Pop7ee14722013-11-13 22:37:58 +00007262
Alp Tokercb402912014-01-24 17:20:08 +00007263 // Build the SCEV representation of the canonical induction variable in the
Sebastian Pop7ee14722013-11-13 22:37:58 +00007264 // loop of this SCEV.
Sebastian Popc62c6792013-11-12 22:47:20 +00007265 const SCEV *Zero = SE.getConstant(this->getType(), 0);
7266 const SCEV *One = SE.getConstant(this->getType(), 1);
7267 const SCEV *IV =
7268 SE.getAddRecExpr(Zero, One, this->getLoop(), this->getNoWrapFlags());
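  // That is, IV = {0,+,1}<L> where L is the loop of this AddRec.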
7269
7270 DEBUG(dbgs() << "(delinearize: " << *this << "\n");
7271
Sebastian Pop64f12d52014-02-21 18:15:15 +00007272 // When the stride of this SCEV is 1, do not compute the GCD: the size of this
7273  // subscript is 1, and this same SCEV is used for the access function.
7274 const SCEV *Remainder = Zero;
7275 const SCEV *GCD = One;
Sebastian Popc62c6792013-11-12 22:47:20 +00007276
Sebastian Pop7ee14722013-11-13 22:37:58 +00007277 // Find the GCD and Remainder of the Start and Step coefficients of this SCEV.
Sebastian Pop64f12d52014-02-21 18:15:15 +00007278 if (Step != One && !Step->isAllOnesValue())
7279 GCD = SCEVGCD::findGCD(SE, Start, Step, &Remainder);
Sebastian Popc62c6792013-11-12 22:47:20 +00007280
7281 DEBUG(dbgs() << "GCD: " << *GCD << "\n");
7282 DEBUG(dbgs() << "Remainder: " << *Remainder << "\n");
7283
Sebastian Pop64f12d52014-02-21 18:15:15 +00007284 const SCEV *Quotient = Start;
7285 if (GCD != One && !GCD->isAllOnesValue())
7286 // As findGCD computed Remainder, GCD divides "Start - Remainder." The
7287 // Quotient is then this SCEV without Remainder, scaled down by the GCD. The
7288 // Quotient is what will be used in the next subscript delinearization.
7289 Quotient = SCEVDivision::divide(SE, SE.getMinusSCEV(Start, Remainder), GCD);
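  // For instance, with Start = {%A,+,(8 * %o)}<%for.j>, Remainder = %A, and
  // GCD = 8, the Quotient computed here would be {0,+,%o}<%for.j>.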
Sebastian Popc62c6792013-11-12 22:47:20 +00007290
Sebastian Popc62c6792013-11-12 22:47:20 +00007291 DEBUG(dbgs() << "Quotient: " << *Quotient << "\n");
7292
Sebastian Pop64f12d52014-02-21 18:15:15 +00007293 const SCEV *Rem = Quotient;
Sebastian Popc62c6792013-11-12 22:47:20 +00007294 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Quotient))
Sebastian Pop7ee14722013-11-13 22:37:58 +00007295 // Recursively call delinearize on the Quotient until there are no more
7296 // multiples that can be recognized.
Sebastian Popc62c6792013-11-12 22:47:20 +00007297 Rem = AR->delinearize(SE, Subscripts, Sizes);
Sebastian Popc62c6792013-11-12 22:47:20 +00007298
Alp Tokercb402912014-01-24 17:20:08 +00007299 // Scale up the canonical induction variable IV by whatever remains from the
Sebastian Pop7ee14722013-11-13 22:37:58 +00007300  // Step after division by the GCD: the GCD is the size of the enclosed sub-array.
Sebastian Pop64f12d52014-02-21 18:15:15 +00007301 if (Step != One && !Step->isAllOnesValue() && GCD != One &&
7302 !GCD->isAllOnesValue() && Step != GCD) {
Sebastian Popc62c6792013-11-12 22:47:20 +00007303 Step = SCEVDivision::divide(SE, Step, GCD);
7304 IV = SE.getMulExpr(IV, Step);
7305 }
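  // E.g., if the remaining Step is twice the GCD, the subscript built below
  // becomes {0,+,2}<L> instead of the canonical {0,+,1}<L>.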
Alp Tokercb402912014-01-24 17:20:08 +00007306  // The access function for the current subscript is computed as the canonical
Sebastian Pop7ee14722013-11-13 22:37:58 +00007307  // induction variable IV (potentially scaled up by the step), offset by
7308 // Rem, the offset of delinearization in the sub-array.
Sebastian Popc62c6792013-11-12 22:47:20 +00007309 const SCEV *Index = SE.getAddExpr(IV, Rem);
7310
Sebastian Pop7ee14722013-11-13 22:37:58 +00007311 // Record the access function and the size of the current subscript.
Sebastian Popc62c6792013-11-12 22:47:20 +00007312 Subscripts.push_back(Index);
7313 Sizes.push_back(GCD);
7314
7315#ifndef NDEBUG
7316 int Size = Sizes.size();
7317 DEBUG(dbgs() << "succeeded to delinearize " << *this << "\n");
7318 DEBUG(dbgs() << "ArrayDecl[UnknownSize]");
7319 for (int i = 0; i < Size - 1; i++)
7320 DEBUG(dbgs() << "[" << *Sizes[i] << "]");
7321 DEBUG(dbgs() << " with elements of " << *Sizes[Size - 1] << " bytes.\n");
7322
7323 DEBUG(dbgs() << "ArrayRef");
7324 for (int i = 0; i < Size; i++)
7325 DEBUG(dbgs() << "[" << *Subscripts[i] << "]");
7326 DEBUG(dbgs() << "\n)\n");
7327#endif
7328
7329 return Remainder;
7330}
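
// Illustrative use of the interface above (a sketch with hypothetical names):
// given an affine AddRec `AccessFn` describing a memory access, a client such
// as the DelinearizationPass mentioned above would do roughly
//
//   SmallVector<const SCEV *, 4> Subscripts, Sizes;
//   const SCEV *BaseOffset = AccessFn->delinearize(SE, Subscripts, Sizes);
//
// after which Subscripts[d] is the access function of dimension d, Sizes[d]
// its size, and Sizes.back() the element size in bytes.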
Chris Lattnerd934c702004-04-02 20:23:17 +00007331
7332//===----------------------------------------------------------------------===//
Dan Gohman48f82222009-05-04 22:30:44 +00007333// SCEVCallbackVH Class Implementation
7334//===----------------------------------------------------------------------===//
7335
Dan Gohmand33a0902009-05-19 19:22:47 +00007336void ScalarEvolution::SCEVCallbackVH::deleted() {
Dan Gohmandd707af2009-07-13 22:20:53 +00007337 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
Dan Gohman48f82222009-05-04 22:30:44 +00007338 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
7339 SE->ConstantEvolutionLoopExitValue.erase(PN);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00007340 SE->ValueExprMap.erase(getValPtr());
Dan Gohman48f82222009-05-04 22:30:44 +00007341 // this now dangles!
7342}
7343
Dan Gohman7a066722010-07-28 01:09:07 +00007344void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
Dan Gohmandd707af2009-07-13 22:20:53 +00007345 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
Eric Christopheref6d5932010-07-29 01:25:38 +00007346
Dan Gohman48f82222009-05-04 22:30:44 +00007347 // Forget all the expressions associated with users of the old value,
7348 // so that future queries will recompute the expressions using the new
7349 // value.
Dan Gohman7cac9572010-08-02 23:49:30 +00007350 Value *Old = getValPtr();
Chandler Carruthcdf47882014-03-09 03:16:01 +00007351 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
Dan Gohmanf34f8632009-07-14 14:34:04 +00007352 SmallPtrSet<User *, 8> Visited;
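  // Walk the transitive users of Old with this worklist, dropping their
  // cached SCEVs; Old itself is skipped in the loop and erased afterwards.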
Dan Gohman48f82222009-05-04 22:30:44 +00007353 while (!Worklist.empty()) {
7354 User *U = Worklist.pop_back_val();
7355 // Deleting the Old value will cause this to dangle. Postpone
7356 // that until everything else is done.
Dan Gohman8aeb0fb2010-07-28 00:28:25 +00007357 if (U == Old)
Dan Gohman48f82222009-05-04 22:30:44 +00007358 continue;
Dan Gohmanf34f8632009-07-14 14:34:04 +00007359 if (!Visited.insert(U))
7360 continue;
Dan Gohman48f82222009-05-04 22:30:44 +00007361 if (PHINode *PN = dyn_cast<PHINode>(U))
7362 SE->ConstantEvolutionLoopExitValue.erase(PN);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00007363 SE->ValueExprMap.erase(U);
Chandler Carruthcdf47882014-03-09 03:16:01 +00007364 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
Dan Gohman48f82222009-05-04 22:30:44 +00007365 }
Dan Gohman8aeb0fb2010-07-28 00:28:25 +00007366 // Delete the Old value.
7367 if (PHINode *PN = dyn_cast<PHINode>(Old))
7368 SE->ConstantEvolutionLoopExitValue.erase(PN);
Dan Gohman9bad2fb2010-08-27 18:55:03 +00007369 SE->ValueExprMap.erase(Old);
Dan Gohman8aeb0fb2010-07-28 00:28:25 +00007370 // this now dangles!
Dan Gohman48f82222009-05-04 22:30:44 +00007371}
7372
Dan Gohmand33a0902009-05-19 19:22:47 +00007373ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
Dan Gohman48f82222009-05-04 22:30:44 +00007374 : CallbackVH(V), SE(se) {}
7375
7376//===----------------------------------------------------------------------===//
Chris Lattnerd934c702004-04-02 20:23:17 +00007377// ScalarEvolution Class Implementation
7378//===----------------------------------------------------------------------===//
7379
Dan Gohmanc8e23622009-04-21 23:15:49 +00007380ScalarEvolution::ScalarEvolution()
Craig Topper9f008862014-04-15 04:59:12 +00007381 : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64),
7382 BlockDispositions(64), FirstUnknown(nullptr) {
Owen Anderson6c18d1a2010-10-19 17:21:58 +00007383 initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
Dan Gohmanc8e23622009-04-21 23:15:49 +00007384}
7385
Chris Lattnerd934c702004-04-02 20:23:17 +00007386bool ScalarEvolution::runOnFunction(Function &F) {
Dan Gohmanc8e23622009-04-21 23:15:49 +00007387 this->F = &F;
7388 LI = &getAnalysis<LoopInfo>();
Rafael Espindola93512512014-02-25 17:30:31 +00007389 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
Craig Topper9f008862014-04-15 04:59:12 +00007390 DL = DLP ? &DLP->getDataLayout() : nullptr;
Chad Rosierc24b86f2011-12-01 03:08:23 +00007391 TLI = &getAnalysis<TargetLibraryInfo>();
Chandler Carruth73523022014-01-13 13:07:17 +00007392 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
Chris Lattnerd934c702004-04-02 20:23:17 +00007393 return false;
7394}
7395
7396void ScalarEvolution::releaseMemory() {
Dan Gohman7cac9572010-08-02 23:49:30 +00007397 // Iterate through all the SCEVUnknown instances and call their
7398 // destructors, so that they release their references to their values.
7399 for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
7400 U->~SCEVUnknown();
Craig Topper9f008862014-04-15 04:59:12 +00007401 FirstUnknown = nullptr;
Dan Gohman7cac9572010-08-02 23:49:30 +00007402
Dan Gohman9bad2fb2010-08-27 18:55:03 +00007403 ValueExprMap.clear();
Andrew Trick3ca3f982011-07-26 17:19:55 +00007404
7405 // Free any extra memory created for ExitNotTakenInfo in the unlikely event
7406 // that a loop had multiple computable exits.
7407 for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
7408 BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
7409 I != E; ++I) {
7410 I->second.clear();
7411 }
7412
Andrew Trick7fa4e0f2012-05-19 00:48:25 +00007413 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
7414
Dan Gohmanc8e23622009-04-21 23:15:49 +00007415 BackedgeTakenCounts.clear();
7416 ConstantEvolutionLoopExitValue.clear();
Dan Gohman5122d612009-05-08 20:47:27 +00007417 ValuesAtScopes.clear();
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007418 LoopDispositions.clear();
Dan Gohman8ea83d82010-11-18 00:34:22 +00007419 BlockDispositions.clear();
Dan Gohman761065e2010-11-17 02:44:44 +00007420 UnsignedRanges.clear();
7421 SignedRanges.clear();
Dan Gohmanc5c85c02009-06-27 21:21:31 +00007422 UniqueSCEVs.clear();
7423 SCEVAllocator.Reset();
Chris Lattnerd934c702004-04-02 20:23:17 +00007424}
7425
7426void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
7427 AU.setPreservesAll();
Chris Lattnerd934c702004-04-02 20:23:17 +00007428 AU.addRequiredTransitive<LoopInfo>();
Chandler Carruth73523022014-01-13 13:07:17 +00007429 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
Chad Rosierc24b86f2011-12-01 03:08:23 +00007430 AU.addRequired<TargetLibraryInfo>();
Dan Gohman0a40ad92009-04-16 03:18:22 +00007431}
7432
Dan Gohmanc8e23622009-04-21 23:15:49 +00007433bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
Dan Gohman0bddac12009-02-24 18:55:53 +00007434 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
Chris Lattnerd934c702004-04-02 20:23:17 +00007435}
7436
Dan Gohmanc8e23622009-04-21 23:15:49 +00007437static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
Chris Lattnerd934c702004-04-02 20:23:17 +00007438 const Loop *L) {
7439 // Print all inner loops first
7440 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
7441 PrintLoopInfo(OS, SE, *I);
Misha Brukman01808ca2005-04-21 21:13:18 +00007442
Dan Gohmanbc694912010-01-09 18:17:45 +00007443 OS << "Loop ";
Chandler Carruthd48cdbf2014-01-09 02:29:41 +00007444 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
Dan Gohmanbc694912010-01-09 18:17:45 +00007445 OS << ": ";
Chris Lattnerd72c3eb2004-04-18 22:14:10 +00007446
Dan Gohmancb0efec2009-12-18 01:14:11 +00007447 SmallVector<BasicBlock *, 8> ExitBlocks;
Chris Lattnerd72c3eb2004-04-18 22:14:10 +00007448 L->getExitBlocks(ExitBlocks);
7449 if (ExitBlocks.size() != 1)
Nick Lewyckyd1200b02008-01-02 02:49:20 +00007450 OS << "<multiple exits> ";
Chris Lattnerd934c702004-04-02 20:23:17 +00007451
Dan Gohman0bddac12009-02-24 18:55:53 +00007452 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
7453 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
Chris Lattnerd934c702004-04-02 20:23:17 +00007454 } else {
Dan Gohman0bddac12009-02-24 18:55:53 +00007455 OS << "Unpredictable backedge-taken count. ";
Chris Lattnerd934c702004-04-02 20:23:17 +00007456 }
7457
Dan Gohmanbc694912010-01-09 18:17:45 +00007458 OS << "\n"
7459 "Loop ";
Chandler Carruthd48cdbf2014-01-09 02:29:41 +00007460 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
Dan Gohmanbc694912010-01-09 18:17:45 +00007461 OS << ": ";
Dan Gohman69942932009-06-24 00:33:16 +00007462
7463 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
7464 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
7465 } else {
7466 OS << "Unpredictable max backedge-taken count. ";
7467 }
7468
7469 OS << "\n";
Chris Lattnerd934c702004-04-02 20:23:17 +00007470}
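
// For a loop with a computable trip count, PrintLoopInfo emits lines roughly
// of the form (the exact SCEV text depends on the loop):
//
//   Loop %for.body: backedge-taken count is (-1 + %n)
//   Loop %for.body: max backedge-taken count is (-1 + %n)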
7471
Dan Gohmancb0efec2009-12-18 01:14:11 +00007472void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
Dan Gohman8b0a4192010-03-01 17:49:51 +00007473 // ScalarEvolution's implementation of the print method is to print
Dan Gohmanc8e23622009-04-21 23:15:49 +00007474 // out SCEV values of all instructions that are interesting. Doing
7475 // this potentially causes it to create new SCEV objects though,
7476 // which technically conflicts with the const qualifier. This isn't
Dan Gohman028e6152009-07-10 20:25:29 +00007477 // observable from outside the class though, so casting away the
7478 // const isn't dangerous.
Dan Gohmancb0efec2009-12-18 01:14:11 +00007479 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
Chris Lattnerd934c702004-04-02 20:23:17 +00007480
Dan Gohmanbc694912010-01-09 18:17:45 +00007481 OS << "Classifying expressions for: ";
Chandler Carruthd48cdbf2014-01-09 02:29:41 +00007482 F->printAsOperand(OS, /*PrintType=*/false);
Dan Gohmanbc694912010-01-09 18:17:45 +00007483 OS << "\n";
Chris Lattnerd934c702004-04-02 20:23:17 +00007484 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
Dan Gohmand18dc2c2010-05-03 17:03:23 +00007485 if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
Dan Gohmanfda3c4a2009-07-13 23:03:05 +00007486 OS << *I << '\n';
Dan Gohman81313fd2008-09-14 17:21:12 +00007487 OS << " --> ";
Dan Gohmanaf752342009-07-07 17:06:11 +00007488 const SCEV *SV = SE.getSCEV(&*I);
Chris Lattnerd934c702004-04-02 20:23:17 +00007489 SV->print(OS);
Misha Brukman01808ca2005-04-21 21:13:18 +00007490
Dan Gohmanb9063a82009-06-19 17:49:54 +00007491 const Loop *L = LI->getLoopFor((*I).getParent());
7492
Dan Gohmanaf752342009-07-07 17:06:11 +00007493 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
Dan Gohmanb9063a82009-06-19 17:49:54 +00007494 if (AtUse != SV) {
7495 OS << " --> ";
7496 AtUse->print(OS);
7497 }
7498
7499 if (L) {
Dan Gohman94c468f2009-06-18 00:37:45 +00007500 OS << "\t\t" "Exits: ";
Dan Gohmanaf752342009-07-07 17:06:11 +00007501 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
Dan Gohmanafd6db92010-11-17 21:23:15 +00007502 if (!SE.isLoopInvariant(ExitValue, L)) {
Chris Lattnerd934c702004-04-02 20:23:17 +00007503 OS << "<<Unknown>>";
7504 } else {
7505 OS << *ExitValue;
7506 }
7507 }
7508
Chris Lattnerd934c702004-04-02 20:23:17 +00007509 OS << "\n";
7510 }
7511
Dan Gohmanbc694912010-01-09 18:17:45 +00007512 OS << "Determining loop execution counts for: ";
Chandler Carruthd48cdbf2014-01-09 02:29:41 +00007513 F->printAsOperand(OS, /*PrintType=*/false);
Dan Gohmanbc694912010-01-09 18:17:45 +00007514 OS << "\n";
Dan Gohmanc8e23622009-04-21 23:15:49 +00007515 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
7516 PrintLoopInfo(OS, &SE, *I);
Chris Lattnerd934c702004-04-02 20:23:17 +00007517}
Dan Gohmane20f8242009-04-21 00:47:46 +00007518
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007519ScalarEvolution::LoopDisposition
7520ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
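  // Memoization: return a cached disposition for (S, L) if there is one;
  // otherwise record a conservative LoopVariant placeholder, compute the real
  // disposition, and patch it in. The vector is re-fetched afterwards because
  // the recursive computation may have resized LoopDispositions.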
Wan Xiaofeib2c8cdc2013-11-12 09:40:41 +00007521  SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values =
      LoopDispositions[S];
7522 for (unsigned u = 0; u < Values.size(); u++) {
7523 if (Values[u].first == L)
7524 return Values[u].second;
7525 }
7526 Values.push_back(std::make_pair(L, LoopVariant));
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007527 LoopDisposition D = computeLoopDisposition(S, L);
Wan Xiaofeib2c8cdc2013-11-12 09:40:41 +00007528  SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values2 =
      LoopDispositions[S];
7529 for (unsigned u = Values2.size(); u > 0; u--) {
7530 if (Values2[u - 1].first == L) {
7531 Values2[u - 1].second = D;
7532 break;
7533 }
7534 }
7535 return D;
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007536}
7537
7538ScalarEvolution::LoopDisposition
7539ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
Benjamin Kramer987b8502014-02-11 19:02:55 +00007540 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
Dan Gohmanafd6db92010-11-17 21:23:15 +00007541 case scConstant:
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007542 return LoopInvariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007543 case scTruncate:
7544 case scZeroExtend:
7545 case scSignExtend:
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007546 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
Dan Gohmanafd6db92010-11-17 21:23:15 +00007547 case scAddRecExpr: {
7548 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
7549
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007550 // If L is the addrec's loop, it's computable.
7551 if (AR->getLoop() == L)
7552 return LoopComputable;
7553
Dan Gohmanafd6db92010-11-17 21:23:15 +00007554 // Add recurrences are never invariant in the function-body (null loop).
7555 if (!L)
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007556 return LoopVariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007557
7558 // This recurrence is variant w.r.t. L if L contains AR's loop.
7559 if (L->contains(AR->getLoop()))
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007560 return LoopVariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007561
7562 // This recurrence is invariant w.r.t. L if AR's loop contains L.
7563 if (AR->getLoop()->contains(L))
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007564 return LoopInvariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007565
7566 // This recurrence is variant w.r.t. L if any of its operands
7567 // are variant.
7568 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
7569 I != E; ++I)
7570 if (!isLoopInvariant(*I, L))
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007571 return LoopVariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007572
7573 // Otherwise it's loop-invariant.
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007574 return LoopInvariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007575 }
7576 case scAddExpr:
7577 case scMulExpr:
7578 case scUMaxExpr:
7579 case scSMaxExpr: {
7580 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
Dan Gohmanafd6db92010-11-17 21:23:15 +00007581 bool HasVarying = false;
7582 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
7583 I != E; ++I) {
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007584 LoopDisposition D = getLoopDisposition(*I, L);
7585 if (D == LoopVariant)
7586 return LoopVariant;
7587 if (D == LoopComputable)
7588 HasVarying = true;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007589 }
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007590 return HasVarying ? LoopComputable : LoopInvariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007591 }
7592 case scUDivExpr: {
7593 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007594 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
7595 if (LD == LoopVariant)
7596 return LoopVariant;
7597 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
7598 if (RD == LoopVariant)
7599 return LoopVariant;
7600 return (LD == LoopInvariant && RD == LoopInvariant) ?
7601 LoopInvariant : LoopComputable;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007602 }
7603 case scUnknown:
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007604 // All non-instruction values are loop invariant. All instructions are loop
7605 // invariant if they are not contained in the specified loop.
7606 // Instructions are never considered invariant in the function body
7607 // (null loop) because they are defined within the "loop".
7608 if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
7609 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
7610 return LoopInvariant;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007611 case scCouldNotCompute:
7612 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
Dan Gohmanafd6db92010-11-17 21:23:15 +00007613 }
Benjamin Kramer987b8502014-02-11 19:02:55 +00007614 llvm_unreachable("Unknown SCEV kind!");
Dan Gohman7ee1bbb2010-11-17 23:21:44 +00007615}
7616
7617bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
7618 return getLoopDisposition(S, L) == LoopInvariant;
7619}
7620
7621bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
7622 return getLoopDisposition(S, L) == LoopComputable;
Dan Gohmanafd6db92010-11-17 21:23:15 +00007623}
Dan Gohman20d9ce22010-11-17 21:41:58 +00007624
Dan Gohman8ea83d82010-11-18 00:34:22 +00007625ScalarEvolution::BlockDisposition
7626ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
Wan Xiaofeib2c8cdc2013-11-12 09:40:41 +00007627  SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values =
      BlockDispositions[S];
7628 for (unsigned u = 0; u < Values.size(); u++) {
7629 if (Values[u].first == BB)
7630 return Values[u].second;
7631 }
7632 Values.push_back(std::make_pair(BB, DoesNotDominateBlock));
Dan Gohman8ea83d82010-11-18 00:34:22 +00007633 BlockDisposition D = computeBlockDisposition(S, BB);
Wan Xiaofeib2c8cdc2013-11-12 09:40:41 +00007634  SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values2 =
      BlockDispositions[S];
7635 for (unsigned u = Values2.size(); u > 0; u--) {
7636 if (Values2[u - 1].first == BB) {
7637 Values2[u - 1].second = D;
7638 break;
7639 }
7640 }
7641 return D;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007642}
7643
Dan Gohman8ea83d82010-11-18 00:34:22 +00007644ScalarEvolution::BlockDisposition
7645ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
Benjamin Kramer987b8502014-02-11 19:02:55 +00007646 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
Dan Gohman20d9ce22010-11-17 21:41:58 +00007647 case scConstant:
Dan Gohman8ea83d82010-11-18 00:34:22 +00007648 return ProperlyDominatesBlock;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007649 case scTruncate:
7650 case scZeroExtend:
7651 case scSignExtend:
Dan Gohman8ea83d82010-11-18 00:34:22 +00007652 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
Dan Gohman20d9ce22010-11-17 21:41:58 +00007653 case scAddRecExpr: {
7654    // This uses a "dominates" query instead of a "properly dominates" query
Dan Gohman8ea83d82010-11-18 00:34:22 +00007655 // to test for proper dominance too, because the instruction which
7656 // produces the addrec's value is a PHI, and a PHI effectively properly
7657 // dominates its entire containing block.
Dan Gohman20d9ce22010-11-17 21:41:58 +00007658 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
7659 if (!DT->dominates(AR->getLoop()->getHeader(), BB))
Dan Gohman8ea83d82010-11-18 00:34:22 +00007660 return DoesNotDominateBlock;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007661 }
7662 // FALL THROUGH into SCEVNAryExpr handling.
7663 case scAddExpr:
7664 case scMulExpr:
7665 case scUMaxExpr:
7666 case scSMaxExpr: {
7667 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
Dan Gohman8ea83d82010-11-18 00:34:22 +00007668 bool Proper = true;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007669 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
Dan Gohman8ea83d82010-11-18 00:34:22 +00007670 I != E; ++I) {
7671 BlockDisposition D = getBlockDisposition(*I, BB);
7672 if (D == DoesNotDominateBlock)
7673 return DoesNotDominateBlock;
7674 if (D == DominatesBlock)
7675 Proper = false;
7676 }
7677 return Proper ? ProperlyDominatesBlock : DominatesBlock;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007678 }
7679 case scUDivExpr: {
7680 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
Dan Gohman8ea83d82010-11-18 00:34:22 +00007681 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
7682 BlockDisposition LD = getBlockDisposition(LHS, BB);
7683 if (LD == DoesNotDominateBlock)
7684 return DoesNotDominateBlock;
7685 BlockDisposition RD = getBlockDisposition(RHS, BB);
7686 if (RD == DoesNotDominateBlock)
7687 return DoesNotDominateBlock;
7688 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
7689 ProperlyDominatesBlock : DominatesBlock;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007690 }
7691 case scUnknown:
7692 if (Instruction *I =
Dan Gohman8ea83d82010-11-18 00:34:22 +00007693 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
7694 if (I->getParent() == BB)
7695 return DominatesBlock;
7696 if (DT->properlyDominates(I->getParent(), BB))
7697 return ProperlyDominatesBlock;
7698 return DoesNotDominateBlock;
7699 }
7700 return ProperlyDominatesBlock;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007701 case scCouldNotCompute:
7702 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
Dan Gohman20d9ce22010-11-17 21:41:58 +00007703 }
Benjamin Kramer987b8502014-02-11 19:02:55 +00007704 llvm_unreachable("Unknown SCEV kind!");
Dan Gohman8ea83d82010-11-18 00:34:22 +00007705}
7706
7707bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
7708 return getBlockDisposition(S, BB) >= DominatesBlock;
7709}
7710
7711bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
7712 return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
Dan Gohman20d9ce22010-11-17 21:41:58 +00007713}
Dan Gohman534749b2010-11-17 22:27:42 +00007714
Andrew Trick365e31c2012-07-13 23:33:03 +00007715namespace {
7716// Search for a SCEV expression node within an expression tree.
7717// Implements SCEVTraversal::Visitor.
7718struct SCEVSearch {
7719 const SCEV *Node;
7720 bool IsFound;
7721
7722 SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
7723
7724 bool follow(const SCEV *S) {
7725 IsFound |= (S == Node);
7726 return !IsFound;
7727 }
7728 bool isDone() const { return IsFound; }
7729};
7730}
7731
Dan Gohman534749b2010-11-17 22:27:42 +00007732bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
Andrew Trick365e31c2012-07-13 23:33:03 +00007733 SCEVSearch Search(Op);
7734 visitAll(S, Search);
7735 return Search.IsFound;
Dan Gohman534749b2010-11-17 22:27:42 +00007736}
Dan Gohman7e6b3932010-11-17 23:28:48 +00007737
7738void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
7739 ValuesAtScopes.erase(S);
7740 LoopDispositions.erase(S);
Dan Gohman8ea83d82010-11-18 00:34:22 +00007741 BlockDispositions.erase(S);
Dan Gohman7e6b3932010-11-17 23:28:48 +00007742 UnsignedRanges.erase(S);
7743 SignedRanges.erase(S);
Andrew Trick9093e152013-03-26 03:14:53 +00007744
7745 for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
7746 BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) {
7747 BackedgeTakenInfo &BEInfo = I->second;
7748 if (BEInfo.hasOperand(S, this)) {
7749 BEInfo.clear();
7750 BackedgeTakenCounts.erase(I++);
7751 }
7752 else
7753 ++I;
7754 }
Dan Gohman7e6b3932010-11-17 23:28:48 +00007755}
Benjamin Kramer214935e2012-10-26 17:31:32 +00007756
7757typedef DenseMap<const Loop *, std::string> VerifyMap;
Benjamin Kramer24d270d2012-10-27 10:45:01 +00007758
Alp Tokercb402912014-01-24 17:20:08 +00007759/// replaceSubString - Replaces all occurrences of From in Str with To.
Benjamin Kramer24d270d2012-10-27 10:45:01 +00007760static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
7761 size_t Pos = 0;
7762 while ((Pos = Str.find(From, Pos)) != std::string::npos) {
7763 Str.replace(Pos, From.size(), To.data(), To.size());
7764 Pos += To.size();
7765 }
7766}
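
// For instance, replaceSubString(S, "<nsw>", "") turns "{0,+,1}<nsw><%loop>"
// into "{0,+,1}<%loop>"; this is how the wrap flags are stripped below.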
7767
Benjamin Kramer214935e2012-10-26 17:31:32 +00007768/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
7769static void
7770getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
7771 for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
7772 getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
7773
7774 std::string &S = Map[L];
7775 if (S.empty()) {
7776 raw_string_ostream OS(S);
7777 SE.getBackedgeTakenCount(L)->print(OS);
Benjamin Kramer24d270d2012-10-27 10:45:01 +00007778
7779 // false and 0 are semantically equivalent. This can happen in dead loops.
7780 replaceSubString(OS.str(), "false", "0");
7781 // Remove wrap flags, their use in SCEV is highly fragile.
7782 // FIXME: Remove this when SCEV gets smarter about them.
7783 replaceSubString(OS.str(), "<nw>", "");
7784 replaceSubString(OS.str(), "<nsw>", "");
7785 replaceSubString(OS.str(), "<nuw>", "");
Benjamin Kramer214935e2012-10-26 17:31:32 +00007786 }
7787 }
7788}
7789
7790void ScalarEvolution::verifyAnalysis() const {
7791 if (!VerifySCEV)
7792 return;
7793
7794 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
7795
7796 // Gather stringified backedge taken counts for all loops using SCEV's caches.
7797 // FIXME: It would be much better to store actual values instead of strings,
7798 // but SCEV pointers will change if we drop the caches.
7799 VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
7800 for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
7801 getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
7802
7803 // Gather stringified backedge taken counts for all loops without using
7804 // SCEV's caches.
7805 SE.releaseMemory();
7806 for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
7807 getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
7808
7809 // Now compare whether they're the same with and without caches. This allows
7810 // verifying that no pass changed the cache.
7811 assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
7812 "New loops suddenly appeared!");
7813
7814 for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
7815 OldE = BackedgeDumpsOld.end(),
7816 NewI = BackedgeDumpsNew.begin();
7817 OldI != OldE; ++OldI, ++NewI) {
7818 assert(OldI->first == NewI->first && "Loop order changed!");
7819
7820    // Compare the stringified SCEVs. We don't care if an undef backedge-taken
7821    // count changes.
Benjamin Kramer5bc077a2012-10-27 11:36:07 +00007822 // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
Benjamin Kramer214935e2012-10-26 17:31:32 +00007823 // means that a pass is buggy or SCEV has to learn a new pattern but is
7824 // usually not harmful.
7825 if (OldI->second != NewI->second &&
7826 OldI->second.find("undef") == std::string::npos &&
Benjamin Kramer5bc077a2012-10-27 11:36:07 +00007827 NewI->second.find("undef") == std::string::npos &&
7828 OldI->second != "***COULDNOTCOMPUTE***" &&
Benjamin Kramer214935e2012-10-26 17:31:32 +00007829 NewI->second != "***COULDNOTCOMPUTE***") {
Benjamin Kramer5bc077a2012-10-27 11:36:07 +00007830 dbgs() << "SCEVValidator: SCEV for loop '"
Benjamin Kramer214935e2012-10-26 17:31:32 +00007831 << OldI->first->getHeader()->getName()
Benjamin Kramer5bc077a2012-10-27 11:36:07 +00007832 << "' changed from '" << OldI->second
7833 << "' to '" << NewI->second << "'!\n";
Benjamin Kramer214935e2012-10-26 17:31:32 +00007834 std::abort();
7835 }
7836 }
7837
7838 // TODO: Verify more things.
7839}