//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
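//
// For example, in a loop "for (i = 0; i != n; ++i)" the variable i is
// represented by the add recurrence {0,+,1}<loop>, and a derived expression
// such as "i*4 + x" (with x loop-invariant) folds to the canonical form
// {x,+,4}<loop>.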
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->properlyDominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  const char *OpStr = getOperationStr();
  OS << "(";
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
    OS << **I;
    if (llvm::next(I) != E)
      OS << OpStr;
  }
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
    if (!(*I)->dominates(BB, DT))
      return false;
  return true;
}

bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
    if (!(*I)->properlyDominates(BB, DT))
      return false;
  return true;
}

bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
    if (!(*I)->isLoopInvariant(L))
      return false;
  return true;
}

// hasComputableLoopEvolution - N-ary expressions have computable loop
// evolutions iff they have at least one operand that varies with the loop
// and all of their varying operands have computable loop evolutions.
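// For example, {0,+,1}<L> + x has a computable evolution in L when x is
// invariant in L, whereas an expression whose loop-varying operand is an
// opaque SCEVUnknown defined inside L does not.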
bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
  bool HasVarying = false;
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
    const SCEV *S = *I;
    if (!S->isLoopInvariant(L)) {
      if (S->hasComputableLoopEvolution(L))
        HasVarying = true;
      else
        return false;
    }
  }
  return HasVarying;
}

bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
    const SCEV *S = *I;
    if (O == S || S->hasOperand(O))
      return true;
  }
  return false;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L))
    return false;

  // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
  if (L->contains(QueryLoop))
    return true;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

bool
SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::dominates(BB, DT);
}

bool
SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  // This uses a "dominates" query instead of "properly dominates" query because
  // the instruction which produces the addrec's value is a PHI, and a PHI
  // effectively properly dominates its entire containing block.
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::properlyDominates(BB, DT);
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = NumOperands; i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ">";
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from ValuesAtScopes.
  SE->ValuesAtScopes.erase(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(0);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from ValuesAtScopes.
  SE->ValuesAtScopes.erase(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant. All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return L && !L->contains(I);
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->properlyDominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return getValue()->getType();
}

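// The following three predicates recognize the canonical constant expressions
// that ScalarEvolution prints as sizeof, alignof, and offsetof. Roughly
// sketched (the exact IR spelling is abbreviated here), the patterns are:
//   sizeof(T):      ptrtoint(getelementptr(T* null, 1))
//   alignof(T):     ptrtoint(getelementptr({i1, T}* null, 0, 1))
//   offsetof(S, f): ptrtoint(getelementptr(S* null, 0, f))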
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (const StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

void SCEVUnknown::print(raw_ostream &OS) const {
  const Type *AllocTy;
  if (isSizeOf(AllocTy)) {
    OS << "sizeof(" << *AllocTy << ")";
    return;
  }
  if (isAlignOf(AllocTy)) {
    OS << "alignof(" << *AllocTy << ")";
    return;
  }

  const Type *CTy;
  Constant *FieldNo;
  if (isOffsetOf(CTy, FieldNo)) {
    OS << "offsetof(" << *CTy << ", ";
    WriteAsOperand(OS, FieldNo, false);
    OS << ")";
    return;
  }

  // Otherwise just print it normally.
  WriteAsOperand(OS, getValue(), false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS. This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return false;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return LType < RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return LID < RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return LDepth < RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          if (LNumOps != RNumOps)
            return LNumOps < RNumOps;
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return LBitWidth < RBitWidth;
        return LA.ult(RA);
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return LDepth < RDepth;
        }
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        for (unsigned i = 0; i != LNumOps; ++i) {
          if (i >= RNumOps)
            return false;
          const SCEV *LOp = LC->getOperand(i), *ROp = RC->getOperand(i);
          if (operator()(LOp, ROp))
            return true;
          if (operator()(ROp, LOp))
            return false;
        }
        return LNumOps < RNumOps;
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        const SCEV *LL = LC->getLHS(), *LR = LC->getRHS(),
                   *RL = RC->getLHS(), *RR = RC->getRHS();
        if (operator()(LL, RL))
          return true;
        if (operator()(RL, LL))
          return false;
        if (operator()(LR, RR))
          return true;
        if (operator()(RR, LR))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
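///
/// For example, if the incoming operand list is (y, x, y), then after grouping
/// the two copies of y are adjacent regardless of where x lands, which lets
/// callers such as the add and multiply folders spot and combine duplicate
/// operands (e.g. turning y+y into 2*y) without comparing every pair.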
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K). The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
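  //
  // As a concrete instance of the scheme above, take K = 2: then K! = 2,
  // T = 1, and K!/2^T = 1 (whose multiplicative inverse is 1). The product
  // It*(It-1) is formed at W+1 bits, divided by 2 (a right shift by one),
  // and truncated back to W bits, so BC(It, 2) = trunc((It*(It-1)) /u 2)
  // with all W bits of the result exact.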

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number. We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 828 | /// |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 829 | const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, |
Dan Gohman | c2b015e | 2009-07-21 00:38:55 +0000 | [diff] [blame] | 830 | ScalarEvolution &SE) const { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 831 | const SCEV *Result = getStart(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 832 | for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { |
Wojciech Matyjewicz | e3320a1 | 2008-02-11 11:03:14 +0000 | [diff] [blame] | 833 | // The computation is correct in the face of overflow provided that the |
| 834 | // multiplication is performed _after_ the evaluation of the binomial |
| 835 | // coefficient. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 836 | const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType()); |
Nick Lewycky | cb8f1b5 | 2008-10-13 03:58:02 +0000 | [diff] [blame] | 837 | if (isa<SCEVCouldNotCompute>(Coeff)) |
| 838 | return Coeff; |
| 839 | |
| 840 | Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff)); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 841 | } |
| 842 | return Result; |
| 843 | } |
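
// A standalone numeric check (plain C++, no LLVM types; the constants are
// arbitrary) that stepping the chain of recurrences {A,+,B,+,C} one iteration
// at a time matches the closed form A*BC(It,0) + B*BC(It,1) + C*BC(It,2)
// described above, with BC(It,0) = 1, BC(It,1) = It, BC(It,2) = It*(It-1)/2.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t A = 7, B = 3, C = 2;   // the chrec {7,+,3,+,2}
  int64_t Val = A, Step = B;           // iterate the recurrence directly
  for (int64_t It = 0; It <= 1000; ++It) {
    int64_t Closed = A + B * It + C * (It * (It - 1) / 2);
    assert(Val == Closed);
    Val += Step;                       // the value advances by the current step
    Step += C;                         // the step itself advances by C
  }
  return 0;
}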
| 844 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 845 | //===----------------------------------------------------------------------===// |
| 846 | // SCEV Expression folder implementations |
| 847 | //===----------------------------------------------------------------------===// |
| 848 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 849 | const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, |
Dan Gohman | f5074ec | 2009-07-13 22:05:32 +0000 | [diff] [blame] | 850 | const Type *Ty) { |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 851 | assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && |
Dan Gohman | fb17fd2 | 2009-04-21 00:55:22 +0000 | [diff] [blame] | 852 | "This is not a truncating conversion!"); |
Dan Gohman | 10b9479 | 2009-05-01 16:44:18 +0000 | [diff] [blame] | 853 | assert(isSCEVable(Ty) && |
| 854 | "This is not a conversion to a SCEVable type!"); |
| 855 | Ty = getEffectiveSCEVType(Ty); |
Dan Gohman | fb17fd2 | 2009-04-21 00:55:22 +0000 | [diff] [blame] | 856 | |
Dan Gohman | c050fd9 | 2009-07-13 20:50:19 +0000 | [diff] [blame] | 857 | FoldingSetNodeID ID; |
| 858 | ID.AddInteger(scTruncate); |
| 859 | ID.AddPointer(Op); |
| 860 | ID.AddPointer(Ty); |
| 861 | void *IP = 0; |
| 862 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
| 863 | |
Dan Gohman | c39f44b | 2009-06-30 20:13:32 +0000 | [diff] [blame] | 864 | // Fold if the operand is constant. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 865 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
Dan Gohman | b8be8b7 | 2009-06-24 00:38:39 +0000 | [diff] [blame] | 866 | return getConstant( |
Dan Gohman | 1faa882 | 2010-06-24 16:33:38 +0000 | [diff] [blame] | 867 | cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), |
| 868 | getEffectiveSCEVType(Ty)))); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 869 | |
Dan Gohman | 20900ca | 2009-04-22 16:20:48 +0000 | [diff] [blame] | 870 | // trunc(trunc(x)) --> trunc(x) |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 871 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) |
Dan Gohman | 20900ca | 2009-04-22 16:20:48 +0000 | [diff] [blame] | 872 | return getTruncateExpr(ST->getOperand(), Ty); |
| 873 | |
Nick Lewycky | 5cd28fa | 2009-04-23 05:15:08 +0000 | [diff] [blame] | 874 | // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 875 | if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) |
Nick Lewycky | 5cd28fa | 2009-04-23 05:15:08 +0000 | [diff] [blame] | 876 | return getTruncateOrSignExtend(SS->getOperand(), Ty); |
| 877 | |
| 878 | // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 879 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) |
Nick Lewycky | 5cd28fa | 2009-04-23 05:15:08 +0000 | [diff] [blame] | 880 | return getTruncateOrZeroExtend(SZ->getOperand(), Ty); |
| 881 | |
Dan Gohman | 6864db6 | 2009-06-18 16:24:47 +0000 | [diff] [blame] | 882 | // If the input value is a chrec scev, truncate the chrec's operands. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 883 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 884 | SmallVector<const SCEV *, 4> Operands; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 885 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) |
Dan Gohman | 728c7f3 | 2009-05-08 21:03:19 +0000 | [diff] [blame] | 886 | Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty)); |
| 887 | return getAddRecExpr(Operands, AddRec->getLoop()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 888 | } |
| 889 | |
Dan Gohman | f53462d | 2010-07-15 20:02:11 +0000 | [diff] [blame] | 890 | // As a special case, fold trunc(undef) to undef. We don't want to |
| 891 | // know too much about SCEVUnknowns, but this special case is handy |
| 892 | // and harmless. |
| 893 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op)) |
| 894 | if (isa<UndefValue>(U->getValue())) |
| 895 | return getSCEV(UndefValue::get(Ty)); |
| 896 | |
Dan Gohman | 420ab91 | 2010-06-25 18:47:08 +0000 | [diff] [blame] | 897 | // The cast wasn't folded; create an explicit cast node. We can reuse |
| 898 | // the existing insert position since if we get here, we won't have |
| 899 | // made any changes which would invalidate it. |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 900 | SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), |
| 901 | Op, Ty); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 902 | UniqueSCEVs.InsertNode(S, IP); |
| 903 | return S; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 904 | } |
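
// A standalone spot check of the cast-folding rules above, using ordinary C++
// integer conversions as a stand-in for the SCEV cast expressions (and
// assuming the usual two's-complement modular narrowing, guaranteed since
// C++20): a truncate of a sign- or zero-extension is a single conversion from
// the original narrow type, extending when the destination is wider than that
// type and truncating when it is narrower.
#include <cassert>
#include <cstdint>

int main() {
  for (int V = -128; V <= 127; ++V) {
    int8_t X = (int8_t)V;
    // trunc to i16 of sext i8->i32: the destination is wider than i8, so the
    // result is just sext i8->i16.
    assert((int16_t)(int32_t)X == (int16_t)X);
    // trunc to i16 of zext i8->i32: likewise zext i8->i16.
    assert((uint16_t)(uint32_t)(uint8_t)X == (uint16_t)(uint8_t)X);
  }
  for (int V = -32768; V <= 32767; ++V) {
    int16_t X = (int16_t)V;
    // trunc to i8 of sext i16->i32: the destination is narrower than i16, so
    // the result is just trunc i16->i8.
    assert((int8_t)(int32_t)X == (int8_t)X);
  }
  return 0;
}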
| 905 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 906 | const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, |
Dan Gohman | f5074ec | 2009-07-13 22:05:32 +0000 | [diff] [blame] | 907 | const Type *Ty) { |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 908 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && |
Dan Gohman | 8170a68 | 2009-04-16 19:25:55 +0000 | [diff] [blame] | 909 | "This is not an extending conversion!"); |
Dan Gohman | 10b9479 | 2009-05-01 16:44:18 +0000 | [diff] [blame] | 910 | assert(isSCEVable(Ty) && |
| 911 | "This is not a conversion to a SCEVable type!"); |
| 912 | Ty = getEffectiveSCEVType(Ty); |
Dan Gohman | 8170a68 | 2009-04-16 19:25:55 +0000 | [diff] [blame] | 913 | |
Dan Gohman | c39f44b | 2009-06-30 20:13:32 +0000 | [diff] [blame] | 914 | // Fold if the operand is constant. |
Dan Gohman | eaf6cf2 | 2010-06-24 16:47:03 +0000 | [diff] [blame] | 915 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
| 916 | return getConstant( |
| 917 | cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), |
| 918 | getEffectiveSCEVType(Ty)))); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 919 | |
Dan Gohman | 20900ca | 2009-04-22 16:20:48 +0000 | [diff] [blame] | 920 | // zext(zext(x)) --> zext(x) |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 921 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) |
Dan Gohman | 20900ca | 2009-04-22 16:20:48 +0000 | [diff] [blame] | 922 | return getZeroExtendExpr(SZ->getOperand(), Ty); |
| 923 | |
Dan Gohman | 69fbc7f | 2009-07-13 20:55:53 +0000 | [diff] [blame] | 924 | // Before doing any expensive analysis, check to see if we've already |
| 925 | // computed a SCEV for this Op and Ty. |
| 926 | FoldingSetNodeID ID; |
| 927 | ID.AddInteger(scZeroExtend); |
| 928 | ID.AddPointer(Op); |
| 929 | ID.AddPointer(Ty); |
| 930 | void *IP = 0; |
| 931 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
| 932 | |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 933 | // If the input value is a chrec scev, and we can prove that the value |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 934 | // does not overflow in the old, smaller type, we can zero extend all of the
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 935 | // operands (often constants). This allows analysis of something like |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 936 | // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 937 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 938 | if (AR->isAffine()) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 939 | const SCEV *Start = AR->getStart(); |
| 940 | const SCEV *Step = AR->getStepRecurrence(*this); |
| 941 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); |
| 942 | const Loop *L = AR->getLoop(); |
| 943 | |
Dan Gohman | eb490a7 | 2009-07-25 01:22:26 +0000 | [diff] [blame] | 944 | // If we have special knowledge that this addrec won't overflow, |
| 945 | // we don't need to do any further analysis. |
Dan Gohman | 5078f84 | 2009-08-20 17:11:38 +0000 | [diff] [blame] | 946 | if (AR->hasNoUnsignedWrap()) |
Dan Gohman | eb490a7 | 2009-07-25 01:22:26 +0000 | [diff] [blame] | 947 | return getAddRecExpr(getZeroExtendExpr(Start, Ty), |
| 948 | getZeroExtendExpr(Step, Ty), |
| 949 | L); |
| 950 | |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 951 | // Check whether the backedge-taken count is SCEVCouldNotCompute. |
| 952 | // Note that this serves two purposes: It filters out loops that are |
| 953 | // simply not analyzable, and it covers the case where this code is |
| 954 | // being called from within backedge-taken count analysis, such that |
| 955 | // attempting to ask for the backedge-taken count would likely result |
| 956 | // in infinite recursion. In the latter case, the analysis code will
| 957 | // cope with a conservative value, and it will take care to purge |
| 958 | // that value once it has finished. |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 959 | const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 960 | if (!isa<SCEVCouldNotCompute>(MaxBECount)) { |
Dan Gohman | f0aa485 | 2009-04-29 01:54:20 +0000 | [diff] [blame] | 961 | // Manually compute the final value for AR, checking for |
Dan Gohman | ac70cea | 2009-04-29 22:28:28 +0000 | [diff] [blame] | 962 | // overflow. |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 963 | |
| 964 | // Check whether the backedge-taken count can be losslessly cast to
| 965 | // the addrec's type. The count is always unsigned. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 966 | const SCEV *CastedMaxBECount = |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 967 | getTruncateOrZeroExtend(MaxBECount, Start->getType()); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 968 | const SCEV *RecastedMaxBECount = |
Dan Gohman | 5183cae | 2009-05-18 15:58:39 +0000 | [diff] [blame] | 969 | getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); |
| 970 | if (MaxBECount == RecastedMaxBECount) { |
Owen Anderson | 1d0be15 | 2009-08-13 21:58:54 +0000 | [diff] [blame] | 971 | const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 972 | // Check whether Start+Step*MaxBECount has no unsigned overflow. |
Dan Gohman | 8f767d9 | 2010-02-24 19:31:06 +0000 | [diff] [blame] | 973 | const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 974 | const SCEV *Add = getAddExpr(Start, ZMul); |
| 975 | const SCEV *OperandExtendedAdd = |
Dan Gohman | 5183cae | 2009-05-18 15:58:39 +0000 | [diff] [blame] | 976 | getAddExpr(getZeroExtendExpr(Start, WideTy), |
| 977 | getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), |
| 978 | getZeroExtendExpr(Step, WideTy))); |
| 979 | if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) |
Dan Gohman | ac70cea | 2009-04-29 22:28:28 +0000 | [diff] [blame] | 980 | // Return the expression with the addrec on the outside. |
| 981 | return getAddRecExpr(getZeroExtendExpr(Start, Ty), |
| 982 | getZeroExtendExpr(Step, Ty), |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 983 | L); |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 984 | |
| 985 | // Similar to above, only this time treat the step value as signed. |
| 986 | // This covers loops that count down. |
Dan Gohman | 8f767d9 | 2010-02-24 19:31:06 +0000 | [diff] [blame] | 987 | const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); |
Dan Gohman | ac70cea | 2009-04-29 22:28:28 +0000 | [diff] [blame] | 988 | Add = getAddExpr(Start, SMul); |
Dan Gohman | 5183cae | 2009-05-18 15:58:39 +0000 | [diff] [blame] | 989 | OperandExtendedAdd = |
| 990 | getAddExpr(getZeroExtendExpr(Start, WideTy), |
| 991 | getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), |
| 992 | getSignExtendExpr(Step, WideTy))); |
| 993 | if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) |
Dan Gohman | ac70cea | 2009-04-29 22:28:28 +0000 | [diff] [blame] | 994 | // Return the expression with the addrec on the outside. |
| 995 | return getAddRecExpr(getZeroExtendExpr(Start, Ty), |
| 996 | getSignExtendExpr(Step, Ty), |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 997 | L); |
| 998 | } |
| 999 | |
| 1000 | // If the backedge is guarded by a comparison with the pre-inc value,
| 1001 | // the addrec is safe. Also, if the entry is guarded by a comparison |
| 1002 | // with the start value and the backedge is guarded by a comparison |
| 1003 | // with the post-inc value, the addrec is safe. |
| 1004 | if (isKnownPositive(Step)) { |
| 1005 | const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - |
| 1006 | getUnsignedRange(Step).getUnsignedMax()); |
| 1007 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || |
Dan Gohman | 3948d0b | 2010-04-11 19:27:13 +0000 | [diff] [blame] | 1008 | (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1009 | isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, |
| 1010 | AR->getPostIncExpr(*this), N))) |
| 1011 | // Return the expression with the addrec on the outside. |
| 1012 | return getAddRecExpr(getZeroExtendExpr(Start, Ty), |
| 1013 | getZeroExtendExpr(Step, Ty), |
| 1014 | L); |
| 1015 | } else if (isKnownNegative(Step)) { |
| 1016 | const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - |
| 1017 | getSignedRange(Step).getSignedMin()); |
Dan Gohman | c0ed009 | 2010-05-04 01:11:15 +0000 | [diff] [blame] | 1018 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || |
| 1019 | (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1020 | isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, |
| 1021 | AR->getPostIncExpr(*this), N))) |
| 1022 | // Return the expression with the addrec on the outside. |
| 1023 | return getAddRecExpr(getZeroExtendExpr(Start, Ty), |
| 1024 | getSignExtendExpr(Step, Ty), |
| 1025 | L); |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 1026 | } |
| 1027 | } |
| 1028 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1029 | |
Dan Gohman | 69fbc7f | 2009-07-13 20:55:53 +0000 | [diff] [blame] | 1030 | // The cast wasn't folded; create an explicit cast node. |
| 1031 | // Recompute the insert position, as it may have been invalidated. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1032 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 1033 | SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), |
| 1034 | Op, Ty); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1035 | UniqueSCEVs.InsertNode(S, IP); |
| 1036 | return S; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1037 | } |
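
// A simplified standalone sketch of the final-value check above, with uint8_t
// and uint16_t standing in for an i8 addrec and its doubled-width type; the
// helper name is invented. Start + Step*BECount is computed at the narrow
// width and again with every operand zero-extended to twice the width; if
// zero-extending the narrow result matches the wide result, no iteration up
// to BECount can wrap, so the zext may be pushed into the addrec's operands.
// The real code additionally recasts the backedge-taken count, handles a
// signed (down-counting) step, and falls back to loop-guard reasoning.
#include <cstdint>
#include <cstdio>

static bool finalValueHasNoUnsignedWrap(uint8_t Start, uint8_t Step,
                                        uint8_t BECount) {
  uint8_t Narrow = (uint8_t)(Start + Step * BECount);              // width 8
  uint16_t Wide =
      (uint16_t)Start + (uint16_t)Step * (uint16_t)BECount;        // width 16
  return (uint16_t)Narrow == Wide;
}

int main() {
  // {0,+,1}<i8> over 99 iterations stays below 256: fold the zext inside.
  std::printf("%d\n", finalValueHasNoUnsignedWrap(0, 1, 99));      // prints 1
  // {200,+,3}<i8> over 99 iterations wraps: keep the zext outside.
  std::printf("%d\n", finalValueHasNoUnsignedWrap(200, 3, 99));    // prints 0
  return 0;
}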
| 1038 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1039 | const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, |
Dan Gohman | f5074ec | 2009-07-13 22:05:32 +0000 | [diff] [blame] | 1040 | const Type *Ty) { |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 1041 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && |
Dan Gohman | fb17fd2 | 2009-04-21 00:55:22 +0000 | [diff] [blame] | 1042 | "This is not an extending conversion!"); |
Dan Gohman | 10b9479 | 2009-05-01 16:44:18 +0000 | [diff] [blame] | 1043 | assert(isSCEVable(Ty) && |
| 1044 | "This is not a conversion to a SCEVable type!"); |
| 1045 | Ty = getEffectiveSCEVType(Ty); |
Dan Gohman | fb17fd2 | 2009-04-21 00:55:22 +0000 | [diff] [blame] | 1046 | |
Dan Gohman | c39f44b | 2009-06-30 20:13:32 +0000 | [diff] [blame] | 1047 | // Fold if the operand is constant. |
Dan Gohman | eaf6cf2 | 2010-06-24 16:47:03 +0000 | [diff] [blame] | 1048 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
| 1049 | return getConstant( |
| 1050 | cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), |
| 1051 | getEffectiveSCEVType(Ty)))); |
Dan Gohman | d19534a | 2007-06-15 14:38:12 +0000 | [diff] [blame] | 1052 | |
Dan Gohman | 20900ca | 2009-04-22 16:20:48 +0000 | [diff] [blame] | 1053 | // sext(sext(x)) --> sext(x) |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1054 | if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) |
Dan Gohman | 20900ca | 2009-04-22 16:20:48 +0000 | [diff] [blame] | 1055 | return getSignExtendExpr(SS->getOperand(), Ty); |
| 1056 | |
Dan Gohman | 69fbc7f | 2009-07-13 20:55:53 +0000 | [diff] [blame] | 1057 | // Before doing any expensive analysis, check to see if we've already |
| 1058 | // computed a SCEV for this Op and Ty. |
| 1059 | FoldingSetNodeID ID; |
| 1060 | ID.AddInteger(scSignExtend); |
| 1061 | ID.AddPointer(Op); |
| 1062 | ID.AddPointer(Ty); |
| 1063 | void *IP = 0; |
| 1064 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
| 1065 | |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 1066 | // If the input value is a chrec scev, and we can prove that the value |
Dan Gohman | d19534a | 2007-06-15 14:38:12 +0000 | [diff] [blame] | 1067 | // does not overflow in the old, smaller type, we can sign extend all of the
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 1068 | // operands (often constants). This allows analysis of something like |
Dan Gohman | d19534a | 2007-06-15 14:38:12 +0000 | [diff] [blame] | 1069 | // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1070 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 1071 | if (AR->isAffine()) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1072 | const SCEV *Start = AR->getStart(); |
| 1073 | const SCEV *Step = AR->getStepRecurrence(*this); |
| 1074 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); |
| 1075 | const Loop *L = AR->getLoop(); |
| 1076 | |
Dan Gohman | eb490a7 | 2009-07-25 01:22:26 +0000 | [diff] [blame] | 1077 | // If we have special knowledge that this addrec won't overflow, |
| 1078 | // we don't need to do any further analysis. |
Dan Gohman | 5078f84 | 2009-08-20 17:11:38 +0000 | [diff] [blame] | 1079 | if (AR->hasNoSignedWrap()) |
Dan Gohman | eb490a7 | 2009-07-25 01:22:26 +0000 | [diff] [blame] | 1080 | return getAddRecExpr(getSignExtendExpr(Start, Ty), |
| 1081 | getSignExtendExpr(Step, Ty), |
| 1082 | L); |
| 1083 | |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 1084 | // Check whether the backedge-taken count is SCEVCouldNotCompute. |
| 1085 | // Note that this serves two purposes: It filters out loops that are |
| 1086 | // simply not analyzable, and it covers the case where this code is |
| 1087 | // being called from within backedge-taken count analysis, such that |
| 1088 | // attempting to ask for the backedge-taken count would likely result |
| 1089 | // in infinite recursion. In the latter case, the analysis code will
| 1090 | // cope with a conservative value, and it will take care to purge |
| 1091 | // that value once it has finished. |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1092 | const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 1093 | if (!isa<SCEVCouldNotCompute>(MaxBECount)) { |
Dan Gohman | f0aa485 | 2009-04-29 01:54:20 +0000 | [diff] [blame] | 1094 | // Manually compute the final value for AR, checking for |
Dan Gohman | ac70cea | 2009-04-29 22:28:28 +0000 | [diff] [blame] | 1095 | // overflow. |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 1096 | |
| 1097 | // Check whether the backedge-taken count can be losslessly cast to
Dan Gohman | ac70cea | 2009-04-29 22:28:28 +0000 | [diff] [blame] | 1098 | // the addrec's type. The count is always unsigned. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1099 | const SCEV *CastedMaxBECount = |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 1100 | getTruncateOrZeroExtend(MaxBECount, Start->getType()); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1101 | const SCEV *RecastedMaxBECount = |
Dan Gohman | 5183cae | 2009-05-18 15:58:39 +0000 | [diff] [blame] | 1102 | getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); |
| 1103 | if (MaxBECount == RecastedMaxBECount) { |
Owen Anderson | 1d0be15 | 2009-08-13 21:58:54 +0000 | [diff] [blame] | 1104 | const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 1105 | // Check whether Start+Step*MaxBECount has no signed overflow. |
Dan Gohman | 8f767d9 | 2010-02-24 19:31:06 +0000 | [diff] [blame] | 1106 | const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1107 | const SCEV *Add = getAddExpr(Start, SMul); |
| 1108 | const SCEV *OperandExtendedAdd = |
Dan Gohman | 5183cae | 2009-05-18 15:58:39 +0000 | [diff] [blame] | 1109 | getAddExpr(getSignExtendExpr(Start, WideTy), |
| 1110 | getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), |
| 1111 | getSignExtendExpr(Step, WideTy))); |
| 1112 | if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) |
Dan Gohman | ac70cea | 2009-04-29 22:28:28 +0000 | [diff] [blame] | 1113 | // Return the expression with the addrec on the outside. |
| 1114 | return getAddRecExpr(getSignExtendExpr(Start, Ty), |
| 1115 | getSignExtendExpr(Step, Ty), |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1116 | L); |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 1117 | |
| 1118 | // Similar to above, only this time treat the step value as unsigned. |
| 1119 | // This covers loops that count up with an unsigned step. |
Dan Gohman | 8f767d9 | 2010-02-24 19:31:06 +0000 | [diff] [blame] | 1120 | const SCEV *UMul = getMulExpr(CastedMaxBECount, Step); |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 1121 | Add = getAddExpr(Start, UMul); |
| 1122 | OperandExtendedAdd = |
Dan Gohman | 19378d6 | 2009-07-25 16:03:30 +0000 | [diff] [blame] | 1123 | getAddExpr(getSignExtendExpr(Start, WideTy), |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 1124 | getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), |
| 1125 | getZeroExtendExpr(Step, WideTy))); |
Dan Gohman | 19378d6 | 2009-07-25 16:03:30 +0000 | [diff] [blame] | 1126 | if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 1127 | // Return the expression with the addrec on the outside. |
| 1128 | return getAddRecExpr(getSignExtendExpr(Start, Ty), |
| 1129 | getZeroExtendExpr(Step, Ty), |
| 1130 | L); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1131 | } |
| 1132 | |
| 1133 | // If the backedge is guarded by a comparison with the pre-inc value,
| 1134 | // the addrec is safe. Also, if the entry is guarded by a comparison |
| 1135 | // with the start value and the backedge is guarded by a comparison |
| 1136 | // with the post-inc value, the addrec is safe. |
| 1137 | if (isKnownPositive(Step)) { |
| 1138 | const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) - |
| 1139 | getSignedRange(Step).getSignedMax()); |
| 1140 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) || |
Dan Gohman | 3948d0b | 2010-04-11 19:27:13 +0000 | [diff] [blame] | 1141 | (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) && |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1142 | isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, |
| 1143 | AR->getPostIncExpr(*this), N))) |
| 1144 | // Return the expression with the addrec on the outside. |
| 1145 | return getAddRecExpr(getSignExtendExpr(Start, Ty), |
| 1146 | getSignExtendExpr(Step, Ty), |
| 1147 | L); |
| 1148 | } else if (isKnownNegative(Step)) { |
| 1149 | const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) - |
| 1150 | getSignedRange(Step).getSignedMin()); |
| 1151 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) || |
Dan Gohman | 3948d0b | 2010-04-11 19:27:13 +0000 | [diff] [blame] | 1152 | (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) && |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 1153 | isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, |
| 1154 | AR->getPostIncExpr(*this), N))) |
| 1155 | // Return the expression with the addrec on the outside. |
| 1156 | return getAddRecExpr(getSignExtendExpr(Start, Ty), |
| 1157 | getSignExtendExpr(Step, Ty), |
| 1158 | L); |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 1159 | } |
| 1160 | } |
| 1161 | } |
Dan Gohman | d19534a | 2007-06-15 14:38:12 +0000 | [diff] [blame] | 1162 | |
Dan Gohman | 69fbc7f | 2009-07-13 20:55:53 +0000 | [diff] [blame] | 1163 | // The cast wasn't folded; create an explicit cast node. |
| 1164 | // Recompute the insert position, as it may have been invalidated. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1165 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 1166 | SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), |
| 1167 | Op, Ty); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1168 | UniqueSCEVs.InsertNode(S, IP); |
| 1169 | return S; |
Dan Gohman | d19534a | 2007-06-15 14:38:12 +0000 | [diff] [blame] | 1170 | } |
| 1171 | |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 1172 | /// getAnyExtendExpr - Return a SCEV for the given operand extended with |
| 1173 | /// unspecified bits out to the given type. |
| 1174 | /// |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1175 | const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 1176 | const Type *Ty) { |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 1177 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && |
| 1178 | "This is not an extending conversion!"); |
| 1179 | assert(isSCEVable(Ty) && |
| 1180 | "This is not a conversion to a SCEVable type!"); |
| 1181 | Ty = getEffectiveSCEVType(Ty); |
| 1182 | |
| 1183 | // Sign-extend negative constants. |
| 1184 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
| 1185 | if (SC->getValue()->getValue().isNegative()) |
| 1186 | return getSignExtendExpr(Op, Ty); |
| 1187 | |
| 1188 | // Peel off a truncate cast. |
| 1189 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1190 | const SCEV *NewOp = T->getOperand(); |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 1191 | if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) |
| 1192 | return getAnyExtendExpr(NewOp, Ty); |
| 1193 | return getTruncateOrNoop(NewOp, Ty); |
| 1194 | } |
| 1195 | |
| 1196 | // Next try a zext cast. If the cast is folded, use it. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1197 | const SCEV *ZExt = getZeroExtendExpr(Op, Ty); |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 1198 | if (!isa<SCEVZeroExtendExpr>(ZExt)) |
| 1199 | return ZExt; |
| 1200 | |
| 1201 | // Next try a sext cast. If the cast is folded, use it. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1202 | const SCEV *SExt = getSignExtendExpr(Op, Ty); |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 1203 | if (!isa<SCEVSignExtendExpr>(SExt)) |
| 1204 | return SExt; |
| 1205 | |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1206 | // Force the cast to be folded into the operands of an addrec. |
| 1207 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { |
| 1208 | SmallVector<const SCEV *, 4> Ops; |
| 1209 | for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end(); |
| 1210 | I != E; ++I) |
| 1211 | Ops.push_back(getAnyExtendExpr(*I, Ty)); |
| 1212 | return getAddRecExpr(Ops, AR->getLoop()); |
| 1213 | } |
| 1214 | |
Dan Gohman | f53462d | 2010-07-15 20:02:11 +0000 | [diff] [blame] | 1215 | // As a special case, fold anyext(undef) to undef. We don't want to |
| 1216 | // know too much about SCEVUnknowns, but this special case is handy |
| 1217 | // and harmless. |
| 1218 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op)) |
| 1219 | if (isa<UndefValue>(U->getValue())) |
| 1220 | return getSCEV(UndefValue::get(Ty)); |
| 1221 | |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 1222 | // If the expression is obviously signed, use the sext cast value. |
| 1223 | if (isa<SCEVSMaxExpr>(Op)) |
| 1224 | return SExt; |
| 1225 | |
| 1226 | // Absent any other information, use the zext cast value. |
| 1227 | return ZExt; |
| 1228 | } |
| 1229 | |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1230 | /// CollectAddOperandsWithScales - Process the given Ops list, which is |
| 1231 | /// a list of operands to be added under the given scale, and update the given
| 1232 | /// map. This is a helper function for getAddExpr. As an example of
| 1233 | /// what it does, given a sequence of operands that would form an add |
| 1234 | /// expression like this: |
| 1235 | /// |
| 1236 | /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
| 1237 | /// |
| 1238 | /// where A and B are constants, update the map with these values: |
| 1239 | /// |
| 1240 | /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) |
| 1241 | /// |
| 1242 | /// and add 13 + A*B*29 to AccumulatedConstant. |
| 1243 | /// This will allow getAddExpr to produce this:
| 1244 | /// |
| 1245 | /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) |
| 1246 | /// |
| 1247 | /// This form often exposes folding opportunities that are hidden in |
| 1248 | /// the original operand list. |
| 1249 | /// |
| 1250 | /// Return true iff it appears that any interesting folding opportunities |
| 1251 | /// may be exposed. This helps getAddExpr short-circuit extra work in
| 1252 | /// the common case where no interesting opportunities are present, and |
| 1253 | /// is also used as a check to avoid infinite recursion. |
| 1254 | /// |
| 1255 | static bool |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1256 | CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, |
| 1257 | SmallVector<const SCEV *, 8> &NewOps, |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1258 | APInt &AccumulatedConstant, |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 1259 | const SCEV *const *Ops, size_t NumOperands, |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1260 | const APInt &Scale, |
| 1261 | ScalarEvolution &SE) { |
| 1262 | bool Interesting = false; |
| 1263 | |
Dan Gohman | e0f0c7b | 2010-06-18 19:12:32 +0000 | [diff] [blame] | 1264 | // Iterate over the add operands. They are sorted, with constants first. |
| 1265 | unsigned i = 0; |
| 1266 | while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { |
| 1267 | ++i; |
| 1268 | // Pull a buried constant out to the outside. |
| 1269 | if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) |
| 1270 | Interesting = true; |
| 1271 | AccumulatedConstant += Scale * C->getValue()->getValue(); |
| 1272 | } |
| 1273 | |
| 1274 | // Next comes everything else. We're especially interested in multiplies |
| 1275 | // here, but they're in the middle, so just visit the rest with one loop. |
| 1276 | for (; i != NumOperands; ++i) { |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1277 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); |
| 1278 | if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { |
| 1279 | APInt NewScale = |
| 1280 | Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue(); |
| 1281 | if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { |
| 1282 | // A multiplication of a constant with another add; recurse. |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 1283 | const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1284 | Interesting |= |
| 1285 | CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 1286 | Add->op_begin(), Add->getNumOperands(), |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1287 | NewScale, SE); |
| 1288 | } else { |
| 1289 | // A multiplication of a constant with some other value. Update |
| 1290 | // the map. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1291 | SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); |
| 1292 | const SCEV *Key = SE.getMulExpr(MulOps); |
| 1293 | std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = |
Dan Gohman | 23737e0 | 2009-06-29 18:25:52 +0000 | [diff] [blame] | 1294 | M.insert(std::make_pair(Key, NewScale)); |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1295 | if (Pair.second) { |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1296 | NewOps.push_back(Pair.first->first); |
| 1297 | } else { |
| 1298 | Pair.first->second += NewScale; |
| 1299 | // The map already had an entry for this value, which may indicate |
| 1300 | // a folding opportunity. |
| 1301 | Interesting = true; |
| 1302 | } |
| 1303 | } |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1304 | } else { |
| 1305 | // An ordinary operand. Update the map. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1306 | std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = |
Dan Gohman | 23737e0 | 2009-06-29 18:25:52 +0000 | [diff] [blame] | 1307 | M.insert(std::make_pair(Ops[i], Scale)); |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1308 | if (Pair.second) { |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1309 | NewOps.push_back(Pair.first->first); |
| 1310 | } else { |
| 1311 | Pair.first->second += Scale; |
| 1312 | // The map already had an entry for this value, which may indicate |
| 1313 | // a folding opportunity. |
| 1314 | Interesting = true; |
| 1315 | } |
| 1316 | } |
| 1317 | } |
| 1318 | |
| 1319 | return Interesting; |
| 1320 | } |
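
// A quick standalone spot check (arbitrary concrete values substituted for the
// symbolic operands) that the regrouped form in the comment above equals the
// original expression.
#include <cassert>

int main() {
  const long m = 5, n = 11, o = 2, p = 3, q = 7, r = 13, A = 4, B = 6;
  long Original = m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r);
  long Regrouped = 13 + A*B*29 + n + (m * (1 + A*B)) + ((o + p) * A) + (q * A*B);
  assert(Original == Regrouped);
  return 0;
}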
| 1321 | |
| 1322 | namespace { |
| 1323 | struct APIntCompare { |
| 1324 | bool operator()(const APInt &LHS, const APInt &RHS) const { |
| 1325 | return LHS.ult(RHS); |
| 1326 | } |
| 1327 | }; |
| 1328 | } |
| 1329 | |
Dan Gohman | 6c0866c | 2009-05-24 23:45:28 +0000 | [diff] [blame] | 1330 | /// getAddExpr - Get a canonical add expression, or something simpler if |
| 1331 | /// possible. |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 1332 | const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, |
| 1333 | bool HasNUW, bool HasNSW) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1334 | assert(!Ops.empty() && "Cannot get empty add!"); |
Chris Lattner | 627018b | 2004-04-07 16:16:11 +0000 | [diff] [blame] | 1335 | if (Ops.size() == 1) return Ops[0]; |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 1336 | #ifndef NDEBUG |
Dan Gohman | c72f0c8 | 2010-06-18 19:09:27 +0000 | [diff] [blame] | 1337 | const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 1338 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) |
Dan Gohman | c72f0c8 | 2010-06-18 19:09:27 +0000 | [diff] [blame] | 1339 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 1340 | "SCEVAddExpr operand types don't match!"); |
| 1341 | #endif |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1342 | |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1343 | // If HasNSW is true and all the operands are non-negative, infer HasNUW. |
| 1344 | if (!HasNUW && HasNSW) { |
| 1345 | bool All = true; |
| 1346 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
| 1347 | if (!isKnownNonNegative(Ops[i])) { |
| 1348 | All = false; |
| 1349 | break; |
| 1350 | } |
| 1351 | if (All) HasNUW = true; |
| 1352 | } |
| 1353 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1354 | // Sort by complexity, this groups all similar expression types together. |
Dan Gohman | 7286130 | 2009-05-07 14:39:04 +0000 | [diff] [blame] | 1355 | GroupByComplexity(Ops, LI); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1356 | |
| 1357 | // If there are any constants, fold them together. |
| 1358 | unsigned Idx = 0; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1359 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1360 | ++Idx; |
Chris Lattner | 627018b | 2004-04-07 16:16:11 +0000 | [diff] [blame] | 1361 | assert(Idx < Ops.size()); |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1362 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1363 | // We found two constants, fold them together! |
Dan Gohman | a82752c | 2009-06-14 22:47:23 +0000 | [diff] [blame] | 1364 | Ops[0] = getConstant(LHSC->getValue()->getValue() + |
| 1365 | RHSC->getValue()->getValue()); |
Dan Gohman | 7f7c436 | 2009-06-14 22:53:57 +0000 | [diff] [blame] | 1366 | if (Ops.size() == 2) return Ops[0]; |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 1367 | Ops.erase(Ops.begin()+1); // Erase the folded element |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 1368 | LHSC = cast<SCEVConstant>(Ops[0]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1369 | } |
| 1370 | |
| 1371 | // If we are left with a constant zero being added, strip it off. |
Dan Gohman | bca091d | 2010-04-12 23:08:18 +0000 | [diff] [blame] | 1372 | if (LHSC->getValue()->isZero()) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1373 | Ops.erase(Ops.begin()); |
| 1374 | --Idx; |
| 1375 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1376 | |
Dan Gohman | bca091d | 2010-04-12 23:08:18 +0000 | [diff] [blame] | 1377 | if (Ops.size() == 1) return Ops[0]; |
| 1378 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 1379 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1380 | // Okay, check to see if the same value occurs in the operand list twice. If |
| 1381 | // so, merge them together into a multiply expression. Since we sorted the
| 1382 | // list, these values are required to be adjacent. |
| 1383 | const Type *Ty = Ops[0]->getType(); |
Dan Gohman | dc7692b | 2010-08-12 14:46:54 +0000 | [diff] [blame] | 1384 | bool FoundMatch = false; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1385 | for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) |
| 1386 | if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 |
| 1387 | // Found a match, merge the two values into a multiply, and add any |
| 1388 | // remaining values to the result. |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 1389 | const SCEV *Two = getConstant(Ty, 2); |
Dan Gohman | 58a85b9 | 2010-08-13 20:17:14 +0000 | [diff] [blame] | 1390 | const SCEV *Mul = getMulExpr(Two, Ops[i]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1391 | if (Ops.size() == 2) |
| 1392 | return Mul; |
Dan Gohman | dc7692b | 2010-08-12 14:46:54 +0000 | [diff] [blame] | 1393 | Ops[i] = Mul; |
| 1394 | Ops.erase(Ops.begin()+i+1); |
| 1395 | --i; --e; |
| 1396 | FoundMatch = true; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1397 | } |
Dan Gohman | dc7692b | 2010-08-12 14:46:54 +0000 | [diff] [blame] | 1398 | if (FoundMatch) |
| 1399 | return getAddExpr(Ops, HasNUW, HasNSW); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1400 | |
Dan Gohman | 728c7f3 | 2009-05-08 21:03:19 +0000 | [diff] [blame] | 1401 | // Check for truncates. If all the operands are truncated from the same |
| 1402 | // type, see if factoring out the truncate would permit the result to be |
| 1403 | // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
| 1404 | // if the contents of the resulting outer trunc fold to something simple. |
| 1405 | for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { |
| 1406 | const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); |
| 1407 | const Type *DstType = Trunc->getType(); |
| 1408 | const Type *SrcType = Trunc->getOperand()->getType(); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1409 | SmallVector<const SCEV *, 8> LargeOps; |
Dan Gohman | 728c7f3 | 2009-05-08 21:03:19 +0000 | [diff] [blame] | 1410 | bool Ok = true; |
| 1411 | // Check all the operands to see if they can be represented in the |
| 1412 | // source type of the truncate. |
| 1413 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) { |
| 1414 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { |
| 1415 | if (T->getOperand()->getType() != SrcType) { |
| 1416 | Ok = false; |
| 1417 | break; |
| 1418 | } |
| 1419 | LargeOps.push_back(T->getOperand()); |
| 1420 | } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { |
Dan Gohman | c686398 | 2010-04-23 01:51:29 +0000 | [diff] [blame] | 1421 | LargeOps.push_back(getAnyExtendExpr(C, SrcType)); |
Dan Gohman | 728c7f3 | 2009-05-08 21:03:19 +0000 | [diff] [blame] | 1422 | } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1423 | SmallVector<const SCEV *, 8> LargeMulOps; |
Dan Gohman | 728c7f3 | 2009-05-08 21:03:19 +0000 | [diff] [blame] | 1424 | for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { |
| 1425 | if (const SCEVTruncateExpr *T = |
| 1426 | dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { |
| 1427 | if (T->getOperand()->getType() != SrcType) { |
| 1428 | Ok = false; |
| 1429 | break; |
| 1430 | } |
| 1431 | LargeMulOps.push_back(T->getOperand()); |
| 1432 | } else if (const SCEVConstant *C = |
| 1433 | dyn_cast<SCEVConstant>(M->getOperand(j))) { |
Dan Gohman | c686398 | 2010-04-23 01:51:29 +0000 | [diff] [blame] | 1434 | LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); |
Dan Gohman | 728c7f3 | 2009-05-08 21:03:19 +0000 | [diff] [blame] | 1435 | } else { |
| 1436 | Ok = false; |
| 1437 | break; |
| 1438 | } |
| 1439 | } |
| 1440 | if (Ok) |
| 1441 | LargeOps.push_back(getMulExpr(LargeMulOps)); |
| 1442 | } else { |
| 1443 | Ok = false; |
| 1444 | break; |
| 1445 | } |
| 1446 | } |
| 1447 | if (Ok) { |
| 1448 | // Evaluate the expression in the larger type. |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 1449 | const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW); |
Dan Gohman | 728c7f3 | 2009-05-08 21:03:19 +0000 | [diff] [blame] | 1450 | // If it folds to something simple, use it. Otherwise, don't. |
| 1451 | if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) |
| 1452 | return getTruncateExpr(Fold, DstType); |
| 1453 | } |
| 1454 | } |
| 1455 | |
| 1456 | // Skip past any other cast SCEVs. |
Dan Gohman | f50cd74 | 2007-06-18 19:30:09 +0000 | [diff] [blame] | 1457 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) |
| 1458 | ++Idx; |
| 1459 | |
| 1460 | // If there are add operands they would be next. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1461 | if (Idx < Ops.size()) { |
| 1462 | bool DeletedAdd = false; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1463 | while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1464 | // If we have an add, expand the add operands onto the end of the operands |
| 1465 | // list. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1466 | Ops.erase(Ops.begin()+Idx); |
Dan Gohman | 403a8cd | 2010-06-21 19:47:52 +0000 | [diff] [blame] | 1467 | Ops.append(Add->op_begin(), Add->op_end()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1468 | DeletedAdd = true; |
| 1469 | } |
| 1470 | |
| 1471 | // If we deleted at least one add, we added operands to the end of the list, |
| 1472 | // and they are not necessarily sorted. Recurse to re-sort and re-simplify
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 1473 | // any operands we just acquired. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1474 | if (DeletedAdd) |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1475 | return getAddExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1476 | } |
| 1477 | |
| 1478 | // Skip over the add expression until we get to a multiply. |
| 1479 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) |
| 1480 | ++Idx; |
| 1481 | |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1482 | // Check to see if there are any folding opportunities present with |
| 1483 | // operands multiplied by constant values. |
| 1484 | if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { |
| 1485 | uint64_t BitWidth = getTypeSizeInBits(Ty); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1486 | DenseMap<const SCEV *, APInt> M; |
| 1487 | SmallVector<const SCEV *, 8> NewOps; |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1488 | APInt AccumulatedConstant(BitWidth, 0); |
| 1489 | if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 1490 | Ops.data(), Ops.size(), |
| 1491 | APInt(BitWidth, 1), *this)) { |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1492 | // Some interesting folding opportunity is present, so it's worthwhile to
| 1493 | // re-generate the operands list. Group the operands by constant scale, |
| 1494 | // to avoid multiplying by the same constant scale multiple times. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1495 | std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; |
| 1496 | for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(), |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1497 | E = NewOps.end(); I != E; ++I) |
| 1498 | MulOpLists[M.find(*I)->second].push_back(*I); |
| 1499 | // Re-generate the operands list. |
| 1500 | Ops.clear(); |
| 1501 | if (AccumulatedConstant != 0) |
| 1502 | Ops.push_back(getConstant(AccumulatedConstant)); |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 1503 | for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator |
| 1504 | I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I) |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1505 | if (I->first != 0) |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 1506 | Ops.push_back(getMulExpr(getConstant(I->first), |
| 1507 | getAddExpr(I->second))); |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1508 | if (Ops.empty()) |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 1509 | return getConstant(Ty, 0); |
Dan Gohman | bd59d7b | 2009-06-14 22:58:51 +0000 | [diff] [blame] | 1510 | if (Ops.size() == 1) |
| 1511 | return Ops[0]; |
| 1512 | return getAddExpr(Ops); |
| 1513 | } |
| 1514 | } |
| 1515 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1516 | // If we are adding something to a multiply expression, make sure the |
| 1517 | // something is not already an operand of the multiply. If so, merge it into |
| 1518 | // the multiply. |
| 1519 | for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1520 | const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1521 | for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1522 | const SCEV *MulOpSCEV = Mul->getOperand(MulOp); |
Dan Gohman | 918e76b | 2010-08-12 14:52:55 +0000 | [diff] [blame] | 1523 | if (isa<SCEVConstant>(MulOpSCEV)) |
| 1524 | continue; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1525 | for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) |
Dan Gohman | 918e76b | 2010-08-12 14:52:55 +0000 | [diff] [blame] | 1526 | if (MulOpSCEV == Ops[AddOp]) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1527 | // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1528 | const SCEV *InnerMul = Mul->getOperand(MulOp == 0); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1529 | if (Mul->getNumOperands() != 2) { |
| 1530 | // If the multiply has more than two operands, we must get the |
| 1531 | // Y*Z term. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1532 | SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1533 | MulOps.erase(MulOps.begin()+MulOp); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1534 | InnerMul = getMulExpr(MulOps); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1535 | } |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 1536 | const SCEV *One = getConstant(Ty, 1); |
Dan Gohman | 58a85b9 | 2010-08-13 20:17:14 +0000 | [diff] [blame] | 1537 | const SCEV *AddOne = getAddExpr(One, InnerMul); |
Dan Gohman | 918e76b | 2010-08-12 14:52:55 +0000 | [diff] [blame] | 1538 | const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1539 | if (Ops.size() == 2) return OuterMul; |
| 1540 | if (AddOp < Idx) { |
| 1541 | Ops.erase(Ops.begin()+AddOp); |
| 1542 | Ops.erase(Ops.begin()+Idx-1); |
| 1543 | } else { |
| 1544 | Ops.erase(Ops.begin()+Idx); |
| 1545 | Ops.erase(Ops.begin()+AddOp-1); |
| 1546 | } |
| 1547 | Ops.push_back(OuterMul); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1548 | return getAddExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1549 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 1550 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1551 | // Check this multiply against other multiplies being added together. |
Dan Gohman | 727356f | 2010-08-12 15:00:23 +0000 | [diff] [blame] | 1552 | bool AnyFold = false; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1553 | for (unsigned OtherMulIdx = Idx+1; |
| 1554 | OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); |
| 1555 | ++OtherMulIdx) { |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1556 | const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1557 | // If MulOp occurs in OtherMul, we can fold the two multiplies |
| 1558 | // together. |
| 1559 | for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); |
| 1560 | OMulOp != e; ++OMulOp) |
| 1561 | if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { |
| 1562 | // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1563 | const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1564 | if (Mul->getNumOperands() != 2) { |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 1565 | SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), |
| 1566 | Mul->op_end()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1567 | MulOps.erase(MulOps.begin()+MulOp); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1568 | InnerMul1 = getMulExpr(MulOps); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1569 | } |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1570 | const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1571 | if (OtherMul->getNumOperands() != 2) { |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 1572 | SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), |
| 1573 | OtherMul->op_end()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1574 | MulOps.erase(MulOps.begin()+OMulOp); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1575 | InnerMul2 = getMulExpr(MulOps); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1576 | } |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1577 | const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); |
| 1578 | const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1579 | if (Ops.size() == 2) return OuterMul; |
Dan Gohman | 727356f | 2010-08-12 15:00:23 +0000 | [diff] [blame] | 1580 | Ops[Idx] = OuterMul; |
| 1581 | Ops.erase(Ops.begin()+OtherMulIdx); |
| 1582 | OtherMulIdx = Idx; |
| 1583 | AnyFold = true; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1584 | } |
| 1585 | } |
Dan Gohman | 727356f | 2010-08-12 15:00:23 +0000 | [diff] [blame] | 1586 | if (AnyFold) |
| 1587 | return getAddExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1588 | } |
| 1589 | } |
| 1590 | |
| 1591 | // If there are any add recurrences in the operands list, see if any other |
| 1592 | // added values are loop invariant. If so, we can fold them into the |
| 1593 | // recurrence. |
| 1594 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) |
| 1595 | ++Idx; |
| 1596 | |
| 1597 | // Scan over all recurrences, trying to fold loop invariants into them. |
| 1598 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { |
| 1599 | // Scan all of the other operands to this add and add them to the vector if |
| 1600 | // they are loop invariant w.r.t. the recurrence. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1601 | SmallVector<const SCEV *, 8> LIOps; |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1602 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); |
Dan Gohman | bca091d | 2010-04-12 23:08:18 +0000 | [diff] [blame] | 1603 | const Loop *AddRecLoop = AddRec->getLoop(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1604 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
Dan Gohman | bca091d | 2010-04-12 23:08:18 +0000 | [diff] [blame] | 1605 | if (Ops[i]->isLoopInvariant(AddRecLoop)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1606 | LIOps.push_back(Ops[i]); |
| 1607 | Ops.erase(Ops.begin()+i); |
| 1608 | --i; --e; |
| 1609 | } |
| 1610 | |
| 1611 | // If we found some loop invariants, fold them into the recurrence. |
| 1612 | if (!LIOps.empty()) { |
Dan Gohman | 8dae138 | 2008-09-14 17:21:12 +0000 | [diff] [blame] | 1613 | // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1614 | LIOps.push_back(AddRec->getStart()); |
| 1615 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1616 | SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), |
Dan Gohman | 3a5d409 | 2009-12-18 03:57:04 +0000 | [diff] [blame] | 1617 | AddRec->op_end()); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1618 | AddRecOps[0] = getAddExpr(LIOps); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1619 | |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 1620 | // Build the new addrec. Propagate the NUW and NSW flags if both the |
| 1621 | // outer add and the inner addrec are guaranteed to have no overflow. |
| 1622 | const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, |
| 1623 | HasNUW && AddRec->hasNoUnsignedWrap(), |
| 1624 | HasNSW && AddRec->hasNoSignedWrap()); |
Dan Gohman | 59de33e | 2009-12-18 18:45:31 +0000 | [diff] [blame] | 1625 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1626 | // If all of the other operands were loop invariant, we are done. |
| 1627 | if (Ops.size() == 1) return NewRec; |
| 1628 | |
| 1629 | // Otherwise, add the folded AddRec to the non-loop-invariant parts.
| 1630 | for (unsigned i = 0;; ++i) |
| 1631 | if (Ops[i] == AddRec) { |
| 1632 | Ops[i] = NewRec; |
| 1633 | break; |
| 1634 | } |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1635 | return getAddExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1636 | } |
| 1637 | |
| 1638 | // Okay, if there weren't any loop invariants to be folded, check to see if |
| 1639 | // there are multiple AddRec's with the same loop induction variable being |
| 1640 | // added together. If so, we can fold them. |
| 1641 | for (unsigned OtherIdx = Idx+1; |
| 1642 | OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx) |
| 1643 | if (OtherIdx != Idx) { |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1644 | const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); |
Dan Gohman | bca091d | 2010-04-12 23:08:18 +0000 | [diff] [blame] | 1645 | if (AddRecLoop == OtherAddRec->getLoop()) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1646 | // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D} |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 1647 | SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(), |
| 1648 | AddRec->op_end()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1649 | for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) { |
| 1650 | if (i >= NewOps.size()) { |
Dan Gohman | 403a8cd | 2010-06-21 19:47:52 +0000 | [diff] [blame] | 1651 | NewOps.append(OtherAddRec->op_begin()+i, |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1652 | OtherAddRec->op_end()); |
| 1653 | break; |
| 1654 | } |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1655 | NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i)); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1656 | } |
Dan Gohman | bca091d | 2010-04-12 23:08:18 +0000 | [diff] [blame] | 1657 | const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1658 | |
| 1659 | if (Ops.size() == 2) return NewAddRec; |
| 1660 | |
| 1661 | Ops.erase(Ops.begin()+Idx); |
| 1662 | Ops.erase(Ops.begin()+OtherIdx-1); |
| 1663 | Ops.push_back(NewAddRec); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1664 | return getAddExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1665 | } |
| 1666 | } |
| 1667 | |
| 1668 | // Otherwise couldn't fold anything into this recurrence. Move onto the |
| 1669 | // next one. |
| 1670 | } |
| 1671 | |
| 1672 | // Okay, it looks like we really DO need an add expr. Check to see if we |
| 1673 | // already have one, otherwise create a new one. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1674 | FoldingSetNodeID ID; |
| 1675 | ID.AddInteger(scAddExpr); |
| 1676 | ID.AddInteger(Ops.size()); |
| 1677 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
| 1678 | ID.AddPointer(Ops[i]); |
| 1679 | void *IP = 0; |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1680 | SCEVAddExpr *S = |
| 1681 | static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); |
| 1682 | if (!S) { |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 1683 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
| 1684 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 1685 | S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator), |
| 1686 | O, Ops.size()); |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1687 | UniqueSCEVs.InsertNode(S, IP); |
| 1688 | } |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 1689 | if (HasNUW) S->setHasNoUnsignedWrap(true); |
| 1690 | if (HasNSW) S->setHasNoSignedWrap(true); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1691 | return S; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1692 | } |
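The recurrence folds performed by getAddExpr are ordinary algebra on affine chains of recurrences. As a standalone illustration (evalAddRec, checkAddFolds, and the sample constants are made up for this sketch and are not part of ScalarEvolution), the following code checks the two identities used above, LI + {Start,+,Step} == {LI+Start,+,Step} and {A,+,B} + {C,+,D} == {A+C,+,B+D}, by direct evaluation.

#include <assert.h>
#include <stdint.h>

// Value of the affine recurrence {Start,+,Step} at iteration i of its loop.
static int64_t evalAddRec(int64_t Start, int64_t Step, int64_t i) {
  return Start + Step * i;
}

static void checkAddFolds() {
  const int64_t LI = 7, Start = 3, Step = 5;   // loop-invariant fold
  const int64_t A = 2, B = 11, C = -4, D = 6;  // addrec + addrec fold
  for (int64_t i = 0; i != 100; ++i) {
    assert(LI + evalAddRec(Start, Step, i) == evalAddRec(LI + Start, Step, i));
    assert(evalAddRec(A, B, i) + evalAddRec(C, D, i) ==
           evalAddRec(A + C, B + D, i));
  }
}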
| 1693 | |
Dan Gohman | 6c0866c | 2009-05-24 23:45:28 +0000 | [diff] [blame] | 1694 | /// getMulExpr - Get a canonical multiply expression, or something simpler if |
| 1695 | /// possible. |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 1696 | const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, |
| 1697 | bool HasNUW, bool HasNSW) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1698 | assert(!Ops.empty() && "Cannot get empty mul!"); |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1699 | if (Ops.size() == 1) return Ops[0]; |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 1700 | #ifndef NDEBUG |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 1701 | const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 1702 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 1703 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 1704 | "SCEVMulExpr operand types don't match!"); |
| 1705 | #endif |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1706 | |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1707 | // If HasNSW is true and all the operands are non-negative, infer HasNUW. |
| 1708 | if (!HasNUW && HasNSW) { |
| 1709 | bool All = true; |
| 1710 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
| 1711 | if (!isKnownNonNegative(Ops[i])) { |
| 1712 | All = false; |
| 1713 | break; |
| 1714 | } |
| 1715 | if (All) HasNUW = true; |
| 1716 | } |
| 1717 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1718 | // Sort by complexity, this groups all similar expression types together. |
Dan Gohman | 7286130 | 2009-05-07 14:39:04 +0000 | [diff] [blame] | 1719 | GroupByComplexity(Ops, LI); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1720 | |
| 1721 | // If there are any constants, fold them together. |
| 1722 | unsigned Idx = 0; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1723 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1724 | |
| 1725 | // C1*(C2+V) -> C1*C2 + C1*V |
| 1726 | if (Ops.size() == 2) |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1727 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1728 | if (Add->getNumOperands() == 2 && |
| 1729 | isa<SCEVConstant>(Add->getOperand(0))) |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1730 | return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), |
| 1731 | getMulExpr(LHSC, Add->getOperand(1))); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1732 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1733 | ++Idx; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1734 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1735 | // We found two constants, fold them together! |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 1736 | ConstantInt *Fold = ConstantInt::get(getContext(), |
| 1737 | LHSC->getValue()->getValue() * |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 1738 | RHSC->getValue()->getValue()); |
| 1739 | Ops[0] = getConstant(Fold); |
| 1740 | Ops.erase(Ops.begin()+1); // Erase the folded element |
| 1741 | if (Ops.size() == 1) return Ops[0]; |
| 1742 | LHSC = cast<SCEVConstant>(Ops[0]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1743 | } |
| 1744 | |
| 1745 | // If we are left with a constant one being multiplied, strip it off. |
| 1746 | if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { |
| 1747 | Ops.erase(Ops.begin()); |
| 1748 | --Idx; |
Reid Spencer | cae5754 | 2007-03-02 00:28:52 +0000 | [diff] [blame] | 1749 | } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1750 | // If we have a multiply of zero, it will always be zero. |
| 1751 | return Ops[0]; |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1752 | } else if (Ops[0]->isAllOnesValue()) { |
| 1753 | // If we have a mul by -1 of an add, try distributing the -1 among the |
| 1754 | // add operands. |
| 1755 | if (Ops.size() == 2) |
| 1756 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { |
| 1757 | SmallVector<const SCEV *, 4> NewOps; |
| 1758 | bool AnyFolded = false; |
| 1759 | for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
| 1760 | I != E; ++I) { |
| 1761 | const SCEV *Mul = getMulExpr(Ops[0], *I); |
| 1762 | if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; |
| 1763 | NewOps.push_back(Mul); |
| 1764 | } |
| 1765 | if (AnyFolded) |
| 1766 | return getAddExpr(NewOps); |
| 1767 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1768 | } |
Dan Gohman | 3ab1312 | 2010-04-13 16:49:23 +0000 | [diff] [blame] | 1769 | |
| 1770 | if (Ops.size() == 1) |
| 1771 | return Ops[0]; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1772 | } |
| 1773 | |
| 1774 | // Skip over the add expressions until we get to a multiply.
| 1775 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) |
| 1776 | ++Idx; |
| 1777 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1778 | // If there are mul operands inline them all into this expression. |
| 1779 | if (Idx < Ops.size()) { |
| 1780 | bool DeletedMul = false; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1781 | while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1782 | // If we have a mul, expand its operands onto the end of the operands
| 1783 | // list.
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1784 | Ops.erase(Ops.begin()+Idx); |
Dan Gohman | 403a8cd | 2010-06-21 19:47:52 +0000 | [diff] [blame] | 1785 | Ops.append(Mul->op_begin(), Mul->op_end()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1786 | DeletedMul = true; |
| 1787 | } |
| 1788 | |
| 1789 | // If we deleted at least one mul, we added operands to the end of the list, |
| 1790 | // and they are not necessarily sorted. Recurse to resort and resimplify |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 1791 | // any operands we just acquired. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1792 | if (DeletedMul) |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1793 | return getMulExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1794 | } |
| 1795 | |
| 1796 | // If there are any add recurrences in the operands list, see if any other |
| 1797 | // multiplied values are loop invariant. If so, we can fold them into the
| 1798 | // recurrence. |
| 1799 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) |
| 1800 | ++Idx; |
| 1801 | |
| 1802 | // Scan over all recurrences, trying to fold loop invariants into them. |
| 1803 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { |
| 1804 | // Scan all of the other operands to this mul and add them to the vector if |
| 1805 | // they are loop invariant w.r.t. the recurrence. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1806 | SmallVector<const SCEV *, 8> LIOps; |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1807 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1808 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
| 1809 | if (Ops[i]->isLoopInvariant(AddRec->getLoop())) { |
| 1810 | LIOps.push_back(Ops[i]); |
| 1811 | Ops.erase(Ops.begin()+i); |
| 1812 | --i; --e; |
| 1813 | } |
| 1814 | |
| 1815 | // If we found some loop invariants, fold them into the recurrence. |
| 1816 | if (!LIOps.empty()) { |
Dan Gohman | 8dae138 | 2008-09-14 17:21:12 +0000 | [diff] [blame] | 1817 | // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1818 | SmallVector<const SCEV *, 4> NewOps; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1819 | NewOps.reserve(AddRec->getNumOperands()); |
Dan Gohman | 27ed6a4 | 2010-06-17 23:34:09 +0000 | [diff] [blame] | 1820 | const SCEV *Scale = getMulExpr(LIOps); |
| 1821 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) |
| 1822 | NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1823 | |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 1824 | // Build the new addrec. Propagate the NUW and NSW flags if both the |
| 1825 | // outer mul and the inner addrec are guaranteed to have no overflow. |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1826 | const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(), |
| 1827 | HasNUW && AddRec->hasNoUnsignedWrap(), |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 1828 | HasNSW && AddRec->hasNoSignedWrap()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1829 | |
| 1830 | // If all of the other operands were loop invariant, we are done. |
| 1831 | if (Ops.size() == 1) return NewRec; |
| 1832 | |
| 1833 | // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
| 1834 | for (unsigned i = 0;; ++i) |
| 1835 | if (Ops[i] == AddRec) { |
| 1836 | Ops[i] = NewRec; |
| 1837 | break; |
| 1838 | } |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1839 | return getMulExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1840 | } |
| 1841 | |
| 1842 | // Okay, if there weren't any loop invariants to be folded, check to see if |
| 1843 | // there are multiple AddRec's with the same loop induction variable being |
| 1844 | // multiplied together. If so, we can fold them. |
| 1845 | for (unsigned OtherIdx = Idx+1; |
| 1846 | OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx) |
| 1847 | if (OtherIdx != Idx) { |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1848 | const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1849 | if (AddRec->getLoop() == OtherAddRec->getLoop()) { |
| 1850 | // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D} |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 1851 | const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec; |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1852 | const SCEV *NewStart = getMulExpr(F->getStart(), |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1853 | G->getStart()); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1854 | const SCEV *B = F->getStepRecurrence(*this); |
| 1855 | const SCEV *D = G->getStepRecurrence(*this); |
| 1856 | const SCEV *NewStep = getAddExpr(getMulExpr(F, D), |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1857 | getMulExpr(G, B), |
| 1858 | getMulExpr(B, D)); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 1859 | const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep, |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1860 | F->getLoop()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1861 | if (Ops.size() == 2) return NewAddRec; |
| 1862 | |
| 1863 | Ops.erase(Ops.begin()+Idx); |
| 1864 | Ops.erase(Ops.begin()+OtherIdx-1); |
| 1865 | Ops.push_back(NewAddRec); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 1866 | return getMulExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1867 | } |
| 1868 | } |
| 1869 | |
| 1870 | // Otherwise couldn't fold anything into this recurrence. Move onto the |
| 1871 | // next one. |
| 1872 | } |
| 1873 | |
| 1874 | // Okay, it looks like we really DO need a mul expr. Check to see if we
| 1875 | // already have one, otherwise create a new one. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1876 | FoldingSetNodeID ID; |
| 1877 | ID.AddInteger(scMulExpr); |
| 1878 | ID.AddInteger(Ops.size()); |
| 1879 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
| 1880 | ID.AddPointer(Ops[i]); |
| 1881 | void *IP = 0; |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1882 | SCEVMulExpr *S = |
| 1883 | static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); |
| 1884 | if (!S) { |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 1885 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
| 1886 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 1887 | S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), |
| 1888 | O, Ops.size()); |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 1889 | UniqueSCEVs.InsertNode(S, IP); |
| 1890 | } |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 1891 | if (HasNUW) S->setHasNoUnsignedWrap(true); |
| 1892 | if (HasNSW) S->setHasNoSignedWrap(true); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1893 | return S; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1894 | } |
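The product fold above, F * G == {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D}, can be checked numerically: the start of the product is A*C and its per-iteration step is F(i)*D + G(i)*B + B*D. The sketch below (evalAffine, checkMulOfAddRecs, and the constants are illustrative only, not part of this file) verifies both.

#include <assert.h>
#include <stdint.h>

static int64_t evalAffine(int64_t Start, int64_t Step, int64_t i) {
  return Start + Step * i;
}

static void checkMulOfAddRecs() {
  const int64_t A = 3, B = 5, C = -2, D = 7;
  assert(evalAffine(A, B, 0) * evalAffine(C, D, 0) == A * C); // start A*C
  for (int64_t i = 0; i != 100; ++i) {
    int64_t F = evalAffine(A, B, i), G = evalAffine(C, D, i);
    int64_t Fn = evalAffine(A, B, i + 1), Gn = evalAffine(C, D, i + 1);
    // Step of the product recurrence at iteration i.
    assert(Fn * Gn - F * G == F * D + G * B + B * D);
  }
}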
| 1895 | |
Andreas Bolka | 8a11c98 | 2009-08-07 22:55:26 +0000 | [diff] [blame] | 1896 | /// getUDivExpr - Get a canonical unsigned division expression, or something |
| 1897 | /// simpler if possible. |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 1898 | const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, |
| 1899 | const SCEV *RHS) { |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 1900 | assert(getEffectiveSCEVType(LHS->getType()) == |
| 1901 | getEffectiveSCEVType(RHS->getType()) && |
| 1902 | "SCEVUDivExpr operand types don't match!"); |
| 1903 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 1904 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1905 | if (RHSC->getValue()->equalsInt(1)) |
Dan Gohman | 4c0d5d5 | 2009-08-20 16:42:55 +0000 | [diff] [blame] | 1906 | return LHS; // X udiv 1 --> X
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 1907 | // If the denominator is zero, the result of the udiv is undefined. Don't |
| 1908 | // try to analyze it, because the resolution chosen here may differ from |
| 1909 | // the resolution chosen in other parts of the compiler. |
| 1910 | if (!RHSC->getValue()->isZero()) { |
| 1911 | // Determine whether the division can be folded into the operands of
| 1912 | // the dividend (the udiv's left-hand operand).
| 1913 | // TODO: Generalize this to non-constants by using known-bits information. |
| 1914 | const Type *Ty = LHS->getType(); |
| 1915 | unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros(); |
Dan Gohman | ddd3a88 | 2010-08-04 19:52:50 +0000 | [diff] [blame] | 1916 | unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 1917 | // For non-power-of-two values, effectively round the value up to the |
| 1918 | // nearest power of two. |
| 1919 | if (!RHSC->getValue()->getValue().isPowerOf2()) |
| 1920 | ++MaxShiftAmt; |
| 1921 | const IntegerType *ExtTy = |
| 1922 | IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); |
| 1923 | // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. |
| 1924 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) |
| 1925 | if (const SCEVConstant *Step = |
| 1926 | dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) |
| 1927 | if (!Step->getValue()->getValue() |
| 1928 | .urem(RHSC->getValue()->getValue()) && |
| 1929 | getZeroExtendExpr(AR, ExtTy) == |
| 1930 | getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), |
| 1931 | getZeroExtendExpr(Step, ExtTy), |
| 1932 | AR->getLoop())) { |
| 1933 | SmallVector<const SCEV *, 4> Operands; |
| 1934 | for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) |
| 1935 | Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); |
| 1936 | return getAddRecExpr(Operands, AR->getLoop()); |
Dan Gohman | 185cf03 | 2009-05-08 20:18:49 +0000 | [diff] [blame] | 1937 | } |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 1938 | // (A*B)/C --> A*(B/C) if safe and B/C can be folded. |
| 1939 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { |
| 1940 | SmallVector<const SCEV *, 4> Operands; |
| 1941 | for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) |
| 1942 | Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); |
| 1943 | if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) |
| 1944 | // Find an operand that's safely divisible. |
| 1945 | for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { |
| 1946 | const SCEV *Op = M->getOperand(i); |
| 1947 | const SCEV *Div = getUDivExpr(Op, RHSC); |
| 1948 | if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { |
| 1949 | Operands = SmallVector<const SCEV *, 4>(M->op_begin(), |
| 1950 | M->op_end()); |
| 1951 | Operands[i] = Div; |
| 1952 | return getMulExpr(Operands); |
| 1953 | } |
| 1954 | } |
Dan Gohman | 185cf03 | 2009-05-08 20:18:49 +0000 | [diff] [blame] | 1955 | } |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 1956 | // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. |
| 1957 | if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) { |
| 1958 | SmallVector<const SCEV *, 4> Operands; |
| 1959 | for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) |
| 1960 | Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); |
| 1961 | if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { |
| 1962 | Operands.clear(); |
| 1963 | for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { |
| 1964 | const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); |
| 1965 | if (isa<SCEVUDivExpr>(Op) || |
| 1966 | getMulExpr(Op, RHS) != A->getOperand(i)) |
| 1967 | break; |
| 1968 | Operands.push_back(Op); |
| 1969 | } |
| 1970 | if (Operands.size() == A->getNumOperands()) |
| 1971 | return getAddExpr(Operands); |
| 1972 | } |
| 1973 | } |
Dan Gohman | 185cf03 | 2009-05-08 20:18:49 +0000 | [diff] [blame] | 1974 | |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 1975 | // Fold if both operands are constant. |
| 1976 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { |
| 1977 | Constant *LHSCV = LHSC->getValue(); |
| 1978 | Constant *RHSCV = RHSC->getValue(); |
| 1979 | return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, |
| 1980 | RHSCV))); |
| 1981 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1982 | } |
| 1983 | } |
| 1984 | |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1985 | FoldingSetNodeID ID; |
| 1986 | ID.AddInteger(scUDivExpr); |
| 1987 | ID.AddPointer(LHS); |
| 1988 | ID.AddPointer(RHS); |
| 1989 | void *IP = 0; |
| 1990 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 1991 | SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), |
| 1992 | LHS, RHS); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 1993 | UniqueSCEVs.InsertNode(S, IP); |
| 1994 | return S; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 1995 | } |
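The {X,+,N}/C --> {X/C,+,N/C} rewrite relies on C evenly dividing the step and on the recurrence not wrapping, which the code above establishes with the zero-extension comparison. Under those assumptions the identity is simple unsigned arithmetic; the sketch below (checkUDivOfAddRec and its constants are hypothetical, not part of ScalarEvolution) demonstrates it for one divisible step.

#include <assert.h>
#include <stdint.h>

static void checkUDivOfAddRec() {
  const uint64_t X = 100, N = 12, C = 4;  // C evenly divides the step N
  for (uint64_t i = 0; i != 100; ++i)
    // floor((X + i*N) / C) == floor(X/C) + i*(N/C) when C divides N and
    // nothing wraps, which is what lets the udiv distribute over the addrec.
    assert((X + N * i) / C == X / C + (N / C) * i);
}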
| 1996 | |
| 1997 | |
Dan Gohman | 6c0866c | 2009-05-24 23:45:28 +0000 | [diff] [blame] | 1998 | /// getAddRecExpr - Get an add recurrence expression for the specified loop. |
| 1999 | /// Simplify the expression as much as possible. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2000 | const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 2001 | const SCEV *Step, const Loop *L, |
| 2002 | bool HasNUW, bool HasNSW) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2003 | SmallVector<const SCEV *, 4> Operands; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2004 | Operands.push_back(Start); |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2005 | if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2006 | if (StepChrec->getLoop() == L) { |
Dan Gohman | 403a8cd | 2010-06-21 19:47:52 +0000 | [diff] [blame] | 2007 | Operands.append(StepChrec->op_begin(), StepChrec->op_end()); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 2008 | return getAddRecExpr(Operands, L); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2009 | } |
| 2010 | |
| 2011 | Operands.push_back(Step); |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 2012 | return getAddRecExpr(Operands, L, HasNUW, HasNSW); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2013 | } |
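When Step is itself an addrec on the same loop, appending its operands produces a higher-order chain of recurrences: {Start,+,{A,+,B}<L>}<L> becomes {Start,+,A,+,B}<L>. Evaluating such a chain just means accumulating the inner recurrence into the outer one each iteration; the sketch below (checkNestedStep and its constants are illustrative, not part of this file) checks that against the quadratic closed form.

#include <assert.h>
#include <stdint.h>

static void checkNestedStep() {
  const int64_t Start = 4, A = 3, B = 2;  // the chain {Start,+,A,+,B}
  int64_t Val = Start, Inner = A;         // Inner tracks {A,+,B}
  for (int64_t i = 0; i != 100; ++i) {
    // Closed form of the chain: Start + A*i + B*i*(i-1)/2.
    assert(Val == Start + A * i + B * i * (i - 1) / 2);
    Val += Inner;                         // advance {Start,+,A,+,B}
    Inner += B;                           // advance {A,+,B}
  }
}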
| 2014 | |
Dan Gohman | 6c0866c | 2009-05-24 23:45:28 +0000 | [diff] [blame] | 2015 | /// getAddRecExpr - Get an add recurrence expression for the specified loop. |
| 2016 | /// Simplify the expression as much as possible. |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 2017 | const SCEV * |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2018 | ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 2019 | const Loop *L, |
| 2020 | bool HasNUW, bool HasNSW) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2021 | if (Operands.size() == 1) return Operands[0]; |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2022 | #ifndef NDEBUG |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 2023 | const Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2024 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 2025 | assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2026 | "SCEVAddRecExpr operand types don't match!"); |
| 2027 | #endif |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2028 | |
Dan Gohman | cfeb6a4 | 2008-06-18 16:23:07 +0000 | [diff] [blame] | 2029 | if (Operands.back()->isZero()) { |
| 2030 | Operands.pop_back(); |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 2031 | return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X |
Dan Gohman | cfeb6a4 | 2008-06-18 16:23:07 +0000 | [diff] [blame] | 2032 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2033 | |
Dan Gohman | bc02853 | 2010-02-19 18:49:22 +0000 | [diff] [blame] | 2034 | // It's tempting to call getMaxBackedgeTakenCount here and
| 2035 | // use that information to infer NUW and NSW flags. However, computing a |
| 2036 | // BE count requires calling getAddRecExpr, so we may not yet have a |
| 2037 | // meaningful BE count at this point (and if we don't, we'd be stuck |
| 2038 | // with a SCEVCouldNotCompute as the cached BE count). |
| 2039 | |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2040 | // If HasNSW is true and all the operands are non-negative, infer HasNUW. |
| 2041 | if (!HasNUW && HasNSW) { |
| 2042 | bool All = true; |
| 2043 | for (unsigned i = 0, e = Operands.size(); i != e; ++i) |
| 2044 | if (!isKnownNonNegative(Operands[i])) { |
| 2045 | All = false; |
| 2046 | break; |
| 2047 | } |
| 2048 | if (All) HasNUW = true; |
| 2049 | } |
| 2050 | |
Dan Gohman | d9cc749 | 2008-08-08 18:33:12 +0000 | [diff] [blame] | 2051 | // Canonicalize nested AddRecs by nesting them in order of loop depth.
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2052 | if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 2053 | const Loop *NestedLoop = NestedAR->getLoop(); |
Dan Gohman | 9cba978 | 2010-08-13 20:23:25 +0000 | [diff] [blame] | 2054 | if (L->contains(NestedLoop) ? |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2055 | (L->getLoopDepth() < NestedLoop->getLoopDepth()) : |
Dan Gohman | 9cba978 | 2010-08-13 20:23:25 +0000 | [diff] [blame] | 2056 | (!NestedLoop->contains(L) && |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2057 | DT->dominates(L->getHeader(), NestedLoop->getHeader()))) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2058 | SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 2059 | NestedAR->op_end()); |
Dan Gohman | d9cc749 | 2008-08-08 18:33:12 +0000 | [diff] [blame] | 2060 | Operands[0] = NestedAR->getStart(); |
Dan Gohman | 9a80b45 | 2009-06-26 22:36:20 +0000 | [diff] [blame] | 2061 | // AddRecs require their operands be loop-invariant with respect to their |
| 2062 | // loops. Don't perform this transformation if it would break this |
| 2063 | // requirement. |
| 2064 | bool AllInvariant = true; |
| 2065 | for (unsigned i = 0, e = Operands.size(); i != e; ++i) |
| 2066 | if (!Operands[i]->isLoopInvariant(L)) { |
| 2067 | AllInvariant = false; |
| 2068 | break; |
| 2069 | } |
| 2070 | if (AllInvariant) { |
| 2071 | NestedOperands[0] = getAddRecExpr(Operands, L); |
| 2072 | AllInvariant = true; |
| 2073 | for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i) |
| 2074 | if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) { |
| 2075 | AllInvariant = false; |
| 2076 | break; |
| 2077 | } |
| 2078 | if (AllInvariant) |
| 2079 | // Ok, both add recurrences are valid after the transformation. |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 2080 | return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW); |
Dan Gohman | 9a80b45 | 2009-06-26 22:36:20 +0000 | [diff] [blame] | 2081 | } |
| 2082 | // Reset Operands to its original state. |
| 2083 | Operands[0] = NestedAR; |
Dan Gohman | d9cc749 | 2008-08-08 18:33:12 +0000 | [diff] [blame] | 2084 | } |
| 2085 | } |
| 2086 | |
Dan Gohman | 6784753 | 2010-01-19 22:27:22 +0000 | [diff] [blame] | 2087 | // Okay, it looks like we really DO need an addrec expr. Check to see if we |
| 2088 | // already have one, otherwise create a new one. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2089 | FoldingSetNodeID ID; |
| 2090 | ID.AddInteger(scAddRecExpr); |
| 2091 | ID.AddInteger(Operands.size()); |
| 2092 | for (unsigned i = 0, e = Operands.size(); i != e; ++i) |
| 2093 | ID.AddPointer(Operands[i]); |
| 2094 | ID.AddPointer(L); |
| 2095 | void *IP = 0; |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2096 | SCEVAddRecExpr *S = |
| 2097 | static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); |
| 2098 | if (!S) { |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 2099 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); |
| 2100 | std::uninitialized_copy(Operands.begin(), Operands.end(), O); |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 2101 | S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), |
| 2102 | O, Operands.size(), L); |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2103 | UniqueSCEVs.InsertNode(S, IP); |
| 2104 | } |
Dan Gohman | 3645b01 | 2009-10-09 00:10:36 +0000 | [diff] [blame] | 2105 | if (HasNUW) S->setHasNoUnsignedWrap(true); |
| 2106 | if (HasNSW) S->setHasNoSignedWrap(true); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2107 | return S; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2108 | } |
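For client code, the usual way to obtain an addrec is through these interfaces rather than by constructing SCEV nodes directly. A minimal sketch, assuming a pass that already has a ScalarEvolution reference and a Loop (getCanonicalIV is a hypothetical helper, not an existing API), builds the canonical induction expression {0,+,1}<L>:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

// Build {0,+,1}<L> of the given integer type. No NUW/NSW flags are claimed
// here; whether they hold depends on the loop's trip count.
static const SCEV *getCanonicalIV(ScalarEvolution &SE, const Loop *L,
                                  const Type *Ty) {
  const SCEV *Start = SE.getConstant(Ty, 0);
  const SCEV *Step = SE.getConstant(Ty, 1);
  return SE.getAddRecExpr(Start, Step, L);
}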
| 2109 | |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 2110 | const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, |
| 2111 | const SCEV *RHS) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2112 | SmallVector<const SCEV *, 2> Ops; |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2113 | Ops.push_back(LHS); |
| 2114 | Ops.push_back(RHS); |
| 2115 | return getSMaxExpr(Ops); |
| 2116 | } |
| 2117 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2118 | const SCEV * |
| 2119 | ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2120 | assert(!Ops.empty() && "Cannot get empty smax!"); |
| 2121 | if (Ops.size() == 1) return Ops[0]; |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2122 | #ifndef NDEBUG |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 2123 | const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2124 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 2125 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2126 | "SCEVSMaxExpr operand types don't match!"); |
| 2127 | #endif |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2128 | |
| 2129 | // Sort by complexity, this groups all similar expression types together. |
Dan Gohman | 7286130 | 2009-05-07 14:39:04 +0000 | [diff] [blame] | 2130 | GroupByComplexity(Ops, LI); |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2131 | |
| 2132 | // If there are any constants, fold them together. |
| 2133 | unsigned Idx = 0; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2134 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2135 | ++Idx; |
| 2136 | assert(Idx < Ops.size()); |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2137 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2138 | // We found two constants, fold them together! |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 2139 | ConstantInt *Fold = ConstantInt::get(getContext(), |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2140 | APIntOps::smax(LHSC->getValue()->getValue(), |
| 2141 | RHSC->getValue()->getValue())); |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2142 | Ops[0] = getConstant(Fold); |
| 2143 | Ops.erase(Ops.begin()+1); // Erase the folded element |
| 2144 | if (Ops.size() == 1) return Ops[0]; |
| 2145 | LHSC = cast<SCEVConstant>(Ops[0]); |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2146 | } |
| 2147 | |
Dan Gohman | e5aceed | 2009-06-24 14:46:22 +0000 | [diff] [blame] | 2148 | // If we are left with a constant minimum-int, strip it off. |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2149 | if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { |
| 2150 | Ops.erase(Ops.begin()); |
| 2151 | --Idx; |
Dan Gohman | e5aceed | 2009-06-24 14:46:22 +0000 | [diff] [blame] | 2152 | } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { |
| 2153 | // If we have an smax with a constant maximum-int, it will always be |
| 2154 | // maximum-int. |
| 2155 | return Ops[0]; |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2156 | } |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2157 | |
Dan Gohman | 3ab1312 | 2010-04-13 16:49:23 +0000 | [diff] [blame] | 2158 | if (Ops.size() == 1) return Ops[0]; |
| 2159 | } |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2160 | |
| 2161 | // Find the first SMax |
| 2162 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) |
| 2163 | ++Idx; |
| 2164 | |
| 2165 | // Check to see if one of the operands is an SMax. If so, expand its operands |
| 2166 | // onto our operand list, and recurse to simplify. |
| 2167 | if (Idx < Ops.size()) { |
| 2168 | bool DeletedSMax = false; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2169 | while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2170 | Ops.erase(Ops.begin()+Idx); |
Dan Gohman | 403a8cd | 2010-06-21 19:47:52 +0000 | [diff] [blame] | 2171 | Ops.append(SMax->op_begin(), SMax->op_end()); |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2172 | DeletedSMax = true; |
| 2173 | } |
| 2174 | |
| 2175 | if (DeletedSMax) |
| 2176 | return getSMaxExpr(Ops); |
| 2177 | } |
| 2178 | |
| 2179 | // Okay, check to see if the same value occurs in the operand list twice. If |
| 2180 | // so, delete one. Since we sorted the list, these values are required to |
| 2181 | // be adjacent. |
| 2182 | for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) |
Dan Gohman | 2828779 | 2010-04-13 16:51:03 +0000 | [diff] [blame] | 2183 | // X smax Y smax Y --> X smax Y |
| 2184 | // X smax Y --> X, if X is always greater than or equal to Y
| 2185 | if (Ops[i] == Ops[i+1] || |
| 2186 | isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { |
| 2187 | Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); |
| 2188 | --i; --e; |
| 2189 | } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2190 | Ops.erase(Ops.begin()+i, Ops.begin()+i+1); |
| 2191 | --i; --e; |
| 2192 | } |
| 2193 | |
| 2194 | if (Ops.size() == 1) return Ops[0]; |
| 2195 | |
| 2196 | assert(!Ops.empty() && "Reduced smax down to nothing!"); |
| 2197 | |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2198 | // Okay, it looks like we really DO need an smax expr. Check to see if we |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2199 | // already have one, otherwise create a new one. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2200 | FoldingSetNodeID ID; |
| 2201 | ID.AddInteger(scSMaxExpr); |
| 2202 | ID.AddInteger(Ops.size()); |
| 2203 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
| 2204 | ID.AddPointer(Ops[i]); |
| 2205 | void *IP = 0; |
| 2206 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 2207 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
| 2208 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 2209 | SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), |
| 2210 | O, Ops.size()); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2211 | UniqueSCEVs.InsertNode(S, IP); |
| 2212 | return S; |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2213 | } |
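The smax simplifications above mirror familiar integer identities. The sketch below (checkSMaxFolds is illustrative and uses plain int rather than SCEVs) restates them with std::max: duplicates collapse, a minimum-int operand is stripped, and a maximum-int operand absorbs everything.

#include <algorithm>
#include <assert.h>
#include <limits.h>

static void checkSMaxFolds(int X, int Y) {
  assert(std::max(std::max(X, Y), Y) == std::max(X, Y)); // X smax Y smax Y
  assert(std::max(X, INT_MIN) == X);                     // strip minimum-int
  assert(std::max(X, INT_MAX) == INT_MAX);               // absorb at maximum-int
  assert(std::max(X, X) == X);                           // duplicate operand
}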
| 2214 | |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 2215 | const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, |
| 2216 | const SCEV *RHS) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2217 | SmallVector<const SCEV *, 2> Ops; |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2218 | Ops.push_back(LHS); |
| 2219 | Ops.push_back(RHS); |
| 2220 | return getUMaxExpr(Ops); |
| 2221 | } |
| 2222 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2223 | const SCEV * |
| 2224 | ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2225 | assert(!Ops.empty() && "Cannot get empty umax!"); |
| 2226 | if (Ops.size() == 1) return Ops[0]; |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2227 | #ifndef NDEBUG |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 2228 | const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2229 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) |
Dan Gohman | c4f7798 | 2010-08-16 16:13:54 +0000 | [diff] [blame] | 2230 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && |
Dan Gohman | f78a978 | 2009-05-18 15:44:58 +0000 | [diff] [blame] | 2231 | "SCEVUMaxExpr operand types don't match!"); |
| 2232 | #endif |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2233 | |
| 2234 | // Sort by complexity, this groups all similar expression types together. |
Dan Gohman | 7286130 | 2009-05-07 14:39:04 +0000 | [diff] [blame] | 2235 | GroupByComplexity(Ops, LI); |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2236 | |
| 2237 | // If there are any constants, fold them together. |
| 2238 | unsigned Idx = 0; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2239 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2240 | ++Idx; |
| 2241 | assert(Idx < Ops.size()); |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2242 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2243 | // We found two constants, fold them together! |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 2244 | ConstantInt *Fold = ConstantInt::get(getContext(), |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2245 | APIntOps::umax(LHSC->getValue()->getValue(), |
| 2246 | RHSC->getValue()->getValue())); |
| 2247 | Ops[0] = getConstant(Fold); |
| 2248 | Ops.erase(Ops.begin()+1); // Erase the folded element |
| 2249 | if (Ops.size() == 1) return Ops[0]; |
| 2250 | LHSC = cast<SCEVConstant>(Ops[0]); |
| 2251 | } |
| 2252 | |
Dan Gohman | e5aceed | 2009-06-24 14:46:22 +0000 | [diff] [blame] | 2253 | // If we are left with a constant minimum-int, strip it off. |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2254 | if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { |
| 2255 | Ops.erase(Ops.begin()); |
| 2256 | --Idx; |
Dan Gohman | e5aceed | 2009-06-24 14:46:22 +0000 | [diff] [blame] | 2257 | } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { |
| 2258 | // If we have an umax with a constant maximum-int, it will always be |
| 2259 | // maximum-int. |
| 2260 | return Ops[0]; |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2261 | } |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2262 | |
Dan Gohman | 3ab1312 | 2010-04-13 16:49:23 +0000 | [diff] [blame] | 2263 | if (Ops.size() == 1) return Ops[0]; |
| 2264 | } |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2265 | |
| 2266 | // Find the first UMax |
| 2267 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) |
| 2268 | ++Idx; |
| 2269 | |
| 2270 | // Check to see if one of the operands is a UMax. If so, expand its operands |
| 2271 | // onto our operand list, and recurse to simplify. |
| 2272 | if (Idx < Ops.size()) { |
| 2273 | bool DeletedUMax = false; |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2274 | while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2275 | Ops.erase(Ops.begin()+Idx); |
Dan Gohman | 403a8cd | 2010-06-21 19:47:52 +0000 | [diff] [blame] | 2276 | Ops.append(UMax->op_begin(), UMax->op_end()); |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2277 | DeletedUMax = true; |
| 2278 | } |
| 2279 | |
| 2280 | if (DeletedUMax) |
| 2281 | return getUMaxExpr(Ops); |
| 2282 | } |
| 2283 | |
| 2284 | // Okay, check to see if the same value occurs in the operand list twice. If |
| 2285 | // so, delete one. Since we sorted the list, these values are required to |
| 2286 | // be adjacent. |
| 2287 | for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) |
Dan Gohman | 2828779 | 2010-04-13 16:51:03 +0000 | [diff] [blame] | 2288 | // X umax Y umax Y --> X umax Y |
| 2289 | // X umax Y --> X, if X is always greater than or equal to Y
| 2290 | if (Ops[i] == Ops[i+1] || |
| 2291 | isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { |
| 2292 | Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); |
| 2293 | --i; --e; |
| 2294 | } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2295 | Ops.erase(Ops.begin()+i, Ops.begin()+i+1); |
| 2296 | --i; --e; |
| 2297 | } |
| 2298 | |
| 2299 | if (Ops.size() == 1) return Ops[0]; |
| 2300 | |
| 2301 | assert(!Ops.empty() && "Reduced umax down to nothing!"); |
| 2302 | |
| 2303 | // Okay, it looks like we really DO need a umax expr. Check to see if we |
| 2304 | // already have one, otherwise create a new one. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2305 | FoldingSetNodeID ID; |
| 2306 | ID.AddInteger(scUMaxExpr); |
| 2307 | ID.AddInteger(Ops.size()); |
| 2308 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
| 2309 | ID.AddPointer(Ops[i]); |
| 2310 | void *IP = 0; |
| 2311 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
Dan Gohman | f9e6472 | 2010-03-18 01:17:13 +0000 | [diff] [blame] | 2312 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
| 2313 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
Dan Gohman | 9553188 | 2010-03-18 18:49:47 +0000 | [diff] [blame] | 2314 | SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), |
| 2315 | O, Ops.size()); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2316 | UniqueSCEVs.InsertNode(S, IP); |
| 2317 | return S; |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2318 | } |
| 2319 | |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 2320 | const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, |
| 2321 | const SCEV *RHS) { |
Dan Gohman | f9a9a99 | 2009-06-22 03:18:45 +0000 | [diff] [blame] | 2322 | // ~smax(~x, ~y) == smin(x, y). |
| 2323 | return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); |
| 2324 | } |
| 2325 | |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 2326 | const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, |
| 2327 | const SCEV *RHS) { |
Dan Gohman | f9a9a99 | 2009-06-22 03:18:45 +0000 | [diff] [blame] | 2328 | // ~umax(~x, ~y) == umin(x, y) |
| 2329 | return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); |
| 2330 | } |
| 2331 | |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 2332 | const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) { |
Dan Gohman | 6ab10f6 | 2010-04-12 23:03:26 +0000 | [diff] [blame] | 2333 | // If we have TargetData, we can bypass creating a target-independent |
| 2334 | // constant expression and then folding it back into a ConstantInt. |
| 2335 | // This is just a compile-time optimization. |
| 2336 | if (TD) |
| 2337 | return getConstant(TD->getIntPtrType(getContext()), |
| 2338 | TD->getTypeAllocSize(AllocTy)); |
| 2339 | |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 2340 | Constant *C = ConstantExpr::getSizeOf(AllocTy); |
| 2341 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) |
Dan Gohman | 7000122 | 2010-05-28 16:12:08 +0000 | [diff] [blame] | 2342 | if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) |
| 2343 | C = Folded; |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 2344 | const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); |
| 2345 | return getTruncateOrZeroExtend(getSCEV(C), Ty); |
| 2346 | } |
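A possible client-side use, sketched under the assumption that a ScalarEvolution reference is at hand (isSizeConstant is a hypothetical helper): with TargetData available the size expression above is always a SCEVConstant, while without it the ConstantExpr path may or may not fold to one.

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
using namespace llvm;

// Returns true if the type's allocation size folded to a plain constant.
static bool isSizeConstant(ScalarEvolution &SE, const Type *Ty) {
  return isa<SCEVConstant>(SE.getSizeOfExpr(Ty));
}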
| 2347 | |
| 2348 | const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) { |
| 2349 | Constant *C = ConstantExpr::getAlignOf(AllocTy); |
| 2350 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) |
Dan Gohman | 7000122 | 2010-05-28 16:12:08 +0000 | [diff] [blame] | 2351 | if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) |
| 2352 | C = Folded; |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 2353 | const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); |
| 2354 | return getTruncateOrZeroExtend(getSCEV(C), Ty); |
| 2355 | } |
| 2356 | |
| 2357 | const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy, |
| 2358 | unsigned FieldNo) { |
Dan Gohman | 6ab10f6 | 2010-04-12 23:03:26 +0000 | [diff] [blame] | 2359 | // If we have TargetData, we can bypass creating a target-independent |
| 2360 | // constant expression and then folding it back into a ConstantInt. |
| 2361 | // This is just a compile-time optimization. |
| 2362 | if (TD) |
| 2363 | return getConstant(TD->getIntPtrType(getContext()), |
| 2364 | TD->getStructLayout(STy)->getElementOffset(FieldNo)); |
| 2365 | |
Dan Gohman | 0f5efe5 | 2010-01-28 02:15:55 +0000 | [diff] [blame] | 2366 | Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo); |
| 2367 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) |
Dan Gohman | 7000122 | 2010-05-28 16:12:08 +0000 | [diff] [blame] | 2368 | if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) |
| 2369 | C = Folded; |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2370 | const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy)); |
Dan Gohman | 0f5efe5 | 2010-01-28 02:15:55 +0000 | [diff] [blame] | 2371 | return getTruncateOrZeroExtend(getSCEV(C), Ty); |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2372 | } |
| 2373 | |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 2374 | const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy, |
| 2375 | Constant *FieldNo) { |
| 2376 | Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo); |
Dan Gohman | 0f5efe5 | 2010-01-28 02:15:55 +0000 | [diff] [blame] | 2377 | if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) |
Dan Gohman | 7000122 | 2010-05-28 16:12:08 +0000 | [diff] [blame] | 2378 | if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) |
| 2379 | C = Folded; |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 2380 | const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy)); |
Dan Gohman | 0f5efe5 | 2010-01-28 02:15:55 +0000 | [diff] [blame] | 2381 | return getTruncateOrZeroExtend(getSCEV(C), Ty); |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2382 | } |
| 2383 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2384 | const SCEV *ScalarEvolution::getUnknown(Value *V) { |
Dan Gohman | 6bbcba1 | 2009-06-24 00:54:57 +0000 | [diff] [blame] | 2385 | // Don't attempt to do anything other than create a SCEVUnknown object |
| 2386 | // here. createSCEV only calls getUnknown after checking for all other |
| 2387 | // interesting possibilities, and any other code that calls getUnknown |
| 2388 | // is doing so in order to hide a value from SCEV canonicalization. |
| 2389 | |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2390 | FoldingSetNodeID ID; |
| 2391 | ID.AddInteger(scUnknown); |
| 2392 | ID.AddPointer(V); |
| 2393 | void *IP = 0; |
Dan Gohman | ab37f50 | 2010-08-02 23:49:30 +0000 | [diff] [blame] | 2394 | if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { |
| 2395 | assert(cast<SCEVUnknown>(S)->getValue() == V && |
| 2396 | "Stale SCEVUnknown in uniquing map!"); |
| 2397 | return S; |
| 2398 | } |
| 2399 | SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, |
| 2400 | FirstUnknown); |
| 2401 | FirstUnknown = cast<SCEVUnknown>(S); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2402 | UniqueSCEVs.InsertNode(S, IP); |
| 2403 | return S; |
Chris Lattner | 0a7f98c | 2004-04-15 15:07:24 +0000 | [diff] [blame] | 2404 | } |
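Every factory above follows the same uniquing pattern: build a FoldingSetNodeID key, probe UniqueSCEVs, and only allocate a node when no structurally identical one exists, which is what makes pointer comparison of SCEVs meaningful. A toy version of the idiom (ToyExpr, getUnique, and the std::map are stand-ins for the real FoldingSet and BumpPtrAllocator) looks like this:

#include <map>
#include <utility>

struct ToyExpr {
  unsigned Kind;
  const void *Op;
};

// Return the unique ToyExpr for (Kind, Op); repeated requests yield the same
// pointer, so clients can compare expressions with ==.
static const ToyExpr *getUnique(unsigned Kind, const void *Op) {
  static std::map<std::pair<unsigned, const void *>, ToyExpr *> Table;
  ToyExpr *&Slot = Table[std::make_pair(Kind, Op)];
  if (!Slot) {
    Slot = new ToyExpr();  // never freed here, like an arena allocation
    Slot->Kind = Kind;
    Slot->Op = Op;
  }
  return Slot;
}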
| 2405 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2406 | //===----------------------------------------------------------------------===// |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2407 | // Basic SCEV Analysis and PHI Idiom Recognition Code |
| 2408 | // |
| 2409 | |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2410 | /// isSCEVable - Test if values of the given type are analyzable within |
| 2411 | /// the SCEV framework. This primarily includes integer types, and it |
| 2412 | /// can optionally include pointer types if the ScalarEvolution class |
| 2413 | /// has access to target-specific information. |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2414 | bool ScalarEvolution::isSCEVable(const Type *Ty) const { |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2415 | // Integers and pointers are always SCEVable. |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2416 | return Ty->isIntegerTy() || Ty->isPointerTy(); |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2417 | } |
| 2418 | |
| 2419 | /// getTypeSizeInBits - Return the size in bits of the specified type, |
| 2420 | /// for which isSCEVable must return true. |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2421 | uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const { |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2422 | assert(isSCEVable(Ty) && "Type is not SCEVable!"); |
| 2423 | |
| 2424 | // If we have a TargetData, use it! |
| 2425 | if (TD) |
| 2426 | return TD->getTypeSizeInBits(Ty); |
| 2427 | |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2428 | // Integer types have fixed sizes. |
Duncan Sands | b0bc6c3 | 2010-02-15 16:12:20 +0000 | [diff] [blame] | 2429 | if (Ty->isIntegerTy()) |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2430 | return Ty->getPrimitiveSizeInBits(); |
| 2431 | |
| 2432 | // The only other supported type is pointer. Without TargetData, conservatively
| 2433 | // assume pointers are 64-bit. |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2434 | assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!"); |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2435 | return 64; |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2436 | } |
| 2437 | |
| 2438 | /// getEffectiveSCEVType - Return a type with the same bitwidth as |
| 2439 | /// the given type and which represents how SCEV will treat the given |
| 2440 | /// type, for which isSCEVable must return true. For pointer types, |
| 2441 | /// this is the pointer-sized integer type. |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2442 | const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2443 | assert(isSCEVable(Ty) && "Type is not SCEVable!"); |
| 2444 | |
Duncan Sands | b0bc6c3 | 2010-02-15 16:12:20 +0000 | [diff] [blame] | 2445 | if (Ty->isIntegerTy()) |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2446 | return Ty; |
| 2447 | |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2448 | // The only other supported type is pointer.
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2449 | assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2450 | if (TD) return TD->getIntPtrType(getContext()); |
| 2451 | |
| 2452 | // Without TargetData, conservatively assume pointers are 64-bit. |
| 2453 | return Type::getInt64Ty(getContext()); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2454 | } |
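
// A minimal standalone sketch of the type mapping performed by
// getEffectiveSCEVType: integer types map to themselves, pointer types map
// to a pointer-sized integer, and 64 bits is the conservative fallback used
// when no TargetData is available. ToyTypeKind and toyEffectiveBits are
// hypothetical names, not LLVM API.
enum ToyTypeKind { ToyInt32, ToyInt64, ToyPointer };

static unsigned toyEffectiveBits(ToyTypeKind Kind, bool HaveTargetData,
                                 unsigned TargetPointerBits) {
  if (Kind == ToyInt32) return 32;   // integers keep their own width
  if (Kind == ToyInt64) return 64;
  // Pointers: use target information when present, else assume 64 bits.
  return HaveTargetData ? TargetPointerBits : 64;
}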
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2455 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2456 | const SCEV *ScalarEvolution::getCouldNotCompute() { |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 2457 | return &CouldNotCompute; |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 2458 | } |
| 2459 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2460 | /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the |
| 2461 | /// expression and create a new one. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2462 | const SCEV *ScalarEvolution::getSCEV(Value *V) { |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2463 | assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2464 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2465 | std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2466 | if (I != Scalars.end()) return I->second; |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2467 | const SCEV *S = createSCEV(V); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 2468 | Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S)); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2469 | return S; |
| 2470 | } |
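
// A standalone sketch of the lookup-or-create caching in getSCEV: the
// analysis for a value runs at most once and the result is then reused.
// ToyExpr, toyAnalyze, and toyGetExpr are hypothetical names.
#include <map>
#include <utility>

struct ToyExpr { unsigned Kind; };

static ToyExpr toyAnalyze(const void *V) {      // stand-in for createSCEV
  ToyExpr E;
  E.Kind = V ? 1u : 0u;
  return E;
}

static ToyExpr toyGetExpr(std::map<const void *, ToyExpr> &Cache,
                          const void *V) {
  std::map<const void *, ToyExpr>::iterator I = Cache.find(V);
  if (I != Cache.end())
    return I->second;                           // already analyzed
  ToyExpr E = toyAnalyze(V);                    // first request: analyze...
  Cache.insert(std::make_pair(V, E));           // ...and remember the result
  return E;
}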
| 2471 | |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2472 | /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V |
| 2473 | /// |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2474 | const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) { |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2475 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) |
Owen Anderson | 0a5372e | 2009-07-13 04:09:18 +0000 | [diff] [blame] | 2476 | return getConstant( |
Owen Anderson | baf3c40 | 2009-07-29 18:55:55 +0000 | [diff] [blame] | 2477 | cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2478 | |
| 2479 | const Type *Ty = V->getType(); |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2480 | Ty = getEffectiveSCEVType(Ty); |
Owen Anderson | 73c6b71 | 2009-07-13 20:58:05 +0000 | [diff] [blame] | 2481 | return getMulExpr(V, |
Owen Anderson | a7235ea | 2009-07-31 20:28:14 +0000 | [diff] [blame] | 2482 | getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)))); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2483 | } |
| 2484 | |
| 2485 | /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2486 | const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2487 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) |
Owen Anderson | 73c6b71 | 2009-07-13 20:58:05 +0000 | [diff] [blame] | 2488 | return getConstant( |
Owen Anderson | baf3c40 | 2009-07-29 18:55:55 +0000 | [diff] [blame] | 2489 | cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2490 | |
| 2491 | const Type *Ty = V->getType(); |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2492 | Ty = getEffectiveSCEVType(Ty); |
Owen Anderson | 73c6b71 | 2009-07-13 20:58:05 +0000 | [diff] [blame] | 2493 | const SCEV *AllOnes = |
Owen Anderson | a7235ea | 2009-07-31 20:28:14 +0000 | [diff] [blame] | 2494 | getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2495 | return getMinusSCEV(AllOnes, V); |
| 2496 | } |
| 2497 | |
| 2498 | /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS. |
| 2499 | /// |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 2500 | const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, |
| 2501 | const SCEV *RHS) { |
Dan Gohman | eb4152c | 2010-07-20 16:53:00 +0000 | [diff] [blame] | 2502 | // Fast path: X - X --> 0. |
| 2503 | if (LHS == RHS) |
| 2504 | return getConstant(LHS->getType(), 0); |
| 2505 | |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2506 | // X - Y --> X + -Y |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2507 | return getAddExpr(LHS, getNegativeSCEV(RHS)); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2508 | } |
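
// getNegativeSCEV, getNotSCEV, and getMinusSCEV lean on the two's-complement
// identities -V == -1*V, ~V == -1-V, and X - Y == X + (-Y). The standalone
// check below exercises them in the modular arithmetic of a fixed-width
// unsigned type; toyCheckIdentities is a hypothetical helper.
#include <cassert>

static void toyCheckIdentities(unsigned X, unsigned Y, unsigned V) {
  const unsigned AllOnes = ~0u;          // the all-ones value, i.e. "-1"
  const unsigned NegV = 0u - V;          // "-V" in modular arithmetic
  assert(NegV == AllOnes * V);           // -V    == -1 * V
  assert(~V == AllOnes - V);             // ~V    == -1 - V
  assert(X - Y == X + NegV);             // X - Y == X + (-Y)
}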
| 2509 | |
| 2510 | /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the |
| 2511 | /// input value to the specified type. If the type must be extended, it is zero |
| 2512 | /// extended. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2513 | const SCEV * |
| 2514 | ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, |
Nick Lewycky | 5cd28fa | 2009-04-23 05:15:08 +0000 | [diff] [blame] | 2515 | const Type *Ty) { |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2516 | const Type *SrcTy = V->getType(); |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2517 | assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && |
| 2518 | (Ty->isIntegerTy() || Ty->isPointerTy()) && |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2519 | "Cannot truncate or zero extend with non-integer arguments!"); |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2520 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2521 | return V; // No conversion |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2522 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2523 | return getTruncateExpr(V, Ty); |
| 2524 | return getZeroExtendExpr(V, Ty); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2525 | } |
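
// A minimal sketch of the width dispatch in getTruncateOrZeroExtend: equal
// widths are a no-op, narrowing truncates to the low bits, widening
// zero-extends. The payload is carried in a uint64_t here; toyTruncOrZext
// is a hypothetical helper, not part of ScalarEvolution.
#include <cassert>
#include <stdint.h>

static uint64_t toyTruncOrZext(uint64_t Value, unsigned SrcBits,
                               unsigned DstBits) {
  assert(SrcBits <= 64 && DstBits <= 64 && "widths beyond uint64_t");
  if (SrcBits == DstBits)
    return Value;                              // no conversion needed
  if (SrcBits > DstBits)                       // truncation keeps the low bits
    return Value & ((uint64_t(1) << DstBits) - 1);
  return Value;                                // zero extension adds zero bits
}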
| 2526 | |
| 2527 | /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the |
| 2528 | /// input value to the specified type. If the type must be extended, it is sign |
| 2529 | /// extended. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2530 | const SCEV * |
| 2531 | ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, |
Nick Lewycky | 5cd28fa | 2009-04-23 05:15:08 +0000 | [diff] [blame] | 2532 | const Type *Ty) { |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2533 | const Type *SrcTy = V->getType(); |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2534 | assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && |
| 2535 | (Ty->isIntegerTy() || Ty->isPointerTy()) && |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2536 | "Cannot truncate or sign extend with non-integer arguments!");
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2537 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2538 | return V; // No conversion |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 2539 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2540 | return getTruncateExpr(V, Ty); |
| 2541 | return getSignExtendExpr(V, Ty); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 2542 | } |
| 2543 | |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2544 | /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the |
| 2545 | /// input value to the specified type. If the type must be extended, it is zero |
| 2546 | /// extended. The conversion must not be narrowing. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2547 | const SCEV * |
| 2548 | ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) { |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2549 | const Type *SrcTy = V->getType(); |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2550 | assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && |
| 2551 | (Ty->isIntegerTy() || Ty->isPointerTy()) && |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2552 | "Cannot noop or zero extend with non-integer arguments!"); |
| 2553 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && |
| 2554 | "getNoopOrZeroExtend cannot truncate!"); |
| 2555 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
| 2556 | return V; // No conversion |
| 2557 | return getZeroExtendExpr(V, Ty); |
| 2558 | } |
| 2559 | |
| 2560 | /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the |
| 2561 | /// input value to the specified type. If the type must be extended, it is sign |
| 2562 | /// extended. The conversion must not be narrowing. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2563 | const SCEV * |
| 2564 | ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) { |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2565 | const Type *SrcTy = V->getType(); |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2566 | assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && |
| 2567 | (Ty->isIntegerTy() || Ty->isPointerTy()) && |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2568 | "Cannot noop or sign extend with non-integer arguments!"); |
| 2569 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && |
| 2570 | "getNoopOrSignExtend cannot truncate!"); |
| 2571 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
| 2572 | return V; // No conversion |
| 2573 | return getSignExtendExpr(V, Ty); |
| 2574 | } |
| 2575 | |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 2576 | /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of |
| 2577 | /// the input value to the specified type. If the type must be extended, |
| 2578 | /// it is extended with unspecified bits. The conversion must not be |
| 2579 | /// narrowing. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2580 | const SCEV * |
| 2581 | ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) { |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 2582 | const Type *SrcTy = V->getType(); |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2583 | assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && |
| 2584 | (Ty->isIntegerTy() || Ty->isPointerTy()) && |
Dan Gohman | 2ce84c8d | 2009-06-13 15:56:47 +0000 | [diff] [blame] | 2585 | "Cannot noop or any extend with non-integer arguments!"); |
| 2586 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && |
| 2587 | "getNoopOrAnyExtend cannot truncate!"); |
| 2588 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
| 2589 | return V; // No conversion |
| 2590 | return getAnyExtendExpr(V, Ty); |
| 2591 | } |
| 2592 | |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2593 | /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the |
| 2594 | /// input value to the specified type. The conversion must not be widening. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2595 | const SCEV * |
| 2596 | ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) { |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2597 | const Type *SrcTy = V->getType(); |
Duncan Sands | 1df9859 | 2010-02-16 11:11:14 +0000 | [diff] [blame] | 2598 | assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && |
| 2599 | (Ty->isIntegerTy() || Ty->isPointerTy()) && |
Dan Gohman | 467c430 | 2009-05-13 03:46:30 +0000 | [diff] [blame] | 2600 | "Cannot truncate or noop with non-integer arguments!"); |
| 2601 | assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && |
| 2602 | "getTruncateOrNoop cannot extend!"); |
| 2603 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
| 2604 | return V; // No conversion |
| 2605 | return getTruncateExpr(V, Ty); |
| 2606 | } |
| 2607 | |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 2608 | /// getUMaxFromMismatchedTypes - Promote the operands to the wider of |
| 2609 | /// the types using zero-extension, and then perform a umax operation |
| 2610 | /// with them. |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 2611 | const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, |
| 2612 | const SCEV *RHS) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2613 | const SCEV *PromotedLHS = LHS; |
| 2614 | const SCEV *PromotedRHS = RHS; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 2615 | |
| 2616 | if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) |
| 2617 | PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); |
| 2618 | else |
| 2619 | PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); |
| 2620 | |
| 2621 | return getUMaxExpr(PromotedLHS, PromotedRHS); |
| 2622 | } |
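
// Standalone sketch of getUMaxFromMismatchedTypes (and its umin sibling
// below): the narrower operand is zero-extended to the wider width, which
// preserves its unsigned value, and then an ordinary unsigned max is taken.
// The helper names are hypothetical.
#include <algorithm>
#include <stdint.h>

static uint64_t toyZeroExtendTo64(uint32_t Narrow) {
  return Narrow;                 // zero extension preserves the unsigned value
}

static uint64_t toyUMaxMismatched(uint32_t Narrow, uint64_t Wide) {
  return std::max(toyZeroExtendTo64(Narrow), Wide);
}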
| 2623 | |
Dan Gohman | c9759e8 | 2009-06-22 15:03:27 +0000 | [diff] [blame] | 2624 | /// getUMinFromMismatchedTypes - Promote the operands to the wider of |
| 2625 | /// the types using zero-extension, and then perform a umin operation |
| 2626 | /// with them. |
Dan Gohman | 9311ef6 | 2009-06-24 14:49:00 +0000 | [diff] [blame] | 2627 | const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, |
| 2628 | const SCEV *RHS) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2629 | const SCEV *PromotedLHS = LHS; |
| 2630 | const SCEV *PromotedRHS = RHS; |
Dan Gohman | c9759e8 | 2009-06-22 15:03:27 +0000 | [diff] [blame] | 2631 | |
| 2632 | if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) |
| 2633 | PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); |
| 2634 | else |
| 2635 | PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); |
| 2636 | |
| 2637 | return getUMinExpr(PromotedLHS, PromotedRHS); |
| 2638 | } |
| 2639 | |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2640 | /// PushDefUseChildren - Push users of the given Instruction |
| 2641 | /// onto the given Worklist. |
| 2642 | static void |
| 2643 | PushDefUseChildren(Instruction *I, |
| 2644 | SmallVectorImpl<Instruction *> &Worklist) { |
| 2645 | // Push the def-use children onto the Worklist stack. |
| 2646 | for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); |
| 2647 | UI != UE; ++UI) |
Gabor Greif | 96f1d8e | 2010-07-22 13:36:47 +0000 | [diff] [blame] | 2648 | Worklist.push_back(cast<Instruction>(*UI)); |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2649 | } |
| 2650 | |
| 2651 | /// ForgetSymbolicName - This looks up computed SCEV values for all
| 2652 | /// instructions that depend on the given instruction and removes them from |
| 2653 | /// the Scalars map if they reference SymName. This is used during PHI |
| 2654 | /// resolution. |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 2655 | void |
Dan Gohman | 8566963 | 2010-02-25 06:57:05 +0000 | [diff] [blame] | 2656 | ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) { |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2657 | SmallVector<Instruction *, 16> Worklist; |
Dan Gohman | 8566963 | 2010-02-25 06:57:05 +0000 | [diff] [blame] | 2658 | PushDefUseChildren(PN, Worklist); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2659 | |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2660 | SmallPtrSet<Instruction *, 8> Visited; |
Dan Gohman | 8566963 | 2010-02-25 06:57:05 +0000 | [diff] [blame] | 2661 | Visited.insert(PN); |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2662 | while (!Worklist.empty()) { |
Dan Gohman | 8566963 | 2010-02-25 06:57:05 +0000 | [diff] [blame] | 2663 | Instruction *I = Worklist.pop_back_val(); |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2664 | if (!Visited.insert(I)) continue; |
Chris Lattner | 4dc534c | 2005-02-13 04:37:18 +0000 | [diff] [blame] | 2665 | |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 2666 | std::map<SCEVCallbackVH, const SCEV *>::iterator It = |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2667 | Scalars.find(static_cast<Value *>(I)); |
| 2668 | if (It != Scalars.end()) { |
| 2669 | // Short-circuit the def-use traversal if the symbolic name |
| 2670 | // ceases to appear in expressions. |
Dan Gohman | 50922bb | 2010-02-15 10:28:37 +0000 | [diff] [blame] | 2671 | if (It->second != SymName && !It->second->hasOperand(SymName)) |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2672 | continue; |
Chris Lattner | 4dc534c | 2005-02-13 04:37:18 +0000 | [diff] [blame] | 2673 | |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2674 | // SCEVUnknown for a PHI either means that it has an unrecognized |
Dan Gohman | 8566963 | 2010-02-25 06:57:05 +0000 | [diff] [blame] | 2675 | // structure, it's a PHI that's in the process of being computed
| 2676 | // by createNodeForPHI, or it's a single-value PHI. In the first case, |
| 2677 | // additional loop trip count information isn't going to change anything. |
| 2678 | // In the second case, createNodeForPHI will perform the necessary |
| 2679 | // updates on its own when it gets to that point. In the third, we do |
| 2680 | // want to forget the SCEVUnknown. |
| 2681 | if (!isa<PHINode>(I) || |
| 2682 | !isa<SCEVUnknown>(It->second) || |
| 2683 | (I != PN && It->second == SymName)) { |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 2684 | ValuesAtScopes.erase(It->second); |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2685 | Scalars.erase(It); |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 2686 | } |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2687 | } |
| 2688 | |
| 2689 | PushDefUseChildren(I, Worklist); |
| 2690 | } |
Chris Lattner | 4dc534c | 2005-02-13 04:37:18 +0000 | [diff] [blame] | 2691 | } |
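
// ForgetSymbolicName above is a standard worklist walk over the def-use
// graph: seed the worklist with the users of the starting instruction, skip
// anything already visited, and keep following user edges. The standalone
// sketch below shows the same traversal over a hypothetical adjacency list
// (ToyUserMap); it is not tied to LLVM's Value/Use machinery.
#include <map>
#include <set>
#include <vector>

typedef std::map<int, std::vector<int> > ToyUserMap; // node -> its users

static std::set<int> toyReachableUsers(const ToyUserMap &Users, int Start) {
  std::set<int> Visited;
  std::vector<int> Worklist;
  ToyUserMap::const_iterator It = Users.find(Start);
  if (It != Users.end())
    Worklist.insert(Worklist.end(), It->second.begin(), It->second.end());
  while (!Worklist.empty()) {
    int N = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(N).second)
      continue;                              // already processed this node
    It = Users.find(N);
    if (It != Users.end())
      Worklist.insert(Worklist.end(), It->second.begin(), It->second.end());
  }
  return Visited;
}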
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2692 | |
| 2693 | /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in |
| 2694 | /// a loop header, making it a potential recurrence, or it doesn't. |
| 2695 | /// |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2696 | const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { |
Dan Gohman | 27dead4 | 2010-04-12 07:49:36 +0000 | [diff] [blame] | 2697 | if (const Loop *L = LI->getLoopFor(PN->getParent())) |
| 2698 | if (L->getHeader() == PN->getParent()) { |
| 2699 | // The loop may have multiple entrances or multiple exits; we can analyze |
| 2700 | // this phi as an addrec if it has a unique entry value and a unique |
| 2701 | // backedge value. |
| 2702 | Value *BEValueV = 0, *StartValueV = 0; |
| 2703 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
| 2704 | Value *V = PN->getIncomingValue(i); |
| 2705 | if (L->contains(PN->getIncomingBlock(i))) { |
| 2706 | if (!BEValueV) { |
| 2707 | BEValueV = V; |
| 2708 | } else if (BEValueV != V) { |
| 2709 | BEValueV = 0; |
| 2710 | break; |
| 2711 | } |
| 2712 | } else if (!StartValueV) { |
| 2713 | StartValueV = V; |
| 2714 | } else if (StartValueV != V) { |
| 2715 | StartValueV = 0; |
| 2716 | break; |
| 2717 | } |
| 2718 | } |
| 2719 | if (BEValueV && StartValueV) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2720 | // While we are analyzing this PHI node, handle its value symbolically. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2721 | const SCEV *SymbolicName = getUnknown(PN); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2722 | assert(Scalars.find(PN) == Scalars.end() && |
| 2723 | "PHI node already processed?"); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 2724 | Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2725 | |
| 2726 | // Using this symbolic name for the PHI, analyze the value coming around |
| 2727 | // the back-edge. |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2728 | const SCEV *BEValue = getSCEV(BEValueV); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2729 | |
| 2730 | // NOTE: If BEValue is loop invariant, we know that the PHI node just |
| 2731 | // has a special value for the first iteration of the loop. |
| 2732 | |
| 2733 | // If the value coming around the backedge is an add with the symbolic |
| 2734 | // value we just inserted, then we found a simple induction variable! |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2735 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2736 | // If there is a single occurrence of the symbolic value, replace it |
| 2737 | // with a recurrence. |
| 2738 | unsigned FoundIndex = Add->getNumOperands(); |
| 2739 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) |
| 2740 | if (Add->getOperand(i) == SymbolicName) |
| 2741 | if (FoundIndex == e) { |
| 2742 | FoundIndex = i; |
| 2743 | break; |
| 2744 | } |
| 2745 | |
| 2746 | if (FoundIndex != Add->getNumOperands()) { |
| 2747 | // Create an add with everything but the specified operand. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2748 | SmallVector<const SCEV *, 8> Ops; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2749 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) |
| 2750 | if (i != FoundIndex) |
| 2751 | Ops.push_back(Add->getOperand(i)); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2752 | const SCEV *Accum = getAddExpr(Ops); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2753 | |
| 2754 | // This is not a valid addrec if the step amount is varying each |
| 2755 | // loop iteration, but is not itself an addrec in this loop. |
| 2756 | if (Accum->isLoopInvariant(L) || |
| 2757 | (isa<SCEVAddRecExpr>(Accum) && |
| 2758 | cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2759 | bool HasNUW = false; |
| 2760 | bool HasNSW = false; |
| 2761 | |
| 2762 | // If the increment doesn't overflow, then neither the addrec nor |
| 2763 | // the post-increment will overflow. |
| 2764 | if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) { |
| 2765 | if (OBO->hasNoUnsignedWrap()) |
| 2766 | HasNUW = true; |
| 2767 | if (OBO->hasNoSignedWrap()) |
| 2768 | HasNSW = true; |
| 2769 | } |
| 2770 | |
Dan Gohman | 27dead4 | 2010-04-12 07:49:36 +0000 | [diff] [blame] | 2771 | const SCEV *StartVal = getSCEV(StartValueV); |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2772 | const SCEV *PHISCEV = |
| 2773 | getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW); |
Dan Gohman | eb490a7 | 2009-07-25 01:22:26 +0000 | [diff] [blame] | 2774 | |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 2775 | // Since the no-wrap flags are on the increment, they apply to the |
| 2776 | // post-incremented value as well. |
| 2777 | if (Accum->isLoopInvariant(L)) |
| 2778 | (void)getAddRecExpr(getAddExpr(StartVal, Accum), |
| 2779 | Accum, L, HasNUW, HasNSW); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2780 | |
| 2781 | // Okay, for the entire analysis of this edge we assumed the PHI |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2782 | // to be symbolic. We now need to go back and purge all of the |
| 2783 | // entries for the scalars that use the symbolic expression. |
| 2784 | ForgetSymbolicName(PN, SymbolicName); |
| 2785 | Scalars[SCEVCallbackVH(PN, this)] = PHISCEV; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2786 | return PHISCEV; |
| 2787 | } |
| 2788 | } |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2789 | } else if (const SCEVAddRecExpr *AddRec = |
| 2790 | dyn_cast<SCEVAddRecExpr>(BEValue)) { |
Chris Lattner | 97156e7 | 2006-04-26 18:34:07 +0000 | [diff] [blame] | 2791 | // Otherwise, this could be a loop like this: |
| 2792 | // i = 0; for (j = 1; ..; ++j) { .... i = j; } |
| 2793 | // In this case, j = {1,+,1} and BEValue is j. |
| 2794 | // Because the other in-value of i (0) fits the evolution of BEValue,
| 2795 | // i really is an addrec evolution. |
| 2796 | if (AddRec->getLoop() == L && AddRec->isAffine()) { |
Dan Gohman | 27dead4 | 2010-04-12 07:49:36 +0000 | [diff] [blame] | 2797 | const SCEV *StartVal = getSCEV(StartValueV); |
Chris Lattner | 97156e7 | 2006-04-26 18:34:07 +0000 | [diff] [blame] | 2798 | |
| 2799 | // If StartVal = j.start - j.stride, we can use StartVal as the |
| 2800 | // starting value of the addrec evolution.
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2801 | if (StartVal == getMinusSCEV(AddRec->getOperand(0), |
Dan Gohman | 5ee60f7 | 2010-04-11 23:44:58 +0000 | [diff] [blame] | 2802 | AddRec->getOperand(1))) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2803 | const SCEV *PHISCEV = |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2804 | getAddRecExpr(StartVal, AddRec->getOperand(1), L); |
Chris Lattner | 97156e7 | 2006-04-26 18:34:07 +0000 | [diff] [blame] | 2805 | |
| 2806 | // Okay, for the entire analysis of this edge we assumed the PHI |
Dan Gohman | fef8bb2 | 2009-07-25 01:13:03 +0000 | [diff] [blame] | 2807 | // to be symbolic. We now need to go back and purge all of the |
| 2808 | // entries for the scalars that use the symbolic expression. |
| 2809 | ForgetSymbolicName(PN, SymbolicName); |
| 2810 | Scalars[SCEVCallbackVH(PN, this)] = PHISCEV; |
Chris Lattner | 97156e7 | 2006-04-26 18:34:07 +0000 | [diff] [blame] | 2811 | return PHISCEV; |
| 2812 | } |
| 2813 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2814 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2815 | } |
Dan Gohman | 27dead4 | 2010-04-12 07:49:36 +0000 | [diff] [blame] | 2816 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 2817 | |
Dan Gohman | 8566963 | 2010-02-25 06:57:05 +0000 | [diff] [blame] | 2818 | // If the PHI has a single incoming value, follow that value, unless any of
| 2819 | // the PHI's incoming blocks is in a different loop, in which case doing so
| 2820 | // risks breaking LCSSA form. Instcombine would normally zap these, but |
| 2821 | // it doesn't have DominatorTree information, so it may miss cases. |
| 2822 | if (Value *V = PN->hasConstantValue(DT)) { |
| 2823 | bool AllSameLoop = true; |
| 2824 | Loop *PNLoop = LI->getLoopFor(PN->getParent()); |
| 2825 | for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i) |
| 2826 | if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) { |
| 2827 | AllSameLoop = false; |
| 2828 | break; |
| 2829 | } |
| 2830 | if (AllSameLoop) |
| 2831 | return getSCEV(V); |
| 2832 | } |
Dan Gohman | a653fc5 | 2009-07-14 14:06:25 +0000 | [diff] [blame] | 2833 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2834 | // If it's not a loop phi, we can't handle it yet. |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 2835 | return getUnknown(PN); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2836 | } |
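
// The addrec recognition above corresponds to a source loop of the form
// "for (i = Start; ...; i += Step)": the PHI has a unique entry value Start
// and a backedge value equal to the PHI plus Step, so it evolves as
// {Start,+,Step}, i.e. Start + Iteration * Step. The standalone check below
// verifies that closed form; toyAddRecValue and toyCheckAddRec are
// hypothetical helpers.
#include <cassert>
#include <stdint.h>

static uint64_t toyAddRecValue(uint64_t Start, uint64_t Step,
                               uint64_t Iteration) {
  return Start + Iteration * Step;         // closed form of {Start,+,Step}
}

static void toyCheckAddRec() {
  uint64_t IV = 7;                         // the unique entry value (Start)
  for (uint64_t N = 0; N != 16; ++N) {
    assert(IV == toyAddRecValue(7, 3, N));
    IV += 3;                               // the backedge value: PHI + Step
  }
}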
| 2837 | |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 2838 | /// createNodeForGEP - Expand GEP instructions into add and multiply |
| 2839 | /// operations. This allows them to be analyzed by regular SCEV code. |
| 2840 | /// |
Dan Gohman | d281ed2 | 2009-12-18 02:09:29 +0000 | [diff] [blame] | 2841 | const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 2842 | |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2843 | // Don't blindly transfer the inbounds flag from the GEP instruction to the |
| 2844 | // Add expression, because the Instruction may be guarded by control flow |
| 2845 | // and the no-overflow bits may not be valid for the expression in any |
Dan Gohman | 70eff63 | 2010-06-30 17:27:11 +0000 | [diff] [blame] | 2846 | // context. |
Dan Gohman | 7a64257 | 2010-06-29 01:41:41 +0000 | [diff] [blame] | 2847 | |
Dan Gohman | c40f17b | 2009-08-18 16:46:41 +0000 | [diff] [blame] | 2848 | const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType()); |
Dan Gohman | e810b0d | 2009-05-08 20:36:47 +0000 | [diff] [blame] | 2849 | Value *Base = GEP->getOperand(0); |
Dan Gohman | c63a627 | 2009-05-09 00:14:52 +0000 | [diff] [blame] | 2850 | // Don't attempt to analyze GEPs over unsized objects. |
| 2851 | if (!cast<PointerType>(Base->getType())->getElementType()->isSized()) |
| 2852 | return getUnknown(GEP); |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 2853 | const SCEV *TotalOffset = getConstant(IntPtrTy, 0); |
Dan Gohman | e810b0d | 2009-05-08 20:36:47 +0000 | [diff] [blame] | 2854 | gep_type_iterator GTI = gep_type_begin(GEP); |
Oscar Fuentes | ee56c42 | 2010-08-02 06:00:15 +0000 | [diff] [blame] | 2855 | for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()), |
Dan Gohman | e810b0d | 2009-05-08 20:36:47 +0000 | [diff] [blame] | 2856 | E = GEP->op_end(); |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 2857 | I != E; ++I) { |
| 2858 | Value *Index = *I; |
| 2859 | // Compute the (potentially symbolic) offset in bytes for this index. |
| 2860 | if (const StructType *STy = dyn_cast<StructType>(*GTI++)) { |
| 2861 | // For a struct, add the member offset. |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 2862 | unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2863 | const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo); |
| 2864 | |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2865 | // Add the field offset to the running total offset. |
Dan Gohman | 70eff63 | 2010-06-30 17:27:11 +0000 | [diff] [blame] | 2866 | TotalOffset = getAddExpr(TotalOffset, FieldOffset); |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 2867 | } else { |
| 2868 | // For an array, add the element offset, explicitly scaled. |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2869 | const SCEV *ElementSize = getSizeOfExpr(*GTI); |
| 2870 | const SCEV *IndexS = getSCEV(Index); |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 2871 | // Getelementptr indices are signed. |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2872 | IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy); |
| 2873 | |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2874 | // Multiply the index by the element size to compute the element offset. |
Dan Gohman | 70eff63 | 2010-06-30 17:27:11 +0000 | [diff] [blame] | 2875 | const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize); |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2876 | |
| 2877 | // Add the element offset to the running total offset. |
Dan Gohman | 70eff63 | 2010-06-30 17:27:11 +0000 | [diff] [blame] | 2878 | TotalOffset = getAddExpr(TotalOffset, LocalOffset); |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 2879 | } |
| 2880 | } |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2881 | |
| 2882 | // Get the SCEV for the GEP base. |
| 2883 | const SCEV *BaseS = getSCEV(Base); |
| 2884 | |
Dan Gohman | b9f9651 | 2010-06-30 07:16:37 +0000 | [diff] [blame] | 2885 | // Add the total offset from all the GEP indices to the base. |
Dan Gohman | 70eff63 | 2010-06-30 17:27:11 +0000 | [diff] [blame] | 2886 | return getAddExpr(BaseS, TotalOffset); |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 2887 | } |
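
// createNodeForGEP models ordinary address arithmetic: a struct operand adds
// the member's byte offset, an array operand adds index * sizeof(element),
// and the sum is added to the base. The standalone check below reproduces
// that computation with offsetof/sizeof on a hypothetical ToyElt type and
// compares it against real pointer arithmetic.
#include <cassert>
#include <cstddef>
#include <stdint.h>

struct ToyElt { int32_t A; int64_t B; };

// Byte offset of &Base[Index].B from Base: index * element-size + field
// offset, exactly the terms the GEP expansion above accumulates.
static uint64_t toyGEPOffset(uint64_t Index) {
  return Index * sizeof(ToyElt) + offsetof(ToyElt, B);
}

static void toyCheckGEPOffset() {
  ToyElt Array[4];
  assert(uint64_t(reinterpret_cast<char *>(&Array[3].B) -
                  reinterpret_cast<char *>(&Array[0])) == toyGEPOffset(3));
}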
| 2888 | |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2889 | /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is |
| 2890 | /// guaranteed to end in (at every loop iteration). It is, at the same time, |
| 2891 | /// the minimum number of times S is divisible by 2. For example, given {4,+,8} |
| 2892 | /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2893 | uint32_t |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 2894 | ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2895 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) |
Chris Lattner | 8314a0c | 2007-11-23 22:36:49 +0000 | [diff] [blame] | 2896 | return C->getValue()->getValue().countTrailingZeros(); |
Chris Lattner | a17f039 | 2006-12-12 02:26:09 +0000 | [diff] [blame] | 2897 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2898 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2899 | return std::min(GetMinTrailingZeros(T->getOperand()), |
| 2900 | (uint32_t)getTypeSizeInBits(T->getType())); |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2901 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2902 | if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2903 | uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); |
| 2904 | return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? |
| 2905 | getTypeSizeInBits(E->getType()) : OpRes; |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2906 | } |
| 2907 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2908 | if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2909 | uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); |
| 2910 | return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? |
| 2911 | getTypeSizeInBits(E->getType()) : OpRes; |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2912 | } |
| 2913 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2914 | if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2915 | // The result is the min of all operands' results.
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2916 | uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2917 | for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2918 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2919 | return MinOpRes; |
Chris Lattner | a17f039 | 2006-12-12 02:26:09 +0000 | [diff] [blame] | 2920 | } |
| 2921 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2922 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2923 | // The result is the sum of all operands' results.
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2924 | uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); |
| 2925 | uint32_t BitWidth = getTypeSizeInBits(M->getType()); |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2926 | for (unsigned i = 1, e = M->getNumOperands(); |
| 2927 | SumOpRes != BitWidth && i != e; ++i) |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2928 | SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2929 | BitWidth); |
| 2930 | return SumOpRes; |
Chris Lattner | a17f039 | 2006-12-12 02:26:09 +0000 | [diff] [blame] | 2931 | } |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2932 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2933 | if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2934 | // The result is the min of all operands' results.
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2935 | uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2936 | for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2937 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2938 | return MinOpRes; |
Chris Lattner | a17f039 | 2006-12-12 02:26:09 +0000 | [diff] [blame] | 2939 | } |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2940 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2941 | if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2942 | // The result is the min of all operands' results.
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2943 | uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2944 | for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2945 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 2946 | return MinOpRes; |
| 2947 | } |
| 2948 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 2949 | if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2950 | // The result is the min of all operands' results.
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2951 | uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2952 | for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2953 | MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 2954 | return MinOpRes; |
| 2955 | } |
| 2956 | |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2957 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { |
| 2958 | // For a SCEVUnknown, ask ValueTracking. |
| 2959 | unsigned BitWidth = getTypeSizeInBits(U->getType()); |
| 2960 | APInt Mask = APInt::getAllOnesValue(BitWidth); |
| 2961 | APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); |
| 2962 | ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones); |
| 2963 | return Zeros.countTrailingOnes(); |
| 2964 | } |
| 2965 | |
| 2966 | // SCEVUDivExpr |
Nick Lewycky | 83bb005 | 2007-11-22 07:59:40 +0000 | [diff] [blame] | 2967 | return 0; |
Chris Lattner | a17f039 | 2006-12-12 02:26:09 +0000 | [diff] [blame] | 2968 | } |
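
// Worked arithmetic for the rules above: a sum keeps at least the minimum of
// its operands' trailing zeros, a product keeps at least their sum (capped
// at the bit width), and every value of the recurrence {4,+,8} ends in at
// least two zero bits. toyTrailingZeros and toyCheckTrailingZeroRules are
// hypothetical, standalone helpers.
#include <cassert>
#include <stdint.h>

static unsigned toyTrailingZeros(uint32_t V) {
  if (V == 0) return 32;                   // zero: all 32 bits are zeros
  unsigned N = 0;
  while ((V & 1) == 0) { V >>= 1; ++N; }
  return N;
}

static void toyCheckTrailingZeroRules(uint32_t A, uint32_t B) {
  unsigned TZA = toyTrailingZeros(A), TZB = toyTrailingZeros(B);
  assert(toyTrailingZeros(A + B) >= (TZA < TZB ? TZA : TZB));     // add rule
  unsigned ProdBound = TZA + TZB;                                 // mul rule
  assert(toyTrailingZeros(A * B) >= (ProdBound > 32 ? 32 : ProdBound));
  for (uint32_t N = 0; N != 8; ++N)        // {4,+,8}: 4, 12, 20, 28, ...
    assert(toyTrailingZeros(4 + 8 * N) >= 2);
}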
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 2969 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 2970 | /// getUnsignedRange - Determine the unsigned range for a particular SCEV. |
| 2971 | /// |
| 2972 | ConstantRange |
| 2973 | ScalarEvolution::getUnsignedRange(const SCEV *S) { |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2974 | |
| 2975 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 2976 | return ConstantRange(C->getValue()->getValue()); |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 2977 | |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 2978 | unsigned BitWidth = getTypeSizeInBits(S->getType()); |
| 2979 | ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); |
| 2980 | |
| 2981 | // If the value has known zeros, the maximum unsigned value will have those |
| 2982 | // known zeros as well. |
| 2983 | uint32_t TZ = GetMinTrailingZeros(S); |
| 2984 | if (TZ != 0) |
| 2985 | ConservativeResult = |
| 2986 | ConstantRange(APInt::getMinValue(BitWidth), |
| 2987 | APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); |
| 2988 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 2989 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { |
| 2990 | ConstantRange X = getUnsignedRange(Add->getOperand(0)); |
| 2991 | for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) |
| 2992 | X = X.add(getUnsignedRange(Add->getOperand(i))); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 2993 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 2994 | } |
| 2995 | |
| 2996 | if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { |
| 2997 | ConstantRange X = getUnsignedRange(Mul->getOperand(0)); |
| 2998 | for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) |
| 2999 | X = X.multiply(getUnsignedRange(Mul->getOperand(i))); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3000 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3001 | } |
| 3002 | |
| 3003 | if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { |
| 3004 | ConstantRange X = getUnsignedRange(SMax->getOperand(0)); |
| 3005 | for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) |
| 3006 | X = X.smax(getUnsignedRange(SMax->getOperand(i))); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3007 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3008 | } |
| 3009 | |
| 3010 | if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { |
| 3011 | ConstantRange X = getUnsignedRange(UMax->getOperand(0)); |
| 3012 | for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) |
| 3013 | X = X.umax(getUnsignedRange(UMax->getOperand(i))); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3014 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3015 | } |
| 3016 | |
| 3017 | if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { |
| 3018 | ConstantRange X = getUnsignedRange(UDiv->getLHS()); |
| 3019 | ConstantRange Y = getUnsignedRange(UDiv->getRHS()); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3020 | return ConservativeResult.intersectWith(X.udiv(Y)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3021 | } |
| 3022 | |
| 3023 | if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { |
| 3024 | ConstantRange X = getUnsignedRange(ZExt->getOperand()); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3025 | return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3026 | } |
| 3027 | |
| 3028 | if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { |
| 3029 | ConstantRange X = getUnsignedRange(SExt->getOperand()); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3030 | return ConservativeResult.intersectWith(X.signExtend(BitWidth)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3031 | } |
| 3032 | |
| 3033 | if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { |
| 3034 | ConstantRange X = getUnsignedRange(Trunc->getOperand()); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3035 | return ConservativeResult.intersectWith(X.truncate(BitWidth)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3036 | } |
| 3037 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3038 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3039 | // If there's no unsigned wrap, the value will never be less than its |
| 3040 | // initial value. |
| 3041 | if (AddRec->hasNoUnsignedWrap()) |
| 3042 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) |
Dan Gohman | bca091d | 2010-04-12 23:08:18 +0000 | [diff] [blame] | 3043 | if (!C->getValue()->isZero()) |
Dan Gohman | bc7129f | 2010-04-11 22:12:18 +0000 | [diff] [blame] | 3044 | ConservativeResult = |
Dan Gohman | 8a18d6b | 2010-06-30 06:58:35 +0000 | [diff] [blame] | 3045 | ConservativeResult.intersectWith( |
| 3046 | ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0))); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3047 | |
| 3048 | // TODO: non-affine addrec |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3049 | if (AddRec->isAffine()) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3050 | const Type *Ty = AddRec->getType(); |
| 3051 | const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3052 | if (!isa<SCEVCouldNotCompute>(MaxBECount) && |
| 3053 | getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3054 | MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); |
| 3055 | |
| 3056 | const SCEV *Start = AddRec->getStart(); |
Dan Gohman | 646e047 | 2010-04-12 07:39:33 +0000 | [diff] [blame] | 3057 | const SCEV *Step = AddRec->getStepRecurrence(*this); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3058 | |
| 3059 | ConstantRange StartRange = getUnsignedRange(Start); |
Dan Gohman | 646e047 | 2010-04-12 07:39:33 +0000 | [diff] [blame] | 3060 | ConstantRange StepRange = getSignedRange(Step); |
| 3061 | ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); |
| 3062 | ConstantRange EndRange = |
| 3063 | StartRange.add(MaxBECountRange.multiply(StepRange)); |
| 3064 | |
| 3065 | // Check for overflow. This must be done with ConstantRange arithmetic |
| 3066 | // because we could be called from within the ScalarEvolution overflow |
| 3067 | // checking code. |
| 3068 | ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1); |
| 3069 | ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); |
| 3070 | ConstantRange ExtMaxBECountRange = |
| 3071 | MaxBECountRange.zextOrTrunc(BitWidth*2+1); |
| 3072 | ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1); |
| 3073 | if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != |
| 3074 | ExtEndRange) |
| 3075 | return ConservativeResult; |
| 3076 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3077 | APInt Min = APIntOps::umin(StartRange.getUnsignedMin(), |
| 3078 | EndRange.getUnsignedMin()); |
| 3079 | APInt Max = APIntOps::umax(StartRange.getUnsignedMax(), |
| 3080 | EndRange.getUnsignedMax()); |
| 3081 | if (Min.isMinValue() && Max.isMaxValue()) |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3082 | return ConservativeResult; |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3083 | return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3084 | } |
| 3085 | } |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3086 | |
| 3087 | return ConservativeResult; |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3088 | } |
| 3089 | |
| 3090 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { |
| 3091 | // For a SCEVUnknown, ask ValueTracking. |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3092 | APInt Mask = APInt::getAllOnesValue(BitWidth); |
| 3093 | APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); |
| 3094 | ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); |
Dan Gohman | 746f3b1 | 2009-07-20 22:34:18 +0000 | [diff] [blame] | 3095 | if (Ones == ~Zeros + 1) |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3096 | return ConservativeResult; |
| 3097 | return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3098 | } |
| 3099 | |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3100 | return ConservativeResult; |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3101 | } |
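
// For an affine addrec the code above forms End = Start + MaxBECount * Step,
// redoing the arithmetic in ConstantRanges of 2*BitWidth+1 bits so that any
// wraparound is detected, and only then bounds the unsigned range by
// umin/umax of the start and end ranges. The standalone sketch below scales
// the same idea down to 8-bit values with 32-bit "extended" arithmetic;
// toyAddRecUnsignedMax is a hypothetical helper.
#include <stdint.h>

// Returns true and sets MaxValue when Start + MaxBECount * Step provably
// stays within 8 bits; returns false (no tight bound) if it may wrap.
static bool toyAddRecUnsignedMax(uint8_t Start, uint8_t Step,
                                 uint8_t MaxBECount, uint8_t &MaxValue) {
  uint32_t WideEnd =
      uint32_t(Start) + uint32_t(MaxBECount) * uint32_t(Step); // no overflow
  if (WideEnd > 0xFF)
    return false;                 // the 8-bit addrec may wrap around
  MaxValue = uint8_t(WideEnd > Start ? WideEnd : Start); // umax(Start, End)
  return true;
}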
| 3102 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3103 | /// getSignedRange - Determine the signed range for a particular SCEV. |
| 3104 | /// |
| 3105 | ConstantRange |
| 3106 | ScalarEvolution::getSignedRange(const SCEV *S) { |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3107 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3108 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) |
| 3109 | return ConstantRange(C->getValue()->getValue()); |
| 3110 | |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3111 | unsigned BitWidth = getTypeSizeInBits(S->getType()); |
| 3112 | ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); |
| 3113 | |
| 3114 | // If the value has known zeros, the maximum signed value will have those |
| 3115 | // known zeros as well. |
| 3116 | uint32_t TZ = GetMinTrailingZeros(S); |
| 3117 | if (TZ != 0) |
| 3118 | ConservativeResult = |
| 3119 | ConstantRange(APInt::getSignedMinValue(BitWidth), |
| 3120 | APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); |
| 3121 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3122 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { |
| 3123 | ConstantRange X = getSignedRange(Add->getOperand(0)); |
| 3124 | for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) |
| 3125 | X = X.add(getSignedRange(Add->getOperand(i))); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3126 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3127 | } |
| 3128 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3129 | if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { |
| 3130 | ConstantRange X = getSignedRange(Mul->getOperand(0)); |
| 3131 | for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) |
| 3132 | X = X.multiply(getSignedRange(Mul->getOperand(i))); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3133 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3134 | } |
| 3135 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3136 | if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { |
| 3137 | ConstantRange X = getSignedRange(SMax->getOperand(0)); |
| 3138 | for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) |
| 3139 | X = X.smax(getSignedRange(SMax->getOperand(i))); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3140 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3141 | } |
Dan Gohman | 62849c0 | 2009-06-24 01:05:09 +0000 | [diff] [blame] | 3142 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3143 | if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { |
| 3144 | ConstantRange X = getSignedRange(UMax->getOperand(0)); |
| 3145 | for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) |
| 3146 | X = X.umax(getSignedRange(UMax->getOperand(i))); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3147 | return ConservativeResult.intersectWith(X); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3148 | } |
Dan Gohman | 62849c0 | 2009-06-24 01:05:09 +0000 | [diff] [blame] | 3149 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3150 | if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { |
| 3151 | ConstantRange X = getSignedRange(UDiv->getLHS()); |
| 3152 | ConstantRange Y = getSignedRange(UDiv->getRHS()); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3153 | return ConservativeResult.intersectWith(X.udiv(Y)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3154 | } |
Dan Gohman | 62849c0 | 2009-06-24 01:05:09 +0000 | [diff] [blame] | 3155 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3156 | if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { |
| 3157 | ConstantRange X = getSignedRange(ZExt->getOperand()); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3158 | return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3159 | } |
| 3160 | |
| 3161 | if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { |
| 3162 | ConstantRange X = getSignedRange(SExt->getOperand()); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3163 | return ConservativeResult.intersectWith(X.signExtend(BitWidth)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3164 | } |
| 3165 | |
| 3166 | if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { |
| 3167 | ConstantRange X = getSignedRange(Trunc->getOperand()); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3168 | return ConservativeResult.intersectWith(X.truncate(BitWidth)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3169 | } |
| 3170 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3171 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3172 | // If there's no signed wrap, and all the operands have the same sign or |
| 3173 | // zero, the value won't ever change sign. |
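// For example (illustrative): a recurrence such as {1,+,2}<nsw> has operands 1 and 2,
// both known non-negative, so the value stays non-negative on every iteration and the
// conservative range can be narrowed to the non-negative half of the signed range.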
| 3174 | if (AddRec->hasNoSignedWrap()) { |
| 3175 | bool AllNonNeg = true; |
| 3176 | bool AllNonPos = true; |
| 3177 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { |
| 3178 | if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; |
| 3179 | if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; |
| 3180 | } |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3181 | if (AllNonNeg) |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3182 | ConservativeResult = ConservativeResult.intersectWith( |
| 3183 | ConstantRange(APInt(BitWidth, 0), |
| 3184 | APInt::getSignedMinValue(BitWidth))); |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3185 | else if (AllNonPos) |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3186 | ConservativeResult = ConservativeResult.intersectWith( |
| 3187 | ConstantRange(APInt::getSignedMinValue(BitWidth), |
| 3188 | APInt(BitWidth, 1))); |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3189 | } |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3190 | |
| 3191 | // TODO: non-affine addrec |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3192 | if (AddRec->isAffine()) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3193 | const Type *Ty = AddRec->getType(); |
| 3194 | const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); |
Dan Gohman | c9c36cb | 2010-01-26 19:19:05 +0000 | [diff] [blame] | 3195 | if (!isa<SCEVCouldNotCompute>(MaxBECount) && |
| 3196 | getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3197 | MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); |
| 3198 | |
| 3199 | const SCEV *Start = AddRec->getStart(); |
Dan Gohman | 646e047 | 2010-04-12 07:39:33 +0000 | [diff] [blame] | 3200 | const SCEV *Step = AddRec->getStepRecurrence(*this); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3201 | |
| 3202 | ConstantRange StartRange = getSignedRange(Start); |
Dan Gohman | 646e047 | 2010-04-12 07:39:33 +0000 | [diff] [blame] | 3203 | ConstantRange StepRange = getSignedRange(Step); |
| 3204 | ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); |
| 3205 | ConstantRange EndRange = |
| 3206 | StartRange.add(MaxBECountRange.multiply(StepRange)); |
| 3207 | |
| 3208 | // Check for overflow. This must be done with ConstantRange arithmetic |
| 3209 | // because we could be called from within the ScalarEvolution overflow |
| 3210 | // checking code. |
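// (Illustrative reasoning: at 2*BitWidth+1 bits, Start + MaxBECount*Step is representable
// exactly -- the product of two BitWidth-bit quantities needs at most 2*BitWidth bits and
// the extra bit absorbs the final add -- so any disagreement with the BitWidth-wide
// computation above means that computation wrapped.)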
| 3211 | ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1); |
| 3212 | ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); |
| 3213 | ConstantRange ExtMaxBECountRange = |
| 3214 | MaxBECountRange.zextOrTrunc(BitWidth*2+1); |
| 3215 | ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1); |
| 3216 | if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != |
| 3217 | ExtEndRange) |
| 3218 | return ConservativeResult; |
| 3219 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3220 | APInt Min = APIntOps::smin(StartRange.getSignedMin(), |
| 3221 | EndRange.getSignedMin()); |
| 3222 | APInt Max = APIntOps::smax(StartRange.getSignedMax(), |
| 3223 | EndRange.getSignedMax()); |
| 3224 | if (Min.isMinSignedValue() && Max.isMaxSignedValue()) |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3225 | return ConservativeResult; |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3226 | return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); |
Dan Gohman | 62849c0 | 2009-06-24 01:05:09 +0000 | [diff] [blame] | 3227 | } |
Dan Gohman | 62849c0 | 2009-06-24 01:05:09 +0000 | [diff] [blame] | 3228 | } |
Dan Gohman | a10756e | 2010-01-21 02:09:26 +0000 | [diff] [blame] | 3229 | |
| 3230 | return ConservativeResult; |
Dan Gohman | 62849c0 | 2009-06-24 01:05:09 +0000 | [diff] [blame] | 3231 | } |
| 3232 | |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3233 | if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { |
| 3234 | // For a SCEVUnknown, ask ValueTracking. |
Duncan Sands | b0bc6c3 | 2010-02-15 16:12:20 +0000 | [diff] [blame] | 3235 | if (!U->getValue()->getType()->isIntegerTy() && !TD) |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3236 | return ConservativeResult; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3237 | unsigned NS = ComputeNumSignBits(U->getValue(), TD); |
| 3238 | if (NS == 1) |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3239 | return ConservativeResult; |
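// For example (illustrative): if ComputeNumSignBits reports 8 sign bits for an i32
// value, the value fits in 25 bits, i.e. it lies in [-2^24, 2^24), and the range
// constructed below narrows ConservativeResult to exactly that interval.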
| 3240 | return ConservativeResult.intersectWith( |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 3241 | ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3242 | APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)); |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3243 | } |
| 3244 | |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 3245 | return ConservativeResult; |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3246 | } |
| 3247 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3248 | /// createSCEV - We know that there is no SCEV for the specified value. |
| 3249 | /// Analyze the expression. |
| 3250 | /// |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3251 | const SCEV *ScalarEvolution::createSCEV(Value *V) { |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 3252 | if (!isSCEVable(V->getType())) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3253 | return getUnknown(V); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 3254 | |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3255 | unsigned Opcode = Instruction::UserOp1; |
Dan Gohman | 4ecbca5 | 2010-03-09 23:46:50 +0000 | [diff] [blame] | 3256 | if (Instruction *I = dyn_cast<Instruction>(V)) { |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3257 | Opcode = I->getOpcode(); |
Dan Gohman | 4ecbca5 | 2010-03-09 23:46:50 +0000 | [diff] [blame] | 3258 | |
| 3259 | // Don't attempt to analyze instructions in blocks that aren't |
| 3260 | // reachable. Such instructions don't matter, and they aren't required |
| 3261 | // to obey basic rules for definitions dominating uses which this |
| 3262 | // analysis depends on. |
| 3263 | if (!DT->isReachableFromEntry(I->getParent())) |
| 3264 | return getUnknown(V); |
| 3265 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3266 | Opcode = CE->getOpcode(); |
Dan Gohman | 6bbcba1 | 2009-06-24 00:54:57 +0000 | [diff] [blame] | 3267 | else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) |
| 3268 | return getConstant(CI); |
| 3269 | else if (isa<ConstantPointerNull>(V)) |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 3270 | return getConstant(V->getType(), 0); |
Dan Gohman | 2681232 | 2009-08-25 17:49:57 +0000 | [diff] [blame] | 3271 | else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) |
| 3272 | return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee()); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3273 | else |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3274 | return getUnknown(V); |
Chris Lattner | 2811f2a | 2007-04-02 05:41:38 +0000 | [diff] [blame] | 3275 | |
Dan Gohman | ca17890 | 2009-07-17 20:47:02 +0000 | [diff] [blame] | 3276 | Operator *U = cast<Operator>(V); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3277 | switch (Opcode) { |
Dan Gohman | d3f171d | 2010-08-16 16:03:49 +0000 | [diff] [blame] | 3278 | case Instruction::Add: { |
| 3279 | // The simple thing to do would be to just call getSCEV on both operands |
| 3280 | // and call getAddExpr with the result. However, if we're looking at a
| 3281 | // bunch of things all added together, this can be quite inefficient, |
| 3282 | // because it leads to N-1 getAddExpr calls for N ultimate operands. |
| 3283 | // Instead, gather up all the operands and make a single getAddExpr call. |
| 3284 | // LLVM IR canonical form means we need only traverse the left operands. |
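// For example (illustrative): for (((a + b) + c) + d) this gathers {d, c, b, a}
// and issues a single getAddExpr call instead of three nested ones.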
| 3285 | SmallVector<const SCEV *, 4> AddOps; |
| 3286 | AddOps.push_back(getSCEV(U->getOperand(1))); |
| 3287 | for (Value *Op = U->getOperand(0); |
| 3288 | Op->getValueID() == Instruction::Add + Value::InstructionVal; |
| 3289 | Op = U->getOperand(0)) { |
| 3290 | U = cast<Operator>(Op); |
| 3291 | AddOps.push_back(getSCEV(U->getOperand(1))); |
| 3292 | } |
| 3293 | AddOps.push_back(getSCEV(U->getOperand(0))); |
| 3294 | return getAddExpr(AddOps); |
| 3295 | } |
| 3296 | case Instruction::Mul: { |
| 3297 | // See the Add code above. |
| 3298 | SmallVector<const SCEV *, 4> MulOps; |
| 3299 | MulOps.push_back(getSCEV(U->getOperand(1))); |
| 3300 | for (Value *Op = U->getOperand(0); |
| 3301 | Op->getValueID() == Instruction::Mul + Value::InstructionVal; |
| 3302 | Op = U->getOperand(0)) { |
| 3303 | U = cast<Operator>(Op); |
| 3304 | MulOps.push_back(getSCEV(U->getOperand(1))); |
| 3305 | } |
| 3306 | MulOps.push_back(getSCEV(U->getOperand(0))); |
| 3307 | return getMulExpr(MulOps); |
| 3308 | } |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3309 | case Instruction::UDiv: |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3310 | return getUDivExpr(getSCEV(U->getOperand(0)), |
| 3311 | getSCEV(U->getOperand(1))); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3312 | case Instruction::Sub: |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3313 | return getMinusSCEV(getSCEV(U->getOperand(0)), |
| 3314 | getSCEV(U->getOperand(1))); |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3315 | case Instruction::And: |
| 3316 | // For an expression like x&255 that merely masks off the high bits, |
| 3317 | // use zext(trunc(x)) as the SCEV expression. |
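// For example (illustrative): for an i32 value %x, (%x & 255) is modeled as
// (zext i8 (trunc %x to i8) to i32).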
| 3318 | if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { |
Dan Gohman | 2c73d5f | 2009-04-25 17:05:40 +0000 | [diff] [blame] | 3319 | if (CI->isNullValue()) |
| 3320 | return getSCEV(U->getOperand(1)); |
Dan Gohman | d6c3295 | 2009-04-27 01:41:10 +0000 | [diff] [blame] | 3321 | if (CI->isAllOnesValue()) |
| 3322 | return getSCEV(U->getOperand(0)); |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3323 | const APInt &A = CI->getValue(); |
Dan Gohman | 61ffa8e | 2009-06-16 19:52:01 +0000 | [diff] [blame] | 3324 | |
| 3325 | // Instcombine's ShrinkDemandedConstant may strip bits out of |
| 3326 | // constants, obscuring what would otherwise be a low-bits mask. |
| 3327 | // Use ComputeMaskedBits to compute what ShrinkDemandedConstant |
| 3328 | // knew about to reconstruct a low-bits mask value. |
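// For example (illustrative): if %x is known to have its low bit clear, instcombine
// may have shrunk (%x & 255) to (%x & 254); the known-zero bits show the two masks
// are interchangeable here, so the zext(trunc) form is still produced.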
| 3329 | unsigned LZ = A.countLeadingZeros(); |
| 3330 | unsigned BitWidth = A.getBitWidth(); |
| 3331 | APInt AllOnes = APInt::getAllOnesValue(BitWidth); |
| 3332 | APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); |
| 3333 | ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD); |
| 3334 | |
| 3335 | APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ); |
| 3336 | |
Dan Gohman | fc3641b | 2009-06-17 23:54:37 +0000 | [diff] [blame] | 3337 | if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask)) |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3338 | return |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3339 | getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)), |
Owen Anderson | 1d0be15 | 2009-08-13 21:58:54 +0000 | [diff] [blame] | 3340 | IntegerType::get(getContext(), BitWidth - LZ)), |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3341 | U->getType()); |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3342 | } |
| 3343 | break; |
Dan Gohman | 61ffa8e | 2009-06-16 19:52:01 +0000 | [diff] [blame] | 3344 | |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3345 | case Instruction::Or: |
| 3346 | // If the RHS of the Or is a constant, we may have something like: |
| 3347 | // X*4+1 which got turned into X*4|1. Handle this as an Add so loop |
| 3348 | // optimizations will transparently handle this case. |
| 3349 | // |
| 3350 | // In order for this transformation to be safe, the LHS must be of the |
| 3351 | // form X*(2^n) and the Or constant must be less than 2^n. |
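// For example (illustrative): in (X*4 | 1), X*4 has at least two trailing zero bits
// and the constant 1 occupies only the low bit, so the Or is modeled as X*4 + 1.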
| 3352 | if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3353 | const SCEV *LHS = getSCEV(U->getOperand(0)); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3354 | const APInt &CIVal = CI->getValue(); |
Dan Gohman | 2c364ad | 2009-06-19 23:29:04 +0000 | [diff] [blame] | 3355 | if (GetMinTrailingZeros(LHS) >= |
Dan Gohman | 1f96e67 | 2009-09-17 18:05:20 +0000 | [diff] [blame] | 3356 | (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { |
| 3357 | // Build a plain add SCEV. |
| 3358 | const SCEV *S = getAddExpr(LHS, getSCEV(CI)); |
| 3359 | // If the LHS of the add was an addrec and it has no-wrap flags, |
| 3360 | // transfer the no-wrap flags, since an or won't introduce a wrap. |
| 3361 | if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { |
| 3362 | const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); |
| 3363 | if (OldAR->hasNoUnsignedWrap()) |
| 3364 | const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true); |
| 3365 | if (OldAR->hasNoSignedWrap()) |
| 3366 | const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true); |
| 3367 | } |
| 3368 | return S; |
| 3369 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3370 | } |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3371 | break; |
| 3372 | case Instruction::Xor: |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3373 | if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { |
Nick Lewycky | 01eaf80 | 2008-07-07 06:15:49 +0000 | [diff] [blame] | 3374 | // If the RHS of the xor is the sign bit, then this is just an add.
| 3375 | // Instcombine turns an add of the sign bit into an xor as a strength-reduction step.
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3376 | if (CI->getValue().isSignBit()) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3377 | return getAddExpr(getSCEV(U->getOperand(0)), |
| 3378 | getSCEV(U->getOperand(1))); |
Nick Lewycky | 01eaf80 | 2008-07-07 06:15:49 +0000 | [diff] [blame] | 3379 | |
| 3380 | // If the RHS of xor is -1, then this is a not operation. |
Dan Gohman | 0bac95e | 2009-05-18 16:17:44 +0000 | [diff] [blame] | 3381 | if (CI->isAllOnesValue()) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3382 | return getNotSCEV(getSCEV(U->getOperand(0))); |
Dan Gohman | 10978bd | 2009-05-18 16:29:04 +0000 | [diff] [blame] | 3383 | |
| 3384 | // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. |
| 3385 | // This is a variant of the check for xor with -1, and it handles |
| 3386 | // the case where instcombine has trimmed non-demanded bits out |
| 3387 | // of an xor with -1. |
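// For example (illustrative): if (%x & 255) was modeled as (zext i8 (trunc %x) to i32),
// then xor'ing that value with 255 flips exactly the low 8 bits, which is
// (zext i8 (~(trunc %x)) to i32).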
| 3388 | if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) |
| 3389 | if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1))) |
| 3390 | if (BO->getOpcode() == Instruction::And && |
| 3391 | LCI->getValue() == CI->getValue()) |
| 3392 | if (const SCEVZeroExtendExpr *Z = |
Dan Gohman | 3034c10 | 2009-06-17 01:22:39 +0000 | [diff] [blame] | 3393 | dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) { |
Dan Gohman | 8205283 | 2009-06-18 00:00:20 +0000 | [diff] [blame] | 3394 | const Type *UTy = U->getType(); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3395 | const SCEV *Z0 = Z->getOperand(); |
Dan Gohman | 8205283 | 2009-06-18 00:00:20 +0000 | [diff] [blame] | 3396 | const Type *Z0Ty = Z0->getType(); |
| 3397 | unsigned Z0TySize = getTypeSizeInBits(Z0Ty); |
| 3398 | |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 3399 | // If C is a low-bits mask, the zero extend is serving to |
Dan Gohman | 8205283 | 2009-06-18 00:00:20 +0000 | [diff] [blame] | 3400 | // mask off the high bits. Complement the operand and |
| 3401 | // re-apply the zext. |
| 3402 | if (APIntOps::isMask(Z0TySize, CI->getValue())) |
| 3403 | return getZeroExtendExpr(getNotSCEV(Z0), UTy); |
| 3404 | |
| 3405 | // If C is a single bit, it may be in the sign-bit position |
| 3406 | // before the zero-extend. In this case, represent the xor |
| 3407 | // using an add, which is equivalent, and re-apply the zext. |
| 3408 | APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize); |
| 3409 | if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() && |
| 3410 | Trunc.isSignBit()) |
| 3411 | return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), |
| 3412 | UTy); |
Dan Gohman | 3034c10 | 2009-06-17 01:22:39 +0000 | [diff] [blame] | 3413 | } |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3414 | } |
| 3415 | break; |
| 3416 | |
| 3417 | case Instruction::Shl: |
| 3418 | // Turn a shift left by a constant amount into a multiply.
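// For example (illustrative): (%x << 3) is modeled as %x * 8.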
| 3419 | if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 3420 | uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth(); |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 3421 | |
| 3422 | // If the shift count is not less than the bitwidth, the result of |
| 3423 | // the shift is undefined. Don't try to analyze it, because the |
| 3424 | // resolution chosen here may differ from the resolution chosen in |
| 3425 | // other parts of the compiler. |
| 3426 | if (SA->getValue().uge(BitWidth)) |
| 3427 | break; |
| 3428 | |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 3429 | Constant *X = ConstantInt::get(getContext(), |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 3430 | APInt(BitWidth, 1).shl(SA->getZExtValue())); |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3431 | return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X)); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3432 | } |
| 3433 | break; |
| 3434 | |
Nick Lewycky | 01eaf80 | 2008-07-07 06:15:49 +0000 | [diff] [blame] | 3435 | case Instruction::LShr: |
Nick Lewycky | 789558d | 2009-01-13 09:18:58 +0000 | [diff] [blame] | 3436 | // Turn a logical shift right by a constant amount into an unsigned divide.
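// For example (illustrative): (%x >>u 3) is modeled as %x /u 8.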
Nick Lewycky | 01eaf80 | 2008-07-07 06:15:49 +0000 | [diff] [blame] | 3437 | if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 3438 | uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth(); |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 3439 | |
| 3440 | // If the shift count is not less than the bitwidth, the result of |
| 3441 | // the shift is undefined. Don't try to analyze it, because the |
| 3442 | // resolution chosen here may differ from the resolution chosen in |
| 3443 | // other parts of the compiler. |
| 3444 | if (SA->getValue().uge(BitWidth)) |
| 3445 | break; |
| 3446 | |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 3447 | Constant *X = ConstantInt::get(getContext(), |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 3448 | APInt(BitWidth, 1).shl(SA->getZExtValue())); |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3449 | return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X)); |
Nick Lewycky | 01eaf80 | 2008-07-07 06:15:49 +0000 | [diff] [blame] | 3450 | } |
| 3451 | break; |
| 3452 | |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3453 | case Instruction::AShr: |
| 3454 | // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. |
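// For example (illustrative): on i32, ((%x << 24) >>s 24) is modeled as
// (sext i8 (trunc %x to i8) to i32).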
| 3455 | if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 3456 | if (Operator *L = dyn_cast<Operator>(U->getOperand(0))) |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3457 | if (L->getOpcode() == Instruction::Shl && |
| 3458 | L->getOperand(1) == U->getOperand(1)) { |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 3459 | uint64_t BitWidth = getTypeSizeInBits(U->getType()); |
| 3460 | |
| 3461 | // If the shift count is not less than the bitwidth, the result of |
| 3462 | // the shift is undefined. Don't try to analyze it, because the |
| 3463 | // resolution chosen here may differ from the resolution chosen in |
| 3464 | // other parts of the compiler. |
| 3465 | if (CI->getValue().uge(BitWidth)) |
| 3466 | break; |
| 3467 | |
Dan Gohman | 2c73d5f | 2009-04-25 17:05:40 +0000 | [diff] [blame] | 3468 | uint64_t Amt = BitWidth - CI->getZExtValue(); |
| 3469 | if (Amt == BitWidth) |
| 3470 | return getSCEV(L->getOperand(0)); // shift by zero --> noop |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3471 | return |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3472 | getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)), |
Dan Gohman | ddb3eaf | 2010-04-22 01:35:11 +0000 | [diff] [blame] | 3473 | IntegerType::get(getContext(), |
| 3474 | Amt)), |
| 3475 | U->getType()); |
Dan Gohman | 4ee29af | 2009-04-21 02:26:00 +0000 | [diff] [blame] | 3476 | } |
| 3477 | break; |
| 3478 | |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3479 | case Instruction::Trunc: |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3480 | return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3481 | |
| 3482 | case Instruction::ZExt: |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3483 | return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3484 | |
| 3485 | case Instruction::SExt: |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3486 | return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3487 | |
| 3488 | case Instruction::BitCast: |
| 3489 | // BitCasts are no-op casts so we just eliminate the cast. |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 3490 | if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3491 | return getSCEV(U->getOperand(0)); |
| 3492 | break; |
| 3493 | |
Dan Gohman | 4f8eea8 | 2010-02-01 18:27:38 +0000 | [diff] [blame] | 3494 | // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can |
| 3495 | // lead to pointer expressions which cannot safely be expanded to GEPs, |
| 3496 | // because ScalarEvolution doesn't respect the GEP aliasing rules when |
| 3497 | // simplifying integer expressions. |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 3498 | |
Dan Gohman | 26466c0 | 2009-05-08 20:26:55 +0000 | [diff] [blame] | 3499 | case Instruction::GetElementPtr: |
Dan Gohman | d281ed2 | 2009-12-18 02:09:29 +0000 | [diff] [blame] | 3500 | return createNodeForGEP(cast<GEPOperator>(U)); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 3501 | |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3502 | case Instruction::PHI: |
| 3503 | return createNodeForPHI(cast<PHINode>(U)); |
| 3504 | |
| 3505 | case Instruction::Select: |
| 3506 | // This could be a smax or umax that was lowered earlier. |
| 3507 | // Try to recover it. |
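// For example (illustrative): 'select (icmp sgt %a, %b), %a, %b' is recognized as
// smax(%a, %b), and 'select (icmp ne %n, 0), %n, 1' as umax(%n, 1).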
| 3508 | if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { |
| 3509 | Value *LHS = ICI->getOperand(0); |
| 3510 | Value *RHS = ICI->getOperand(1); |
| 3511 | switch (ICI->getPredicate()) { |
| 3512 | case ICmpInst::ICMP_SLT: |
| 3513 | case ICmpInst::ICMP_SLE: |
| 3514 | std::swap(LHS, RHS); |
| 3515 | // fall through |
| 3516 | case ICmpInst::ICMP_SGT: |
| 3517 | case ICmpInst::ICMP_SGE: |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3518 | // a >s b ? a+x : b+x -> smax(a, b)+x |
| 3519 | // a >s b ? b+x : a+x -> smin(a, b)+x |
| 3520 | if (LHS->getType() == U->getType()) { |
| 3521 | const SCEV *LS = getSCEV(LHS); |
| 3522 | const SCEV *RS = getSCEV(RHS); |
| 3523 | const SCEV *LA = getSCEV(U->getOperand(1)); |
| 3524 | const SCEV *RA = getSCEV(U->getOperand(2)); |
| 3525 | const SCEV *LDiff = getMinusSCEV(LA, LS); |
| 3526 | const SCEV *RDiff = getMinusSCEV(RA, RS); |
| 3527 | if (LDiff == RDiff) |
| 3528 | return getAddExpr(getSMaxExpr(LS, RS), LDiff); |
| 3529 | LDiff = getMinusSCEV(LA, RS); |
| 3530 | RDiff = getMinusSCEV(RA, LS); |
| 3531 | if (LDiff == RDiff) |
| 3532 | return getAddExpr(getSMinExpr(LS, RS), LDiff); |
| 3533 | } |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3534 | break; |
| 3535 | case ICmpInst::ICMP_ULT: |
| 3536 | case ICmpInst::ICMP_ULE: |
| 3537 | std::swap(LHS, RHS); |
| 3538 | // fall through |
| 3539 | case ICmpInst::ICMP_UGT: |
| 3540 | case ICmpInst::ICMP_UGE: |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3541 | // a >u b ? a+x : b+x -> umax(a, b)+x |
| 3542 | // a >u b ? b+x : a+x -> umin(a, b)+x |
| 3543 | if (LHS->getType() == U->getType()) { |
| 3544 | const SCEV *LS = getSCEV(LHS); |
| 3545 | const SCEV *RS = getSCEV(RHS); |
| 3546 | const SCEV *LA = getSCEV(U->getOperand(1)); |
| 3547 | const SCEV *RA = getSCEV(U->getOperand(2)); |
| 3548 | const SCEV *LDiff = getMinusSCEV(LA, LS); |
| 3549 | const SCEV *RDiff = getMinusSCEV(RA, RS); |
| 3550 | if (LDiff == RDiff) |
| 3551 | return getAddExpr(getUMaxExpr(LS, RS), LDiff); |
| 3552 | LDiff = getMinusSCEV(LA, RS); |
| 3553 | RDiff = getMinusSCEV(RA, LS); |
| 3554 | if (LDiff == RDiff) |
| 3555 | return getAddExpr(getUMinExpr(LS, RS), LDiff); |
| 3556 | } |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3557 | break; |
Dan Gohman | 30fb512 | 2009-06-18 20:21:07 +0000 | [diff] [blame] | 3558 | case ICmpInst::ICMP_NE: |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3559 | // n != 0 ? n+x : 1+x -> umax(n, 1)+x |
| 3560 | if (LHS->getType() == U->getType() && |
Dan Gohman | 30fb512 | 2009-06-18 20:21:07 +0000 | [diff] [blame] | 3561 | isa<ConstantInt>(RHS) && |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3562 | cast<ConstantInt>(RHS)->isZero()) { |
| 3563 | const SCEV *One = getConstant(LHS->getType(), 1); |
| 3564 | const SCEV *LS = getSCEV(LHS); |
| 3565 | const SCEV *LA = getSCEV(U->getOperand(1)); |
| 3566 | const SCEV *RA = getSCEV(U->getOperand(2)); |
| 3567 | const SCEV *LDiff = getMinusSCEV(LA, LS); |
| 3568 | const SCEV *RDiff = getMinusSCEV(RA, One); |
| 3569 | if (LDiff == RDiff) |
Dan Gohman | 58a85b9 | 2010-08-13 20:17:14 +0000 | [diff] [blame] | 3570 | return getAddExpr(getUMaxExpr(One, LS), LDiff); |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3571 | } |
Dan Gohman | 30fb512 | 2009-06-18 20:21:07 +0000 | [diff] [blame] | 3572 | break; |
| 3573 | case ICmpInst::ICMP_EQ: |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3574 | // n == 0 ? 1+x : n+x -> umax(n, 1)+x |
| 3575 | if (LHS->getType() == U->getType() && |
Dan Gohman | 30fb512 | 2009-06-18 20:21:07 +0000 | [diff] [blame] | 3576 | isa<ConstantInt>(RHS) && |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3577 | cast<ConstantInt>(RHS)->isZero()) { |
| 3578 | const SCEV *One = getConstant(LHS->getType(), 1); |
| 3579 | const SCEV *LS = getSCEV(LHS); |
| 3580 | const SCEV *LA = getSCEV(U->getOperand(1)); |
| 3581 | const SCEV *RA = getSCEV(U->getOperand(2)); |
| 3582 | const SCEV *LDiff = getMinusSCEV(LA, One); |
| 3583 | const SCEV *RDiff = getMinusSCEV(RA, LS); |
| 3584 | if (LDiff == RDiff) |
Dan Gohman | 58a85b9 | 2010-08-13 20:17:14 +0000 | [diff] [blame] | 3585 | return getAddExpr(getUMaxExpr(One, LS), LDiff); |
Dan Gohman | 9f93d30 | 2010-04-24 03:09:42 +0000 | [diff] [blame] | 3586 | } |
Dan Gohman | 30fb512 | 2009-06-18 20:21:07 +0000 | [diff] [blame] | 3587 | break; |
Dan Gohman | 6c459a2 | 2008-06-22 19:56:46 +0000 | [diff] [blame] | 3588 | default: |
| 3589 | break; |
| 3590 | } |
| 3591 | } |
| 3592 | |
| 3593 | default: // We cannot analyze this expression. |
| 3594 | break; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3595 | } |
| 3596 | |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 3597 | return getUnknown(V); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3598 | } |
| 3599 | |
| 3600 | |
| 3601 | |
| 3602 | //===----------------------------------------------------------------------===// |
| 3603 | // Iteration Count Computation Code |
| 3604 | // |
| 3605 | |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 3606 | /// getBackedgeTakenCount - If the specified loop has a predictable |
| 3607 | /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute |
| 3608 | /// object. The backedge-taken count is the number of times the loop header |
| 3609 | /// will be branched to from within the loop. This is one less than the |
| 3610 | /// trip count of the loop, since it doesn't count the first iteration, |
| 3611 | /// when the header is branched to from outside the loop. |
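/// For example, a loop whose header executes exactly 10 times has a trip count
/// of 10 and a backedge-taken count of 9.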
| 3612 | /// |
| 3613 | /// Note that it is not valid to call this method on a loop without a |
| 3614 | /// loop-invariant backedge-taken count (see |
| 3615 | /// hasLoopInvariantBackedgeTakenCount). |
| 3616 | /// |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3617 | const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 3618 | return getBackedgeTakenInfo(L).Exact; |
| 3619 | } |
| 3620 | |
| 3621 | /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except |
| 3622 | /// return the least SCEV value that is known never to be less than the |
| 3623 | /// actual backedge taken count. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3624 | const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 3625 | return getBackedgeTakenInfo(L).Max; |
| 3626 | } |
| 3627 | |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3628 | /// PushLoopPHIs - Push PHI nodes in the header of the given loop |
| 3629 | /// onto the given Worklist. |
| 3630 | static void |
| 3631 | PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { |
| 3632 | BasicBlock *Header = L->getHeader(); |
| 3633 | |
| 3634 | // Push all Loop-header PHIs onto the Worklist stack. |
| 3635 | for (BasicBlock::iterator I = Header->begin(); |
| 3636 | PHINode *PN = dyn_cast<PHINode>(I); ++I) |
| 3637 | Worklist.push_back(PN); |
| 3638 | } |
| 3639 | |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 3640 | const ScalarEvolution::BackedgeTakenInfo & |
| 3641 | ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 3642 | // Initially insert a CouldNotCompute for this loop. If the insertion |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 3643 | // succeeds, proceed to actually compute a backedge-taken count and |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 3644 | // update the value. The temporary CouldNotCompute value tells SCEV |
| 3645 | // code elsewhere that it shouldn't attempt to request a new |
| 3646 | // backedge-taken count, which could result in infinite recursion. |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 3647 | std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 3648 | BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute())); |
| 3649 | if (Pair.second) { |
Dan Gohman | 93dacad | 2010-01-26 16:46:18 +0000 | [diff] [blame] | 3650 | BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L); |
| 3651 | if (BECount.Exact != getCouldNotCompute()) { |
| 3652 | assert(BECount.Exact->isLoopInvariant(L) && |
| 3653 | BECount.Max->isLoopInvariant(L) && |
| 3654 | "Computed backedge-taken count isn't loop invariant for loop!"); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3655 | ++NumTripCountsComputed; |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 3656 | |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 3657 | // Update the value in the map. |
Dan Gohman | 93dacad | 2010-01-26 16:46:18 +0000 | [diff] [blame] | 3658 | Pair.first->second = BECount; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3659 | } else { |
Dan Gohman | 93dacad | 2010-01-26 16:46:18 +0000 | [diff] [blame] | 3660 | if (BECount.Max != getCouldNotCompute()) |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3661 | // Update the value in the map. |
Dan Gohman | 93dacad | 2010-01-26 16:46:18 +0000 | [diff] [blame] | 3662 | Pair.first->second = BECount; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3663 | if (isa<PHINode>(L->getHeader()->begin())) |
| 3664 | // Only count loops that have phi nodes as not being computable. |
| 3665 | ++NumTripCountsNotComputed; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3666 | } |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 3667 | |
| 3668 | // Now that we know more about the trip count for this loop, forget any |
| 3669 | // existing SCEV values for PHI nodes in this loop since they are only |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3670 | // conservative estimates made without the benefit of trip count |
Dan Gohman | 4c7279a | 2009-10-31 15:04:55 +0000 | [diff] [blame] | 3671 | // information. This is similar to the code in forgetLoop, except that |
| 3672 | // it handles SCEVUnknown PHI nodes specially. |
Dan Gohman | 93dacad | 2010-01-26 16:46:18 +0000 | [diff] [blame] | 3673 | if (BECount.hasAnyInfo()) { |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3674 | SmallVector<Instruction *, 16> Worklist; |
| 3675 | PushLoopPHIs(L, Worklist); |
| 3676 | |
| 3677 | SmallPtrSet<Instruction *, 8> Visited; |
| 3678 | while (!Worklist.empty()) { |
| 3679 | Instruction *I = Worklist.pop_back_val(); |
| 3680 | if (!Visited.insert(I)) continue; |
| 3681 | |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 3682 | std::map<SCEVCallbackVH, const SCEV *>::iterator It = |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3683 | Scalars.find(static_cast<Value *>(I)); |
| 3684 | if (It != Scalars.end()) { |
| 3685 | // SCEVUnknown for a PHI either means that it has an unrecognized |
| 3686 | // structure, or it's a PHI that's in the process of being computed
Dan Gohman | ba70188 | 2009-07-13 22:04:06 +0000 | [diff] [blame] | 3687 | // by createNodeForPHI. In the former case, additional loop trip |
| 3688 | // count information isn't going to change anything. In the latter
| 3689 | // case, createNodeForPHI will perform the necessary updates on its |
| 3690 | // own when it gets to that point. |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 3691 | if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) { |
| 3692 | ValuesAtScopes.erase(It->second); |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3693 | Scalars.erase(It); |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 3694 | } |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3695 | if (PHINode *PN = dyn_cast<PHINode>(I)) |
| 3696 | ConstantEvolutionLoopExitValue.erase(PN); |
| 3697 | } |
| 3698 | |
| 3699 | PushDefUseChildren(I, Worklist); |
| 3700 | } |
| 3701 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3702 | } |
Dan Gohman | 01ecca2 | 2009-04-27 20:16:15 +0000 | [diff] [blame] | 3703 | return Pair.first->second; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3704 | } |
| 3705 | |
Dan Gohman | 4c7279a | 2009-10-31 15:04:55 +0000 | [diff] [blame] | 3706 | /// forgetLoop - This method should be called by the client when it has |
| 3707 | /// changed a loop in a way that may effect ScalarEvolution's ability to |
| 3708 | /// compute a trip count, or if the loop is deleted. |
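/// A minimal client sketch (illustrative only, not part of this file):
///   if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
///     const SCEV *BTC = SE.getBackedgeTakenCount(L);
///     // ... transform the loop using BTC ...
///     SE.forgetLoop(L); // drop cached trip counts and loop-header PHI expressions
///   }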
| 3709 | void ScalarEvolution::forgetLoop(const Loop *L) { |
| 3710 | // Drop any stored trip count value. |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 3711 | BackedgeTakenCounts.erase(L); |
Dan Gohman | fb7d35f | 2009-05-02 17:43:35 +0000 | [diff] [blame] | 3712 | |
Dan Gohman | 4c7279a | 2009-10-31 15:04:55 +0000 | [diff] [blame] | 3713 | // Drop information about expressions based on loop-header PHIs. |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 3714 | SmallVector<Instruction *, 16> Worklist; |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3715 | PushLoopPHIs(L, Worklist); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 3716 | |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3717 | SmallPtrSet<Instruction *, 8> Visited; |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 3718 | while (!Worklist.empty()) { |
| 3719 | Instruction *I = Worklist.pop_back_val(); |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3720 | if (!Visited.insert(I)) continue; |
| 3721 | |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 3722 | std::map<SCEVCallbackVH, const SCEV *>::iterator It = |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3723 | Scalars.find(static_cast<Value *>(I)); |
| 3724 | if (It != Scalars.end()) { |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 3725 | ValuesAtScopes.erase(It->second); |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3726 | Scalars.erase(It); |
Dan Gohman | 59ae6b9 | 2009-07-08 19:23:34 +0000 | [diff] [blame] | 3727 | if (PHINode *PN = dyn_cast<PHINode>(I)) |
| 3728 | ConstantEvolutionLoopExitValue.erase(PN); |
| 3729 | } |
| 3730 | |
| 3731 | PushDefUseChildren(I, Worklist); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 3732 | } |
Dan Gohman | 60f8a63 | 2009-02-17 20:49:49 +0000 | [diff] [blame] | 3733 | } |
| 3734 | |
Eric Christopher | e6cbfa6 | 2010-07-29 01:25:38 +0000 | [diff] [blame] | 3735 | /// forgetValue - This method should be called by the client when it has |
| 3736 | /// changed a value in a way that may affect its computed value, or which may
| 3737 | /// disconnect it from a def-use chain linking it to a loop. |
| 3738 | void ScalarEvolution::forgetValue(Value *V) { |
Dale Johannesen | 45a2d7d | 2010-02-19 07:14:22 +0000 | [diff] [blame] | 3739 | Instruction *I = dyn_cast<Instruction>(V); |
| 3740 | if (!I) return; |
| 3741 | |
| 3742 | // Drop information about expressions based on loop-header PHIs. |
| 3743 | SmallVector<Instruction *, 16> Worklist; |
| 3744 | Worklist.push_back(I); |
| 3745 | |
| 3746 | SmallPtrSet<Instruction *, 8> Visited; |
| 3747 | while (!Worklist.empty()) { |
| 3748 | I = Worklist.pop_back_val(); |
| 3749 | if (!Visited.insert(I)) continue; |
| 3750 | |
| 3751 | std::map<SCEVCallbackVH, const SCEV *>::iterator It = |
| 3752 | Scalars.find(static_cast<Value *>(I)); |
| 3753 | if (It != Scalars.end()) { |
| 3754 | ValuesAtScopes.erase(It->second); |
| 3755 | Scalars.erase(It); |
| 3756 | if (PHINode *PN = dyn_cast<PHINode>(I)) |
| 3757 | ConstantEvolutionLoopExitValue.erase(PN); |
| 3758 | } |
| 3759 | |
| 3760 | PushDefUseChildren(I, Worklist); |
| 3761 | } |
| 3762 | } |
| 3763 | |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 3764 | /// ComputeBackedgeTakenCount - Compute the number of times the backedge |
| 3765 | /// of the specified loop will execute. |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 3766 | ScalarEvolution::BackedgeTakenInfo |
| 3767 | ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 3768 | SmallVector<BasicBlock *, 8> ExitingBlocks; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3769 | L->getExitingBlocks(ExitingBlocks); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3770 | |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3771 | // Examine all exits and pick the most conservative values. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3772 | const SCEV *BECount = getCouldNotCompute(); |
| 3773 | const SCEV *MaxBECount = getCouldNotCompute(); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3774 | bool CouldNotComputeBECount = false; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3775 | for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { |
| 3776 | BackedgeTakenInfo NewBTI = |
| 3777 | ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3778 | |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3779 | if (NewBTI.Exact == getCouldNotCompute()) { |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3780 | // We couldn't compute an exact value for this exit, so |
Dan Gohman | d32f5bf | 2009-06-22 21:10:22 +0000 | [diff] [blame] | 3781 | // we won't be able to compute an exact value for the loop. |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3782 | CouldNotComputeBECount = true; |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3783 | BECount = getCouldNotCompute(); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3784 | } else if (!CouldNotComputeBECount) { |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3785 | if (BECount == getCouldNotCompute()) |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3786 | BECount = NewBTI.Exact; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3787 | else |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 3788 | BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3789 | } |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3790 | if (MaxBECount == getCouldNotCompute()) |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 3791 | MaxBECount = NewBTI.Max; |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3792 | else if (NewBTI.Max != getCouldNotCompute()) |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 3793 | MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3794 | } |
| 3795 | |
| 3796 | return BackedgeTakenInfo(BECount, MaxBECount); |
| 3797 | } |
| 3798 | |
| 3799 | /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge |
| 3800 | /// of the specified loop will execute if it exits via the specified block. |
| 3801 | ScalarEvolution::BackedgeTakenInfo |
| 3802 | ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L, |
| 3803 | BasicBlock *ExitingBlock) { |
| 3804 | |
| 3805 | // Okay, we've chosen an exiting block. See what condition causes us to |
| 3806 | // exit at this block. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3807 | // |
| 3808 | // FIXME: we should be able to handle switch instructions (with a single exit) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3809 | BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3810 | if (ExitBr == 0) return getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3811 | assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!"); |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 3812 | |
Chris Lattner | 8b0e360 | 2007-01-07 02:24:26 +0000 | [diff] [blame] | 3813 | // At this point, we know we have a conditional branch that determines whether |
| 3814 | // the loop is exited. However, we don't know if the branch is executed each |
| 3815 | // time through the loop. If not, then the execution count of the branch will |
| 3816 | // not be equal to the trip count of the loop. |
| 3817 | // |
| 3818 | // Currently we check for this by checking to see if the Exit branch goes to |
| 3819 | // the loop header. If so, we know it will always execute the same number of |
Chris Lattner | 192e403 | 2007-01-14 01:24:47 +0000 | [diff] [blame] | 3820 | // times as the loop. We also handle the case where the exit block *is* the |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3821 | // loop header. This is common for un-rotated loops. |
| 3822 | // |
| 3823 | // If both of those tests fail, walk up the unique predecessor chain to the |
| 3824 | // header, stopping if there is an edge that doesn't exit the loop. If the |
| 3825 | // header is reached, the execution count of the branch will be equal to the |
| 3826 | // trip count of the loop. |
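// For example (illustrative), an exiting branch guarded by an extra 'if' inside the
// loop body has a predecessor with another in-loop successor, so the walk below
// gives up rather than guess how often the branch executes.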
| 3827 | // |
| 3828 | // More extensive analysis could be done to handle more cases here. |
| 3829 | // |
Chris Lattner | 8b0e360 | 2007-01-07 02:24:26 +0000 | [diff] [blame] | 3830 | if (ExitBr->getSuccessor(0) != L->getHeader() && |
Chris Lattner | 192e403 | 2007-01-14 01:24:47 +0000 | [diff] [blame] | 3831 | ExitBr->getSuccessor(1) != L->getHeader() && |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3832 | ExitBr->getParent() != L->getHeader()) { |
| 3833 | // The simple checks failed; try climbing the unique predecessor chain
| 3834 | // up to the header. |
| 3835 | bool Ok = false; |
| 3836 | for (BasicBlock *BB = ExitBr->getParent(); BB; ) { |
| 3837 | BasicBlock *Pred = BB->getUniquePredecessor(); |
| 3838 | if (!Pred) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3839 | return getCouldNotCompute(); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3840 | TerminatorInst *PredTerm = Pred->getTerminator(); |
| 3841 | for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) { |
| 3842 | BasicBlock *PredSucc = PredTerm->getSuccessor(i); |
| 3843 | if (PredSucc == BB) |
| 3844 | continue; |
| 3845 | // If the predecessor has a successor that isn't BB and isn't |
| 3846 | // outside the loop, assume the worst. |
| 3847 | if (L->contains(PredSucc)) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3848 | return getCouldNotCompute(); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3849 | } |
| 3850 | if (Pred == L->getHeader()) { |
| 3851 | Ok = true; |
| 3852 | break; |
| 3853 | } |
| 3854 | BB = Pred; |
| 3855 | } |
| 3856 | if (!Ok) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3857 | return getCouldNotCompute(); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3858 | } |
| 3859 | |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 3860 | // Proceed to the next level to examine the exit condition expression. |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3861 | return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(), |
| 3862 | ExitBr->getSuccessor(0), |
| 3863 | ExitBr->getSuccessor(1)); |
| 3864 | } |
| 3865 | |
| 3866 | /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the |
| 3867 | /// backedge of the specified loop will execute if its exit condition |
| 3868 | /// were a conditional branch of ExitCond, TBB, and FBB. |
| 3869 | ScalarEvolution::BackedgeTakenInfo |
| 3870 | ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, |
| 3871 | Value *ExitCond, |
| 3872 | BasicBlock *TBB, |
| 3873 | BasicBlock *FBB) { |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 3874 | // Check if the controlling expression for this loop is an And or Or. |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3875 | if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { |
| 3876 | if (BO->getOpcode() == Instruction::And) { |
| 3877 | // Recurse on the operands of the and. |
| 3878 | BackedgeTakenInfo BTI0 = |
| 3879 | ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); |
| 3880 | BackedgeTakenInfo BTI1 = |
| 3881 | ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3882 | const SCEV *BECount = getCouldNotCompute(); |
| 3883 | const SCEV *MaxBECount = getCouldNotCompute(); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3884 | if (L->contains(TBB)) { |
| 3885 | // Both conditions must be true for the loop to continue executing. |
| 3886 | // Choose the less conservative count. |
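// (Illustrative: if one condition allows at most 10 backedges and the other at
// most 20, the combined loop takes at most umin(10, 20) = 10 backedges.)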
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3887 | if (BTI0.Exact == getCouldNotCompute() || |
| 3888 | BTI1.Exact == getCouldNotCompute()) |
| 3889 | BECount = getCouldNotCompute(); |
Dan Gohman | 60e9b07 | 2009-06-22 15:09:28 +0000 | [diff] [blame] | 3890 | else |
| 3891 | BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3892 | if (BTI0.Max == getCouldNotCompute()) |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3893 | MaxBECount = BTI1.Max; |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3894 | else if (BTI1.Max == getCouldNotCompute()) |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3895 | MaxBECount = BTI0.Max; |
Dan Gohman | 60e9b07 | 2009-06-22 15:09:28 +0000 | [diff] [blame] | 3896 | else |
| 3897 | MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3898 | } else { |
Dan Gohman | 4ee8739 | 2010-08-11 00:12:36 +0000 | [diff] [blame] | 3899 | // Both conditions must be true at the same time for the loop to exit. |
| 3900 | // For now, be conservative. |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3901 | assert(L->contains(FBB) && "Loop block has no successor in loop!"); |
Dan Gohman | 4ee8739 | 2010-08-11 00:12:36 +0000 | [diff] [blame] | 3902 | if (BTI0.Max == BTI1.Max) |
| 3903 | MaxBECount = BTI0.Max; |
| 3904 | if (BTI0.Exact == BTI1.Exact) |
| 3905 | BECount = BTI0.Exact; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3906 | } |
| 3907 | |
| 3908 | return BackedgeTakenInfo(BECount, MaxBECount); |
| 3909 | } |
| 3910 | if (BO->getOpcode() == Instruction::Or) { |
| 3911 | // Recurse on the operands of the or. |
| 3912 | BackedgeTakenInfo BTI0 = |
| 3913 | ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); |
| 3914 | BackedgeTakenInfo BTI1 = |
| 3915 | ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3916 | const SCEV *BECount = getCouldNotCompute(); |
| 3917 | const SCEV *MaxBECount = getCouldNotCompute(); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3918 | if (L->contains(FBB)) { |
| 3919 | // Both conditions must be false for the loop to continue executing. |
| 3920 | // Choose the less conservative count. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3921 | if (BTI0.Exact == getCouldNotCompute() || |
| 3922 | BTI1.Exact == getCouldNotCompute()) |
| 3923 | BECount = getCouldNotCompute(); |
Dan Gohman | 60e9b07 | 2009-06-22 15:09:28 +0000 | [diff] [blame] | 3924 | else |
| 3925 | BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3926 | if (BTI0.Max == getCouldNotCompute()) |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3927 | MaxBECount = BTI1.Max; |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 3928 | else if (BTI1.Max == getCouldNotCompute()) |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3929 | MaxBECount = BTI0.Max; |
Dan Gohman | 60e9b07 | 2009-06-22 15:09:28 +0000 | [diff] [blame] | 3930 | else |
| 3931 | MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3932 | } else { |
Dan Gohman | 4ee8739 | 2010-08-11 00:12:36 +0000 | [diff] [blame] | 3933 | // Both conditions must be false at the same time for the loop to exit. |
| 3934 | // For now, be conservative. |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3935 | assert(L->contains(TBB) && "Loop block has no successor in loop!"); |
Dan Gohman | 4ee8739 | 2010-08-11 00:12:36 +0000 | [diff] [blame] | 3936 | if (BTI0.Max == BTI1.Max) |
| 3937 | MaxBECount = BTI0.Max; |
| 3938 | if (BTI0.Exact == BTI1.Exact) |
| 3939 | BECount = BTI0.Exact; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3940 | } |
| 3941 | |
| 3942 | return BackedgeTakenInfo(BECount, MaxBECount); |
| 3943 | } |
| 3944 | } |
| 3945 | |
| 3946 | // With an icmp, it may be feasible to compute an exact backedge-taken count. |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 3947 | // Proceed to the next level to examine the icmp. |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3948 | if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) |
| 3949 | return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB); |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 3950 | |
Dan Gohman | 00cb5b7 | 2010-02-19 18:12:07 +0000 | [diff] [blame] | 3951 | // Check for a constant condition. These are normally stripped out by |
| 3952 | // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to |
| 3953 | // preserve the CFG and is temporarily leaving constant conditions |
| 3954 | // in place. |
| 3955 | if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { |
| 3956 | if (L->contains(FBB) == !CI->getZExtValue()) |
| 3957 | // The backedge is always taken. |
| 3958 | return getCouldNotCompute(); |
| 3959 | else |
| 3960 | // The backedge is never taken. |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 3961 | return getConstant(CI->getType(), 0); |
Dan Gohman | 00cb5b7 | 2010-02-19 18:12:07 +0000 | [diff] [blame] | 3962 | } |
| 3963 | |
Eli Friedman | 361e54d | 2009-05-09 12:32:42 +0000 | [diff] [blame] | 3964 | // If it's not an integer or pointer comparison then compute it the hard way. |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3965 | return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); |
| 3966 | } |
| 3967 | |
| 3968 | /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the |
| 3969 | /// backedge of the specified loop will execute if its exit condition |
| 3970 | /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB. |
| 3971 | ScalarEvolution::BackedgeTakenInfo |
| 3972 | ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, |
| 3973 | ICmpInst *ExitCond, |
| 3974 | BasicBlock *TBB, |
| 3975 | BasicBlock *FBB) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3976 | |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 3977 | // If the branch exits the loop when the condition is true, invert the condition so that it exits on false.
| 3978 | ICmpInst::Predicate Cond; |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 3979 | if (!L->contains(FBB)) |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 3980 | Cond = ExitCond->getPredicate(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 3981 | else |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 3982 | Cond = ExitCond->getInversePredicate(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 3983 | |
| 3984 | // Handle common loops like: for (X = "string"; *X; ++X) |
| 3985 | if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) |
| 3986 | if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { |
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 3987 | BackedgeTakenInfo ItCnt = |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 3988 | ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); |
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 3989 | if (ItCnt.hasAnyInfo()) |
| 3990 | return ItCnt; |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 3991 | } |
| 3992 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 3993 | const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); |
| 3994 | const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3995 | |
| 3996 | // Try to evaluate any dependencies out of the loop. |
Dan Gohman | d594e6f | 2009-05-24 23:25:42 +0000 | [diff] [blame] | 3997 | LHS = getSCEVAtScope(LHS, L); |
| 3998 | RHS = getSCEVAtScope(RHS, L); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 3999 | |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4000 | // At this point, we would like to compute for how many iterations of the
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4001 | // loop the predicate will evaluate to true for these inputs.
Dan Gohman | 70ff4cf | 2008-09-16 18:52:57 +0000 | [diff] [blame] | 4002 | if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) { |
| 4003 | // If the LHS is loop-invariant but the RHS is not, swap them so the invariant operand is on the RHS.
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4004 | std::swap(LHS, RHS); |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4005 | Cond = ICmpInst::getSwappedPredicate(Cond); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4006 | } |
| 4007 | |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 4008 | // Simplify the operands before analyzing them. |
| 4009 | (void)SimplifyICmpOperands(Cond, LHS, RHS); |
| 4010 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4011 | // If we have a comparison of a chrec against a constant, try to use value |
| 4012 | // ranges to answer this query. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4013 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) |
| 4014 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4015 | if (AddRec->getLoop() == L) { |
Eli Friedman | 361e54d | 2009-05-09 12:32:42 +0000 | [diff] [blame] | 4016 | // Form the constant range. |
| 4017 | ConstantRange CompRange( |
| 4018 | ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4019 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4020 | const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); |
Eli Friedman | 361e54d | 2009-05-09 12:32:42 +0000 | [diff] [blame] | 4021 | if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4022 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4023 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4024 | switch (Cond) { |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4025 | case ICmpInst::ICMP_NE: { // while (X != Y) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4026 | // Convert to: while (X-Y != 0) |
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 4027 | BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L); |
| 4028 | if (BTI.hasAnyInfo()) return BTI; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4029 | break; |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4030 | } |
Dan Gohman | 4c0d5d5 | 2009-08-20 16:42:55 +0000 | [diff] [blame] | 4031 | case ICmpInst::ICMP_EQ: { // while (X == Y) |
| 4032 | // Convert to: while (X-Y == 0) |
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 4033 | BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); |
| 4034 | if (BTI.hasAnyInfo()) return BTI; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4035 | break; |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4036 | } |
| 4037 | case ICmpInst::ICMP_SLT: { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 4038 | BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true); |
| 4039 | if (BTI.hasAnyInfo()) return BTI; |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 4040 | break; |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4041 | } |
| 4042 | case ICmpInst::ICMP_SGT: { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 4043 | BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), |
| 4044 | getNotSCEV(RHS), L, true); |
| 4045 | if (BTI.hasAnyInfo()) return BTI; |
Nick Lewycky | d6dac0e | 2007-08-06 19:21:00 +0000 | [diff] [blame] | 4046 | break; |
| 4047 | } |
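// Note: the SGT case above and the UGT case below rely on the identity
// x > y  <=>  ~x < ~y, which holds for both the signed and unsigned orderings
// because bitwise NOT, ~x == -1 - x, is an order-reversing bijection; this
// lets both cases reuse HowManyLessThans on the complemented operands.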
| 4048 | case ICmpInst::ICMP_ULT: { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 4049 | BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false); |
| 4050 | if (BTI.hasAnyInfo()) return BTI; |
Nick Lewycky | d6dac0e | 2007-08-06 19:21:00 +0000 | [diff] [blame] | 4051 | break; |
| 4052 | } |
| 4053 | case ICmpInst::ICMP_UGT: { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 4054 | BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), |
| 4055 | getNotSCEV(RHS), L, false); |
| 4056 | if (BTI.hasAnyInfo()) return BTI; |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 4057 | break; |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4058 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4059 | default: |
Chris Lattner | d18d9dc | 2004-04-02 20:26:46 +0000 | [diff] [blame] | 4060 | #if 0 |
David Greene | 25e0e87 | 2009-12-23 22:18:14 +0000 | [diff] [blame] | 4061 | dbgs() << "ComputeBackedgeTakenCount "; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4062 | if (ExitCond->getOperand(0)->getType()->isUnsigned()) |
David Greene | 25e0e87 | 2009-12-23 22:18:14 +0000 | [diff] [blame] | 4063 | dbgs() << "[unsigned] "; |
| 4064 | dbgs() << *LHS << " " |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4065 | << Instruction::getOpcodeName(Instruction::ICmp) |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4066 | << " " << *RHS << "\n"; |
Chris Lattner | d18d9dc | 2004-04-02 20:26:46 +0000 | [diff] [blame] | 4067 | #endif |
Chris Lattner | e34c0b4 | 2004-04-03 00:43:03 +0000 | [diff] [blame] | 4068 | break; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4069 | } |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 4070 | return |
Dan Gohman | a334aa7 | 2009-06-22 00:31:57 +0000 | [diff] [blame] | 4071 | ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4072 | } |
| 4073 | |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4074 | static ConstantInt * |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 4075 | EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, |
| 4076 | ScalarEvolution &SE) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4077 | const SCEV *InVal = SE.getConstant(C); |
| 4078 | const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4079 | assert(isa<SCEVConstant>(Val) && |
| 4080 | "Evaluation of SCEV at constant didn't fold correctly?"); |
| 4081 | return cast<SCEVConstant>(Val)->getValue(); |
| 4082 | } |
| 4083 | |
| 4084 | /// GetAddressedElementFromGlobal - Given a global variable with an initializer |
| 4085 | /// and a GEP expression (missing the pointer index) indexing into it, return |
| 4086 | /// the addressed element of the initializer or null if the index expression is |
| 4087 | /// invalid. |
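///
/// As an illustrative sketch (the global here is hypothetical): for
/// "@tab = constant [4 x i32] [i32 7, i32 8, i32 9, i32 0]" and the single
/// index 2, the walk below descends into the ConstantArray initializer and
/// returns the element i32 9, while an index past the end returns null.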
| 4088 | static Constant * |
Nick Lewycky | c6501b1 | 2009-11-23 03:26:09 +0000 | [diff] [blame] | 4089 | GetAddressedElementFromGlobal(GlobalVariable *GV, |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4090 | const std::vector<ConstantInt*> &Indices) { |
| 4091 | Constant *Init = GV->getInitializer(); |
| 4092 | for (unsigned i = 0, e = Indices.size(); i != e; ++i) { |
Reid Spencer | b83eb64 | 2006-10-20 07:07:24 +0000 | [diff] [blame] | 4093 | uint64_t Idx = Indices[i]->getZExtValue(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4094 | if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { |
| 4095 | assert(Idx < CS->getNumOperands() && "Bad struct index!"); |
| 4096 | Init = cast<Constant>(CS->getOperand(Idx)); |
| 4097 | } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { |
| 4098 | if (Idx >= CA->getNumOperands()) return 0; // Bogus program |
| 4099 | Init = cast<Constant>(CA->getOperand(Idx)); |
| 4100 | } else if (isa<ConstantAggregateZero>(Init)) { |
| 4101 | if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { |
| 4102 | assert(Idx < STy->getNumElements() && "Bad struct index!"); |
Owen Anderson | a7235ea | 2009-07-31 20:28:14 +0000 | [diff] [blame] | 4103 | Init = Constant::getNullValue(STy->getElementType(Idx)); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4104 | } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) { |
| 4105 | if (Idx >= ATy->getNumElements()) return 0; // Bogus program |
Owen Anderson | a7235ea | 2009-07-31 20:28:14 +0000 | [diff] [blame] | 4106 | Init = Constant::getNullValue(ATy->getElementType()); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4107 | } else { |
Torok Edwin | c23197a | 2009-07-14 16:55:14 +0000 | [diff] [blame] | 4108 | llvm_unreachable("Unknown constant aggregate type!"); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4109 | } |
| 4110 | return 0; |
| 4111 | } else { |
| 4112 | return 0; // Unknown initializer type |
| 4113 | } |
| 4114 | } |
| 4115 | return Init; |
| 4116 | } |
| 4117 | |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 4118 | /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of |
| 4119 | /// 'icmp op load X, cst', try to see if we can compute the backedge |
| 4120 | /// execution count. |
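///
/// Illustrative sketch: this covers loops such as
/// "for (i = 0; table[i] != 0; ++i)" over a constant global array, where each
/// candidate index is folded against the initializer until the
/// (exit-on-false) comparison first evaluates to false.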
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 4121 | ScalarEvolution::BackedgeTakenInfo |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4122 | ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( |
| 4123 | LoadInst *LI, |
| 4124 | Constant *RHS, |
| 4125 | const Loop *L, |
| 4126 | ICmpInst::Predicate predicate) { |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4127 | if (LI->isVolatile()) return getCouldNotCompute(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4128 | |
| 4129 | // Check to see if the loaded pointer is a getelementptr of a global. |
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 4130 | // TODO: Use SCEV instead of manually grubbing with GEPs. |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4131 | GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4132 | if (!GEP) return getCouldNotCompute(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4133 | |
| 4134 | // Make sure that it is really a constant global we are gepping, with an |
| 4135 | // initializer, and make sure the first IDX is really 0. |
| 4136 | GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); |
Dan Gohman | 8255573 | 2009-08-19 18:20:44 +0000 | [diff] [blame] | 4137 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4138 | GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || |
| 4139 | !cast<Constant>(GEP->getOperand(1))->isNullValue()) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4140 | return getCouldNotCompute(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4141 | |
| 4142 | // Okay, we allow one non-constant index into the GEP instruction. |
| 4143 | Value *VarIdx = 0; |
| 4144 | std::vector<ConstantInt*> Indexes; |
| 4145 | unsigned VarIdxNum = 0; |
| 4146 | for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) |
| 4147 | if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { |
| 4148 | Indexes.push_back(CI); |
| 4149 | } else if (!isa<ConstantInt>(GEP->getOperand(i))) { |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4150 | if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4151 | VarIdx = GEP->getOperand(i); |
| 4152 | VarIdxNum = i-2; |
| 4153 | Indexes.push_back(0); |
| 4154 | } |
| 4155 | |
| 4156 | // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. |
| 4157 | // Check to see if X is a loop-variant value now.
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4158 | const SCEV *Idx = getSCEV(VarIdx); |
Dan Gohman | d594e6f | 2009-05-24 23:25:42 +0000 | [diff] [blame] | 4159 | Idx = getSCEVAtScope(Idx, L); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4160 | |
| 4161 | // We can only recognize very limited forms of loop index expressions, in |
| 4162 | // particular, only affine AddRec's like {C1,+,C2}. |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 4163 | const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4164 | if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) || |
| 4165 | !isa<SCEVConstant>(IdxExpr->getOperand(0)) || |
| 4166 | !isa<SCEVConstant>(IdxExpr->getOperand(1))) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4167 | return getCouldNotCompute(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4168 | |
| 4169 | unsigned MaxSteps = MaxBruteForceIterations; |
| 4170 | for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 4171 | ConstantInt *ItCst = ConstantInt::get( |
Owen Anderson | 9adc0ab | 2009-07-14 23:09:55 +0000 | [diff] [blame] | 4172 | cast<IntegerType>(IdxExpr->getType()), IterationNum); |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4173 | ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4174 | |
| 4175 | // Form the GEP offset. |
| 4176 | Indexes[VarIdxNum] = Val; |
| 4177 | |
Nick Lewycky | c6501b1 | 2009-11-23 03:26:09 +0000 | [diff] [blame] | 4178 | Constant *Result = GetAddressedElementFromGlobal(GV, Indexes); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4179 | if (Result == 0) break; // Cannot compute! |
| 4180 | |
| 4181 | // Evaluate the condition for this iteration. |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4182 | Result = ConstantExpr::getICmp(predicate, Result, RHS); |
Zhou Sheng | 6b6b6ef | 2007-01-11 12:24:14 +0000 | [diff] [blame] | 4183 | if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4184 | if (cast<ConstantInt>(Result)->getValue().isMinValue()) { |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4185 | #if 0 |
David Greene | 25e0e87 | 2009-12-23 22:18:14 +0000 | [diff] [blame] | 4186 | dbgs() << "\n***\n*** Computed loop count " << *ItCst |
Dan Gohman | b7ef729 | 2009-04-21 00:47:46 +0000 | [diff] [blame] | 4187 | << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() |
| 4188 | << "***\n"; |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4189 | #endif |
| 4190 | ++NumArrayLenItCounts; |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4191 | return getConstant(ItCst); // Found terminating iteration! |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4192 | } |
| 4193 | } |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4194 | return getCouldNotCompute(); |
Chris Lattner | 673e02b | 2004-10-12 01:49:27 +0000 | [diff] [blame] | 4195 | } |
| 4196 | |
| 4197 | |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4198 | /// CanConstantFold - Return true if we can constant fold an instruction of the |
| 4199 | /// specified type, assuming that all operands were constants. |
| 4200 | static bool CanConstantFold(const Instruction *I) { |
Reid Spencer | 832254e | 2007-02-02 02:16:23 +0000 | [diff] [blame] | 4201 | if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4202 | isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I)) |
| 4203 | return true; |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4204 | |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4205 | if (const CallInst *CI = dyn_cast<CallInst>(I)) |
| 4206 | if (const Function *F = CI->getCalledFunction()) |
Dan Gohman | fa9b80e | 2008-01-31 01:05:10 +0000 | [diff] [blame] | 4207 | return canConstantFoldCallTo(F); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4208 | return false; |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4209 | } |
| 4210 | |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4211 | /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node |
| 4212 | /// in the loop that V is derived from. We allow arbitrary operations along the |
| 4213 | /// way, but the operands of an operation must be either constants or values
| 4214 | /// derived from a constant PHI. If this expression does not fit with these |
| 4215 | /// constraints, return null. |
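///
/// As a hedged example: for "x = phi [3, preheader], [x.next, latch]" with
/// "x.next = mul x, 2", calling this on x.next returns the header PHI x, since
/// the multiply's only non-constant operand is derived from that PHI.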
| 4216 | static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { |
| 4217 | // If this is not an instruction, or if this is an instruction outside of the |
| 4218 | // loop, it can't be derived from a loop PHI. |
| 4219 | Instruction *I = dyn_cast<Instruction>(V); |
Dan Gohman | 92329c7 | 2009-12-18 01:24:09 +0000 | [diff] [blame] | 4220 | if (I == 0 || !L->contains(I)) return 0; |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4221 | |
Anton Korobeynikov | ae9f3a3 | 2008-02-20 11:08:44 +0000 | [diff] [blame] | 4222 | if (PHINode *PN = dyn_cast<PHINode>(I)) { |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4223 | if (L->getHeader() == I->getParent()) |
| 4224 | return PN; |
| 4225 | else |
| 4226 | // We don't currently keep track of the control flow needed to evaluate |
| 4227 | // PHIs, so we cannot handle PHIs inside of loops. |
| 4228 | return 0; |
Anton Korobeynikov | ae9f3a3 | 2008-02-20 11:08:44 +0000 | [diff] [blame] | 4229 | } |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4230 | |
| 4231 | // If we won't be able to constant fold this expression even if the operands |
| 4232 | // are constants, return early. |
| 4233 | if (!CanConstantFold(I)) return 0; |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4234 | |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4235 | // Otherwise, we can evaluate this instruction if all of its operands are |
| 4236 | // constant or derived from a PHI node themselves. |
| 4237 | PHINode *PHI = 0; |
| 4238 | for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op) |
Dan Gohman | 9d4588f | 2010-06-22 13:15:46 +0000 | [diff] [blame] | 4239 | if (!isa<Constant>(I->getOperand(Op))) { |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4240 | PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L); |
| 4241 | if (P == 0) return 0; // Not evolving from PHI |
| 4242 | if (PHI == 0) |
| 4243 | PHI = P; |
| 4244 | else if (PHI != P) |
| 4245 | return 0; // Evolving from multiple different PHIs. |
| 4246 | } |
| 4247 | |
| 4248 | // This is an expression evolving from a constant PHI!
| 4249 | return PHI; |
| 4250 | } |
| 4251 | |
| 4252 | /// EvaluateExpression - Given an expression that passes the |
| 4253 | /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node |
| 4254 | /// in the loop has the value PHIVal. If we can't fold this expression for some |
| 4255 | /// reason, return null. |
Dan Gohman | 1ba3b6c | 2009-11-09 23:34:17 +0000 | [diff] [blame] | 4256 | static Constant *EvaluateExpression(Value *V, Constant *PHIVal, |
| 4257 | const TargetData *TD) { |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4258 | if (isa<PHINode>(V)) return PHIVal; |
Reid Spencer | e840434 | 2004-07-18 00:18:30 +0000 | [diff] [blame] | 4259 | if (Constant *C = dyn_cast<Constant>(V)) return C; |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4260 | Instruction *I = cast<Instruction>(V); |
| 4261 | |
Dan Gohman | 9d4588f | 2010-06-22 13:15:46 +0000 | [diff] [blame] | 4262 | std::vector<Constant*> Operands(I->getNumOperands()); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4263 | |
| 4264 | for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { |
Dan Gohman | 1ba3b6c | 2009-11-09 23:34:17 +0000 | [diff] [blame] | 4265 | Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4266 | if (Operands[i] == 0) return 0; |
| 4267 | } |
| 4268 | |
Chris Lattner | f286f6f | 2007-12-10 22:53:04 +0000 | [diff] [blame] | 4269 | if (const CmpInst *CI = dyn_cast<CmpInst>(I)) |
Chris Lattner | 8f73dea | 2009-11-09 23:06:58 +0000 | [diff] [blame] | 4270 | return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], |
Dan Gohman | 1ba3b6c | 2009-11-09 23:34:17 +0000 | [diff] [blame] | 4271 | Operands[1], TD); |
Chris Lattner | 8f73dea | 2009-11-09 23:06:58 +0000 | [diff] [blame] | 4272 | return ConstantFoldInstOperands(I->getOpcode(), I->getType(), |
Dan Gohman | 1ba3b6c | 2009-11-09 23:34:17 +0000 | [diff] [blame] | 4273 | &Operands[0], Operands.size(), TD); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4274 | } |
| 4275 | |
| 4276 | /// getConstantEvolutionLoopExitValue - If we know that the specified PHI is
| 4277 | /// in the header of its containing loop, that the loop executes a
| 4278 | /// constant number of times, and that the PHI node is just a recurrence
| 4279 | /// involving constants, fold it. |
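///
/// Illustrative sketch: for a header PHI starting at 3 whose backedge value
/// multiplies the PHI by 2, a backedge-taken count of 3 makes the evaluation
/// below return 3 * 2^3 = 24 as the exit value, the PHI's value in the final
/// iteration.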
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4280 | Constant * |
| 4281 | ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 4282 | const APInt &BEs, |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4283 | const Loop *L) { |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4284 | std::map<PHINode*, Constant*>::iterator I = |
| 4285 | ConstantEvolutionLoopExitValue.find(PN); |
| 4286 | if (I != ConstantEvolutionLoopExitValue.end()) |
| 4287 | return I->second; |
| 4288 | |
Dan Gohman | e056781 | 2010-04-08 23:03:40 +0000 | [diff] [blame] | 4289 | if (BEs.ugt(MaxBruteForceIterations)) |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4290 | return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it. |
| 4291 | |
| 4292 | Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; |
| 4293 | |
| 4294 | // Since the loop is canonicalized, the PHI node must have two entries. One |
| 4295 | // entry must be a constant (coming in from outside of the loop), and the |
| 4296 | // second must be derived from the same PHI. |
| 4297 | bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); |
| 4298 | Constant *StartCST = |
| 4299 | dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); |
| 4300 | if (StartCST == 0) |
| 4301 | return RetVal = 0; // Must be a constant. |
| 4302 | |
| 4303 | Value *BEValue = PN->getIncomingValue(SecondIsBackedge); |
Dan Gohman | 9d4588f | 2010-06-22 13:15:46 +0000 | [diff] [blame] | 4304 | if (getConstantEvolvingPHI(BEValue, L) != PN && |
| 4305 | !isa<Constant>(BEValue)) |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4306 | return RetVal = 0; // Not derived from same PHI. |
| 4307 | |
| 4308 | // Execute the loop symbolically to determine the exit value. |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 4309 | if (BEs.getActiveBits() >= 32) |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4310 | return RetVal = 0; // More than 2^32-1 iterations?? Not doing it! |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4311 | |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 4312 | unsigned NumIterations = BEs.getZExtValue(); // must be in range |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4313 | unsigned IterationNum = 0; |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4314 | for (Constant *PHIVal = StartCST; ; ++IterationNum) { |
| 4315 | if (IterationNum == NumIterations) |
| 4316 | return RetVal = PHIVal; // Got exit value! |
| 4317 | |
| 4318 | // Compute the value of the PHI node for the next iteration. |
Dan Gohman | 1ba3b6c | 2009-11-09 23:34:17 +0000 | [diff] [blame] | 4319 | Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4320 | if (NextPHI == PHIVal) |
| 4321 | return RetVal = NextPHI; // Stopped evolving! |
| 4322 | if (NextPHI == 0) |
| 4323 | return 0; // Couldn't evaluate! |
| 4324 | PHIVal = NextPHI; |
| 4325 | } |
| 4326 | } |
| 4327 | |
Dan Gohman | 07ad19b | 2009-07-27 16:09:48 +0000 | [diff] [blame] | 4328 | /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4329 | /// constant number of times (the condition evolves only from constants), |
| 4330 | /// try to evaluate a few iterations of the loop until the exit
| 4331 | /// condition gets a value of ExitWhen (true or false). If we cannot |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4332 | /// evaluate the trip count of the loop, return getCouldNotCompute(). |
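///
/// Hedged illustration: for a loop like "for (x = 3; x != 48; x *= 2)" (which
/// has no affine recurrence for SCEV to solve in closed form), the condition
/// evolves only from the constant-evolving PHI for x, and the symbolic
/// execution below sees the condition flip after 4 backedges, so a count of
/// 4 is returned (assuming the usual exit-on-false branch layout).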
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4333 | const SCEV * |
| 4334 | ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, |
| 4335 | Value *Cond, |
| 4336 | bool ExitWhen) { |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4337 | PHINode *PN = getConstantEvolvingPHI(Cond, L); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4338 | if (PN == 0) return getCouldNotCompute(); |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4339 | |
Dan Gohman | b92654d | 2010-06-19 14:17:24 +0000 | [diff] [blame] | 4340 | // If the loop is canonicalized, the PHI will have exactly two entries. |
| 4341 | // That's the only form we support here. |
| 4342 | if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); |
| 4343 | |
| 4344 | // One entry must be a constant (coming in from outside of the loop), and the |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4345 | // second must be derived from the same PHI. |
| 4346 | bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); |
| 4347 | Constant *StartCST = |
| 4348 | dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4349 | if (StartCST == 0) return getCouldNotCompute(); // Must be a constant. |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4350 | |
| 4351 | Value *BEValue = PN->getIncomingValue(SecondIsBackedge); |
Dan Gohman | 9d4588f | 2010-06-22 13:15:46 +0000 | [diff] [blame] | 4352 | if (getConstantEvolvingPHI(BEValue, L) != PN && |
| 4353 | !isa<Constant>(BEValue)) |
| 4354 | return getCouldNotCompute(); // Not derived from same PHI. |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4355 | |
| 4356 | // Okay, we found a PHI node that defines the trip count of this loop. Execute
| 4357 | // the loop symbolically to determine when the condition gets a value of |
| 4358 | // "ExitWhen". |
| 4359 | unsigned IterationNum = 0; |
| 4360 | unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. |
| 4361 | for (Constant *PHIVal = StartCST; |
| 4362 | IterationNum != MaxIterations; ++IterationNum) { |
Zhou Sheng | 6b6b6ef | 2007-01-11 12:24:14 +0000 | [diff] [blame] | 4363 | ConstantInt *CondVal = |
Dan Gohman | 1ba3b6c | 2009-11-09 23:34:17 +0000 | [diff] [blame] | 4364 | dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD)); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4365 | |
Zhou Sheng | 6b6b6ef | 2007-01-11 12:24:14 +0000 | [diff] [blame] | 4366 | // Couldn't symbolically evaluate. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4367 | if (!CondVal) return getCouldNotCompute(); |
Zhou Sheng | 6b6b6ef | 2007-01-11 12:24:14 +0000 | [diff] [blame] | 4368 | |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4369 | if (CondVal->getValue() == uint64_t(ExitWhen)) { |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4370 | ++NumBruteForceTripCountsComputed; |
Owen Anderson | 1d0be15 | 2009-08-13 21:58:54 +0000 | [diff] [blame] | 4371 | return getConstant(Type::getInt32Ty(getContext()), IterationNum); |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4372 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4373 | |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4374 | // Compute the value of the PHI node for the next iteration. |
Dan Gohman | 1ba3b6c | 2009-11-09 23:34:17 +0000 | [diff] [blame] | 4375 | Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4376 | if (NextPHI == 0 || NextPHI == PHIVal) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4377 | return getCouldNotCompute();// Couldn't evaluate or not making progress... |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4378 | PHIVal = NextPHI; |
Chris Lattner | 7980fb9 | 2004-04-17 18:36:24 +0000 | [diff] [blame] | 4379 | } |
| 4380 | |
| 4381 | // Too many iterations were needed to evaluate. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4382 | return getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4383 | } |
| 4384 | |
Dan Gohman | e7125f4 | 2009-09-03 15:00:26 +0000 | [diff] [blame] | 4385 | /// getSCEVAtScope - Return a SCEV expression for the specified value |
Dan Gohman | 66a7e85 | 2009-05-08 20:38:54 +0000 | [diff] [blame] | 4386 | /// at the specified scope in the program. The L value specifies the scope:
| 4387 | /// null means the top-level scope, while a non-null loop means the expression
| 4388 | /// is evaluated as if it appeared immediately inside that loop.
| 4389 | /// |
| 4390 | /// This method can be used to compute the exit value for a variable defined |
| 4391 | /// in a loop by querying what the value will hold in the parent loop. |
| 4392 | /// |
Dan Gohman | d594e6f | 2009-05-24 23:25:42 +0000 | [diff] [blame] | 4393 | /// In the case that a relevant loop exit value cannot be computed, the |
| 4394 | /// original value V is returned. |
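///
/// For example (a sketch): if an inner loop's induction variable is the
/// addrec {0,+,1}<inner> and the inner backedge-taken count is n, querying
/// that addrec at the scope of the enclosing loop yields n (the addrec
/// evaluated at the inner loop's backedge-taken count).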
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4395 | const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 4396 | // Check to see if we've folded this expression at this loop before. |
| 4397 | std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V]; |
| 4398 | std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair = |
| 4399 | Values.insert(std::make_pair(L, static_cast<const SCEV *>(0))); |
| 4400 | if (!Pair.second) |
| 4401 | return Pair.first->second ? Pair.first->second : V; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4402 | |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 4403 | // Otherwise compute it. |
| 4404 | const SCEV *C = computeSCEVAtScope(V, L); |
Dan Gohman | a5505cb | 2009-08-31 21:58:28 +0000 | [diff] [blame] | 4405 | ValuesAtScopes[V][L] = C; |
Dan Gohman | 4221489 | 2009-08-31 21:15:23 +0000 | [diff] [blame] | 4406 | return C; |
| 4407 | } |
| 4408 | |
| 4409 | const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4410 | if (isa<SCEVConstant>(V)) return V; |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4411 | |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 4412 | // If this instruction is evolved from a constant-evolving PHI, compute the |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4413 | // exit value from the loop without using SCEVs. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4414 | if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4415 | if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4416 | const Loop *LI = (*this->LI)[I->getParent()]; |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4417 | if (LI && LI->getParentLoop() == L) // Looking for loop exit value. |
| 4418 | if (PHINode *PN = dyn_cast<PHINode>(I)) |
| 4419 | if (PN->getParent() == LI->getHeader()) { |
| 4420 | // Okay, there is no closed form solution for the PHI node. Check |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 4421 | // to see if the loop that contains it has a known backedge-taken |
| 4422 | // count. If so, we may be able to force computation of the exit |
| 4423 | // value. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4424 | const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4425 | if (const SCEVConstant *BTCC = |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 4426 | dyn_cast<SCEVConstant>(BackedgeTakenCount)) { |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4427 | // Okay, we know how many times the containing loop executes. If |
| 4428 | // this is a constant evolving PHI node, get the final value at |
| 4429 | // the specified iteration number. |
| 4430 | Constant *RV = getConstantEvolutionLoopExitValue(PN, |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 4431 | BTCC->getValue()->getValue(), |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4432 | LI); |
Dan Gohman | 0998796 | 2009-06-29 21:31:18 +0000 | [diff] [blame] | 4433 | if (RV) return getSCEV(RV); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4434 | } |
| 4435 | } |
| 4436 | |
Reid Spencer | 09906f3 | 2006-12-04 21:33:23 +0000 | [diff] [blame] | 4437 | // Okay, this is an expression that we cannot symbolically evaluate |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4438 | // into a SCEV. Check to see if it's possible to symbolically evaluate |
Reid Spencer | 09906f3 | 2006-12-04 21:33:23 +0000 | [diff] [blame] | 4439 | // the arguments into constants, and if so, try to constant propagate the |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4440 | // result. This is particularly useful for computing loop exit values. |
| 4441 | if (CanConstantFold(I)) { |
Dan Gohman | 1104645 | 2010-06-29 23:43:06 +0000 | [diff] [blame] | 4442 | SmallVector<Constant *, 4> Operands; |
| 4443 | bool MadeImprovement = false; |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4444 | for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { |
| 4445 | Value *Op = I->getOperand(i); |
| 4446 | if (Constant *C = dyn_cast<Constant>(Op)) { |
| 4447 | Operands.push_back(C); |
Dan Gohman | 1104645 | 2010-06-29 23:43:06 +0000 | [diff] [blame] | 4448 | continue; |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4449 | } |
Dan Gohman | 1104645 | 2010-06-29 23:43:06 +0000 | [diff] [blame] | 4450 | |
| 4451 | // If a non-constant operand has a type that SCEV cannot model
| 4452 | // (neither integer nor pointer), don't even try to analyze it
| 4453 | // with SCEV techniques.
| 4454 | if (!isSCEVable(Op->getType())) |
| 4455 | return V; |
| 4456 | |
| 4457 | const SCEV *OrigV = getSCEV(Op); |
| 4458 | const SCEV *OpV = getSCEVAtScope(OrigV, L); |
| 4459 | MadeImprovement |= OrigV != OpV; |
| 4460 | |
| 4461 | Constant *C = 0; |
| 4462 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) |
| 4463 | C = SC->getValue(); |
| 4464 | if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) |
| 4465 | C = dyn_cast<Constant>(SU->getValue()); |
| 4466 | if (!C) return V; |
| 4467 | if (C->getType() != Op->getType()) |
| 4468 | C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, |
| 4469 | Op->getType(), |
| 4470 | false), |
| 4471 | C, Op->getType()); |
| 4472 | Operands.push_back(C); |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4473 | } |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4474 | |
Dan Gohman | 1104645 | 2010-06-29 23:43:06 +0000 | [diff] [blame] | 4475 | // Check to see if getSCEVAtScope actually made an improvement. |
| 4476 | if (MadeImprovement) { |
| 4477 | Constant *C = 0; |
| 4478 | if (const CmpInst *CI = dyn_cast<CmpInst>(I)) |
| 4479 | C = ConstantFoldCompareInstOperands(CI->getPredicate(), |
| 4480 | Operands[0], Operands[1], TD); |
| 4481 | else |
| 4482 | C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), |
| 4483 | &Operands[0], Operands.size(), TD); |
| 4484 | if (!C) return V; |
Dan Gohman | e177c9a | 2010-02-24 19:31:47 +0000 | [diff] [blame] | 4485 | return getSCEV(C); |
Dan Gohman | 1104645 | 2010-06-29 23:43:06 +0000 | [diff] [blame] | 4486 | } |
Chris Lattner | 3221ad0 | 2004-04-17 22:58:41 +0000 | [diff] [blame] | 4487 | } |
| 4488 | } |
| 4489 | |
| 4490 | // This is some other type of SCEVUnknown, just return it. |
| 4491 | return V; |
| 4492 | } |
| 4493 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4494 | if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4495 | // Avoid performing the look-up in the common case where the specified |
| 4496 | // expression has no loop-variant portions. |
| 4497 | for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4498 | const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4499 | if (OpAtScope != Comm->getOperand(i)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4500 | // Okay, at least one of these operands is loop variant but might be |
| 4501 | // foldable. Build a new instance of the folded commutative expression. |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4502 | SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), |
| 4503 | Comm->op_begin()+i); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4504 | NewOps.push_back(OpAtScope); |
| 4505 | |
| 4506 | for (++i; i != e; ++i) { |
| 4507 | OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4508 | NewOps.push_back(OpAtScope); |
| 4509 | } |
| 4510 | if (isa<SCEVAddExpr>(Comm)) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4511 | return getAddExpr(NewOps); |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 4512 | if (isa<SCEVMulExpr>(Comm)) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4513 | return getMulExpr(NewOps); |
Nick Lewycky | c54c561 | 2007-11-25 22:41:31 +0000 | [diff] [blame] | 4514 | if (isa<SCEVSMaxExpr>(Comm)) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4515 | return getSMaxExpr(NewOps); |
Nick Lewycky | 3e63076 | 2008-02-20 06:48:22 +0000 | [diff] [blame] | 4516 | if (isa<SCEVUMaxExpr>(Comm)) |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4517 | return getUMaxExpr(NewOps); |
Torok Edwin | c23197a | 2009-07-14 16:55:14 +0000 | [diff] [blame] | 4518 | llvm_unreachable("Unknown commutative SCEV type!"); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4519 | } |
| 4520 | } |
| 4521 | // If we got here, all operands are loop invariant. |
| 4522 | return Comm; |
| 4523 | } |
| 4524 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4525 | if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4526 | const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); |
| 4527 | const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); |
Nick Lewycky | 789558d | 2009-01-13 09:18:58 +0000 | [diff] [blame] | 4528 | if (LHS == Div->getLHS() && RHS == Div->getRHS()) |
| 4529 | return Div; // must be loop invariant |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4530 | return getUDivExpr(LHS, RHS); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4531 | } |
| 4532 | |
| 4533 | // If this is a loop recurrence for a loop that does not contain L, then we |
| 4534 | // are dealing with the final value computed by the loop. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4535 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { |
Dan Gohman | 1104645 | 2010-06-29 23:43:06 +0000 | [diff] [blame] | 4536 | // First, attempt to evaluate each operand. |
| 4537 | // Avoid performing the look-up in the common case where the specified |
| 4538 | // expression has no loop-variant portions. |
| 4539 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { |
| 4540 | const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); |
| 4541 | if (OpAtScope == AddRec->getOperand(i)) |
| 4542 | continue; |
| 4543 | |
| 4544 | // Okay, at least one of these operands is loop variant but might be |
| 4545 | // foldable. Build a new instance of the folded add recurrence.
| 4546 | SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), |
| 4547 | AddRec->op_begin()+i); |
| 4548 | NewOps.push_back(OpAtScope); |
| 4549 | for (++i; i != e; ++i) |
| 4550 | NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); |
| 4551 | |
| 4552 | AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop())); |
| 4553 | break; |
| 4554 | } |
| 4555 | |
| 4556 | // If the scope is outside the addrec's loop, evaluate it by using the |
| 4557 | // loop exit value of the addrec. |
| 4558 | if (!AddRec->getLoop()->contains(L)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4559 | // To evaluate this recurrence, we need to know how many times the AddRec |
| 4560 | // loop iterates. Compute this now. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4561 | const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4562 | if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4563 | |
Eli Friedman | b42a626 | 2008-08-04 23:49:06 +0000 | [diff] [blame] | 4564 | // Then, evaluate the AddRec. |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4565 | return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4566 | } |
Dan Gohman | 1104645 | 2010-06-29 23:43:06 +0000 | [diff] [blame] | 4567 | |
Dan Gohman | d594e6f | 2009-05-24 23:25:42 +0000 | [diff] [blame] | 4568 | return AddRec; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4569 | } |
| 4570 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4571 | if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4572 | const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); |
Dan Gohman | eb3948b | 2009-04-29 22:29:01 +0000 | [diff] [blame] | 4573 | if (Op == Cast->getOperand()) |
| 4574 | return Cast; // must be loop invariant |
| 4575 | return getZeroExtendExpr(Op, Cast->getType()); |
| 4576 | } |
| 4577 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4578 | if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4579 | const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); |
Dan Gohman | eb3948b | 2009-04-29 22:29:01 +0000 | [diff] [blame] | 4580 | if (Op == Cast->getOperand()) |
| 4581 | return Cast; // must be loop invariant |
| 4582 | return getSignExtendExpr(Op, Cast->getType()); |
| 4583 | } |
| 4584 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4585 | if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4586 | const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); |
Dan Gohman | eb3948b | 2009-04-29 22:29:01 +0000 | [diff] [blame] | 4587 | if (Op == Cast->getOperand()) |
| 4588 | return Cast; // must be loop invariant |
| 4589 | return getTruncateExpr(Op, Cast->getType()); |
| 4590 | } |
| 4591 | |
Torok Edwin | c23197a | 2009-07-14 16:55:14 +0000 | [diff] [blame] | 4592 | llvm_unreachable("Unknown SCEV type!"); |
Daniel Dunbar | 8c562e2 | 2009-05-18 16:43:04 +0000 | [diff] [blame] | 4593 | return 0; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4594 | } |
| 4595 | |
Dan Gohman | 66a7e85 | 2009-05-08 20:38:54 +0000 | [diff] [blame] | 4596 | /// getSCEVAtScope - This is a convenience function which does |
| 4597 | /// getSCEVAtScope(getSCEV(V), L). |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4598 | const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4599 | return getSCEVAtScope(getSCEV(V), L); |
| 4600 | } |
| 4601 | |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4602 | /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the |
| 4603 | /// following equation: |
| 4604 | /// |
| 4605 | /// A * X = B (mod N) |
| 4606 | /// |
| 4607 | /// where N = 2^BW and BW is the common bit width of A and B. The signedness of |
| 4608 | /// A and B isn't important. |
| 4609 | /// |
| 4610 | /// If the equation does not have a solution, SCEVCouldNotCompute is returned. |
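///
/// Worked example (illustrative only): with BW = 4 (so N = 16), A = 6, B = 4:
/// D = gcd(6, 16) = 2, and B is divisible by D; A/D = 3, N/D = 8, and the
/// multiplicative inverse of 3 (mod 8) is 3, so X = 3 * (4/2) = 6 (mod 8).
/// Indeed 6 * 6 = 36 = 4 (mod 16), and 6 is the minimum unsigned root (the
/// other root being 14).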
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4611 | static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4612 | ScalarEvolution &SE) { |
| 4613 | uint32_t BW = A.getBitWidth(); |
| 4614 | assert(BW == B.getBitWidth() && "Bit widths must be the same."); |
| 4615 | assert(A != 0 && "A must be non-zero."); |
| 4616 | |
| 4617 | // 1. D = gcd(A, N) |
| 4618 | // |
| 4619 | // The gcd of A and N may have only one prime factor: 2. The number of |
| 4620 | // trailing zeros in A is its multiplicity.
| 4621 | uint32_t Mult2 = A.countTrailingZeros(); |
| 4622 | // D = 2^Mult2 |
| 4623 | |
| 4624 | // 2. Check if B is divisible by D. |
| 4625 | // |
| 4626 | // B is divisible by D if and only if the multiplicity of prime factor 2 for B |
| 4627 | // is not less than the multiplicity of this prime factor for D.
| 4628 | if (B.countTrailingZeros() < Mult2) |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 4629 | return SE.getCouldNotCompute(); |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4630 | |
| 4631 | // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic |
| 4632 | // modulo (N / D). |
| 4633 | // |
| 4634 | // (N / D) may need BW+1 bits in its representation. Hence, we'll use this |
| 4635 | // bit width during computations. |
| 4636 | APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D |
| 4637 | APInt Mod(BW + 1, 0); |
| 4638 | Mod.set(BW - Mult2); // Mod = N / D |
| 4639 | APInt I = AD.multiplicativeInverse(Mod); |
| 4640 | |
| 4641 | // 4. Compute the minimum unsigned root of the equation: |
| 4642 | // I * (B / D) mod (N / D) |
| 4643 | APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); |
| 4644 | |
| 4645 | // The result is guaranteed to be less than 2^BW so we may truncate it to BW |
| 4646 | // bits. |
| 4647 | return SE.getConstant(Result.trunc(BW)); |
| 4648 | } |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4649 | |
| 4650 | /// SolveQuadraticEquation - Find the roots of the quadratic equation for the |
| 4651 | /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which |
| 4652 | /// might be the same) or two SCEVCouldNotCompute objects. |
| 4653 | /// |
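/// Derivation sketch for the coefficients used below: at iteration x the chrec
/// {L,+,M,+,N} evaluates to L + M*C(x,1) + N*C(x,2) = L + M*x + N*x*(x-1)/2,
/// which in the standard form A*x^2 + B*x + C gives A = N/2, B = M - N/2,
/// and C = L; the code performs the same computation in APInt arithmetic.
///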
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4654 | static std::pair<const SCEV *,const SCEV *> |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 4655 | SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4656 | assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 4657 | const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); |
| 4658 | const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); |
| 4659 | const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4660 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4661 | // We currently can only solve this if the coefficients are constants. |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4662 | if (!LC || !MC || !NC) { |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 4663 | const SCEV *CNC = SE.getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4664 | return std::make_pair(CNC, CNC); |
| 4665 | } |
| 4666 | |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4667 | uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); |
Chris Lattner | fe560b8 | 2007-04-15 19:52:49 +0000 | [diff] [blame] | 4668 | const APInt &L = LC->getValue()->getValue(); |
| 4669 | const APInt &M = MC->getValue()->getValue(); |
| 4670 | const APInt &N = NC->getValue()->getValue(); |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4671 | APInt Two(BitWidth, 2); |
| 4672 | APInt Four(BitWidth, 4); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4673 | |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4674 | { |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4675 | using namespace APIntOps; |
Zhou Sheng | 414de4d | 2007-04-07 17:48:27 +0000 | [diff] [blame] | 4676 | const APInt& C = L; |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4677 | // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C |
| 4678 | // The B coefficient is M-N/2 |
| 4679 | APInt B(M); |
| 4680 | B -= sdiv(N,Two); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4681 | |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4682 | // The A coefficient is N/2 |
Zhou Sheng | 414de4d | 2007-04-07 17:48:27 +0000 | [diff] [blame] | 4683 | APInt A(N.sdiv(Two)); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4684 | |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4685 | // Compute the B^2-4ac term. |
| 4686 | APInt SqrtTerm(B); |
| 4687 | SqrtTerm *= B; |
| 4688 | SqrtTerm -= Four * (A * C); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4689 | |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4690 | // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest |
| 4691 | // integer value or else APInt::sqrt() will assert. |
| 4692 | APInt SqrtVal(SqrtTerm.sqrt()); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4693 | |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4694 | // Compute the two solutions for the quadratic formula. |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4695 | // The divisions must be performed as signed divisions. |
| 4696 | APInt NegB(-B); |
Reid Spencer | 3e35c8d | 2007-04-16 02:24:41 +0000 | [diff] [blame] | 4697 | APInt TwoA(A << 1);
Nick Lewycky | 8f4d5eb | 2008-11-03 02:43:49 +0000 | [diff] [blame] | 4698 | if (TwoA.isMinValue()) { |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 4699 | const SCEV *CNC = SE.getCouldNotCompute(); |
Nick Lewycky | 8f4d5eb | 2008-11-03 02:43:49 +0000 | [diff] [blame] | 4700 | return std::make_pair(CNC, CNC); |
| 4701 | } |
| 4702 | |
Owen Anderson | e922c02 | 2009-07-22 00:24:57 +0000 | [diff] [blame] | 4703 | LLVMContext &Context = SE.getContext(); |
Owen Anderson | 76f600b | 2009-07-06 22:37:39 +0000 | [diff] [blame] | 4704 | |
| 4705 | ConstantInt *Solution1 = |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 4706 | ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA)); |
Owen Anderson | 76f600b | 2009-07-06 22:37:39 +0000 | [diff] [blame] | 4707 | ConstantInt *Solution2 = |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 4708 | ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA)); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4709 | |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4710 | return std::make_pair(SE.getConstant(Solution1), |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 4711 | SE.getConstant(Solution2)); |
Reid Spencer | e8019bb | 2007-03-01 07:25:48 +0000 | [diff] [blame] | 4712 | } // end of scope using APIntOps |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4713 | } |
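
// For illustration only: a minimal standalone sketch (hypothetical values,
// and a made-up helper name; it is not used by the analysis) of the mapping
// performed above.  The chrec {L,+,M,+,N} evaluates at iteration x to
// L + M*x + N*x*(x-1)/2, i.e. (N/2)*x^2 + (M - N/2)*x + L, which is exactly
// the A, B, C fed into the quadratic formula.
#if 0
static void SolveQuadraticExample() {
  // The chrec {-6,+,0,+,2} evaluates to x^2 - x - 6, which is zero at x = 3.
  APInt L(32, -6, true), M(32, 0), N(32, 2);
  APInt Two(32, 2), Four(32, 4);
  APInt A = N.sdiv(Two);                          // N/2      ==  1
  APInt B = M - N.sdiv(Two);                      // M - N/2  == -1
  APInt C = L;                                    //          == -6
  APInt SqrtVal = (B * B - Four * A * C).sqrt();  // sqrt(25) ==  5
  APInt Root1 = (-B + SqrtVal).sdiv(A << 1);      // (1+5)/2  ==  3
  APInt Root2 = (-B - SqrtVal).sdiv(A << 1);      // (1-5)/2  == -2
  (void)Root1; (void)Root2;
}
#endif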
| 4714 | |
| 4715 | /// HowFarToZero - Return the number of times a backedge comparing the specified |
Dan Gohman | 86fbf2f | 2009-06-06 14:37:11 +0000 | [diff] [blame] | 4716 | /// value to zero will execute. If not computable, return CouldNotCompute. |
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 4717 | ScalarEvolution::BackedgeTakenInfo |
| 4718 | ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4719 | // If the value is a constant |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4720 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4721 | // If the value is already zero, the branch will execute zero times. |
Reid Spencer | cae5754 | 2007-03-02 00:28:52 +0000 | [diff] [blame] | 4722 | if (C->getValue()->isZero()) return C; |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4723 | return getCouldNotCompute(); // Otherwise it will loop infinitely. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4724 | } |
| 4725 | |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 4726 | const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4727 | if (!AddRec || AddRec->getLoop() != L) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4728 | return getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4729 | |
| 4730 | if (AddRec->isAffine()) { |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4731 | // If this is an affine expression, the execution count of this branch is |
| 4732 | // the minimum unsigned root of the following equation: |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4733 | // |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4734 | // Start + Step*N = 0 (mod 2^BW) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4735 | // |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4736 | // equivalent to: |
| 4737 | // |
| 4738 | // Step*N = -Start (mod 2^BW) |
| 4739 | // |
| 4740 | // where BW is the common bit width of Start and Step. |
| 4741 | |
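// For illustration (hypothetical numbers): with BW = 8, Start = 250 and
// Step = 3, we need 3*N == -250 == 6 (mod 256), giving N = 2; the chrec
// {250,+,3}<i8> indeed evaluates to 250, 253, 0 at iterations 0, 1, 2.
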
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4742 | // Get the initial value for the loop. |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 4743 | const SCEV *Start = getSCEVAtScope(AddRec->getStart(), |
| 4744 | L->getParentLoop()); |
| 4745 | const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), |
| 4746 | L->getParentLoop()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4747 | |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4748 | if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) { |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4749 | // For now we handle only constant steps. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4750 | |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4751 | // First, handle unitary steps. |
| 4752 | if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so: |
Dan Gohman | 4c0d5d5 | 2009-08-20 16:42:55 +0000 | [diff] [blame] | 4753 | return getNegativeSCEV(Start); // N = -Start (as unsigned) |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4754 | if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so: |
| 4755 | return Start; // N = Start (as unsigned) |
| 4756 | |
| 4757 | // Then, try to solve the above equation provided that Start is constant. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4758 | if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) |
Wojciech Matyjewicz | de0f238 | 2008-07-20 15:55:14 +0000 | [diff] [blame] | 4759 | return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4760 | -StartC->getValue()->getValue(), |
| 4761 | *this); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4762 | } |
Duncan Sands | b0bc6c3 | 2010-02-15 16:12:20 +0000 | [diff] [blame] | 4763 | } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4764 | // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of |
| 4765 | // the quadratic equation to solve it. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4766 | std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec, |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4767 | *this); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 4768 | const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); |
| 4769 | const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4770 | if (R1) { |
Chris Lattner | d18d9dc | 2004-04-02 20:26:46 +0000 | [diff] [blame] | 4771 | #if 0 |
David Greene | 25e0e87 | 2009-12-23 22:18:14 +0000 | [diff] [blame] | 4772 | dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1 |
Dan Gohman | b7ef729 | 2009-04-21 00:47:46 +0000 | [diff] [blame] | 4773 | << " sol#2: " << *R2 << "\n"; |
Chris Lattner | d18d9dc | 2004-04-02 20:26:46 +0000 | [diff] [blame] | 4774 | #endif |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4775 | // Pick the smallest positive root value. |
Zhou Sheng | 6b6b6ef | 2007-01-11 12:24:14 +0000 | [diff] [blame] | 4776 | if (ConstantInt *CB = |
Owen Anderson | baf3c40 | 2009-07-29 18:55:55 +0000 | [diff] [blame] | 4777 | dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, |
Reid Spencer | e4d87aa | 2006-12-23 06:05:41 +0000 | [diff] [blame] | 4778 | R1->getValue(), R2->getValue()))) { |
Reid Spencer | 579dca1 | 2007-01-12 04:24:46 +0000 | [diff] [blame] | 4779 | if (!CB->getZExtValue()) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4780 | std::swap(R1, R2); // R1 is the minimum root now. |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4781 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4782 | // We can only use this value if the chrec ends up with an exact zero |
| 4783 | // value at this index. When solving for "X*X != 5", for example, we |
| 4784 | // should not accept a root of 2. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4785 | const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); |
Dan Gohman | cfeb6a4 | 2008-06-18 16:23:07 +0000 | [diff] [blame] | 4786 | if (Val->isZero()) |
| 4787 | return R1; // We found a quadratic root! |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4788 | } |
| 4789 | } |
| 4790 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4791 | |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4792 | return getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4793 | } |
| 4794 | |
| 4795 | /// HowFarToNonZero - Return the number of times a backedge checking the |
| 4796 | /// specified value for nonzero will execute. If not computable, return |
Dan Gohman | 86fbf2f | 2009-06-06 14:37:11 +0000 | [diff] [blame] | 4797 | /// CouldNotCompute |
Dan Gohman | f6d009f | 2010-02-24 17:31:30 +0000 | [diff] [blame] | 4798 | ScalarEvolution::BackedgeTakenInfo |
| 4799 | ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4800 | // Loops that look like: while (X == 0) are very strange indeed. We don't |
| 4801 | // handle them yet except for the trivial case. This could be expanded in the |
| 4802 | // future as needed. |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4803 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4804 | // If the value is a constant, check to see if it is known to be non-zero |
| 4805 | // already. If so, the backedge will execute zero times. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 4806 | if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { |
Nick Lewycky | 39442af | 2008-02-21 09:14:53 +0000 | [diff] [blame] | 4807 | if (!C->getValue()->isNullValue()) |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 4808 | return getConstant(C->getType(), 0); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4809 | return getCouldNotCompute(); // Otherwise it will loop infinitely. |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4810 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 4811 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4812 | // We could implement others, but I really doubt anyone writes loops like |
| 4813 | // this, and if they did, they would already be constant folded. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 4814 | return getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 4815 | } |
| 4816 | |
Dan Gohman | fd6edef | 2008-09-15 22:18:04 +0000 | [diff] [blame] | 4817 | /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB |
| 4818 | /// (which may not be an immediate predecessor) which has exactly one |
| 4819 | /// successor from which BB is reachable, paired with that successor block; |
| 4820 | /// return a pair of null pointers if no such block is found. |
| 4821 | /// |
Dan Gohman | 005752b | 2010-04-15 16:19:08 +0000 | [diff] [blame] | 4822 | std::pair<BasicBlock *, BasicBlock *> |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4823 | ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { |
Dan Gohman | 3d739fe | 2009-04-30 20:48:53 +0000 | [diff] [blame] | 4824 | // If the block has a unique predecessor, then there is no path from the |
| 4825 | // predecessor to the block that does not go through the direct edge |
| 4826 | // from the predecessor to the block. |
Dan Gohman | fd6edef | 2008-09-15 22:18:04 +0000 | [diff] [blame] | 4827 | if (BasicBlock *Pred = BB->getSinglePredecessor()) |
Dan Gohman | 005752b | 2010-04-15 16:19:08 +0000 | [diff] [blame] | 4828 | return std::make_pair(Pred, BB); |
Dan Gohman | fd6edef | 2008-09-15 22:18:04 +0000 | [diff] [blame] | 4829 | |
| 4830 | // A loop's header is defined to be a block that dominates the loop. |
Dan Gohman | 859b482 | 2009-05-18 15:36:09 +0000 | [diff] [blame] | 4831 | // If the header has a unique predecessor outside the loop, it must be |
| 4832 | // a block that has exactly one successor that can reach the loop. |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 4833 | if (Loop *L = LI->getLoopFor(BB)) |
Dan Gohman | 605c14f | 2010-06-22 23:43:28 +0000 | [diff] [blame] | 4834 | return std::make_pair(L->getLoopPredecessor(), L->getHeader()); |
Dan Gohman | fd6edef | 2008-09-15 22:18:04 +0000 | [diff] [blame] | 4835 | |
Dan Gohman | 005752b | 2010-04-15 16:19:08 +0000 | [diff] [blame] | 4836 | return std::pair<BasicBlock *, BasicBlock *>(); |
Dan Gohman | fd6edef | 2008-09-15 22:18:04 +0000 | [diff] [blame] | 4837 | } |
| 4838 | |
Dan Gohman | 763bad1 | 2009-06-20 00:35:32 +0000 | [diff] [blame] | 4839 | /// HasSameValue - SCEV structural equivalence is usually sufficient for |
| 4840 | /// testing whether two expressions are equal, however for the purposes of |
| 4841 | /// looking for a condition guarding a loop, it can be useful to be a little |
| 4842 | /// more general, since a front-end may have replicated the controlling |
| 4843 | /// expression. |
| 4844 | /// |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 4845 | static bool HasSameValue(const SCEV *A, const SCEV *B) { |
Dan Gohman | 763bad1 | 2009-06-20 00:35:32 +0000 | [diff] [blame] | 4846 | // Quick check to see if they are the same SCEV. |
| 4847 | if (A == B) return true; |
| 4848 | |
| 4849 | // Otherwise, if they're both SCEVUnknown, it's possible that they hold |
| 4850 | // two different instructions with the same value. Check for this case. |
| 4851 | if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) |
| 4852 | if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) |
| 4853 | if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) |
| 4854 | if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) |
Dan Gohman | 041de42 | 2009-08-25 17:56:57 +0000 | [diff] [blame] | 4855 | if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory()) |
Dan Gohman | 763bad1 | 2009-06-20 00:35:32 +0000 | [diff] [blame] | 4856 | return true; |
| 4857 | |
| 4858 | // Otherwise assume they may have a different value. |
| 4859 | return false; |
| 4860 | } |
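
// Example for HasSameValue above (hypothetical IR): two distinct
// "%t = add nsw i32 %i, 1" instructions yield two different SCEVUnknowns,
// but isIdenticalTo lets us treat them as equal.  Two identical loads are
// not merged, since memory may have changed between the two instructions.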
| 4861 | |
Dan Gohman | e979650 | 2010-04-24 01:28:42 +0000 | [diff] [blame] | 4862 | /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with |
| 4863 | /// predicate Pred. Return true iff any changes were made. |
| 4864 | /// |
| 4865 | bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, |
| 4866 | const SCEV *&LHS, const SCEV *&RHS) { |
| 4867 | bool Changed = false; |
| 4868 | |
| 4869 | // Canonicalize a constant to the right side. |
| 4870 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { |
| 4871 | // Check for both operands constant. |
| 4872 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { |
| 4873 | if (ConstantExpr::getICmp(Pred, |
| 4874 | LHSC->getValue(), |
| 4875 | RHSC->getValue())->isNullValue()) |
| 4876 | goto trivially_false; |
| 4877 | else |
| 4878 | goto trivially_true; |
| 4879 | } |
| 4880 | // Otherwise swap the operands to put the constant on the right. |
| 4881 | std::swap(LHS, RHS); |
| 4882 | Pred = ICmpInst::getSwappedPredicate(Pred); |
| 4883 | Changed = true; |
| 4884 | } |
| 4885 | |
| 4886 | // If we're comparing an addrec with a value which is loop-invariant in the |
Dan Gohman | 3abb69c | 2010-05-03 17:00:11 +0000 | [diff] [blame] | 4887 | // addrec's loop, put the addrec on the left. Also make a dominance check, |
| 4888 | // as both operands could be addrecs loop-invariant in each other's loop. |
| 4889 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { |
| 4890 | const Loop *L = AR->getLoop(); |
| 4891 | if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) { |
Dan Gohman | e979650 | 2010-04-24 01:28:42 +0000 | [diff] [blame] | 4892 | std::swap(LHS, RHS); |
| 4893 | Pred = ICmpInst::getSwappedPredicate(Pred); |
| 4894 | Changed = true; |
| 4895 | } |
Dan Gohman | 3abb69c | 2010-05-03 17:00:11 +0000 | [diff] [blame] | 4896 | } |
Dan Gohman | e979650 | 2010-04-24 01:28:42 +0000 | [diff] [blame] | 4897 | |
| 4898 | // If there's a constant operand, canonicalize comparisons with boundary |
| 4899 | // cases, and canonicalize *-or-equal comparisons to regular comparisons. |
| 4900 | if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { |
| 4901 | const APInt &RA = RC->getValue()->getValue(); |
| 4902 | switch (Pred) { |
| 4903 | default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); |
| 4904 | case ICmpInst::ICMP_EQ: |
| 4905 | case ICmpInst::ICMP_NE: |
| 4906 | break; |
| 4907 | case ICmpInst::ICMP_UGE: |
| 4908 | if ((RA - 1).isMinValue()) { |
| 4909 | Pred = ICmpInst::ICMP_NE; |
| 4910 | RHS = getConstant(RA - 1); |
| 4911 | Changed = true; |
| 4912 | break; |
| 4913 | } |
| 4914 | if (RA.isMaxValue()) { |
| 4915 | Pred = ICmpInst::ICMP_EQ; |
| 4916 | Changed = true; |
| 4917 | break; |
| 4918 | } |
| 4919 | if (RA.isMinValue()) goto trivially_true; |
| 4920 | |
| 4921 | Pred = ICmpInst::ICMP_UGT; |
| 4922 | RHS = getConstant(RA - 1); |
| 4923 | Changed = true; |
| 4924 | break; |
| 4925 | case ICmpInst::ICMP_ULE: |
| 4926 | if ((RA + 1).isMaxValue()) { |
| 4927 | Pred = ICmpInst::ICMP_NE; |
| 4928 | RHS = getConstant(RA + 1); |
| 4929 | Changed = true; |
| 4930 | break; |
| 4931 | } |
| 4932 | if (RA.isMinValue()) { |
| 4933 | Pred = ICmpInst::ICMP_EQ; |
| 4934 | Changed = true; |
| 4935 | break; |
| 4936 | } |
| 4937 | if (RA.isMaxValue()) goto trivially_true; |
| 4938 | |
| 4939 | Pred = ICmpInst::ICMP_ULT; |
| 4940 | RHS = getConstant(RA + 1); |
| 4941 | Changed = true; |
| 4942 | break; |
| 4943 | case ICmpInst::ICMP_SGE: |
| 4944 | if ((RA - 1).isMinSignedValue()) { |
| 4945 | Pred = ICmpInst::ICMP_NE; |
| 4946 | RHS = getConstant(RA - 1); |
| 4947 | Changed = true; |
| 4948 | break; |
| 4949 | } |
| 4950 | if (RA.isMaxSignedValue()) { |
| 4951 | Pred = ICmpInst::ICMP_EQ; |
| 4952 | Changed = true; |
| 4953 | break; |
| 4954 | } |
| 4955 | if (RA.isMinSignedValue()) goto trivially_true; |
| 4956 | |
| 4957 | Pred = ICmpInst::ICMP_SGT; |
| 4958 | RHS = getConstant(RA - 1); |
| 4959 | Changed = true; |
| 4960 | break; |
| 4961 | case ICmpInst::ICMP_SLE: |
| 4962 | if ((RA + 1).isMaxSignedValue()) { |
| 4963 | Pred = ICmpInst::ICMP_NE; |
| 4964 | RHS = getConstant(RA + 1); |
| 4965 | Changed = true; |
| 4966 | break; |
| 4967 | } |
| 4968 | if (RA.isMinSignedValue()) { |
| 4969 | Pred = ICmpInst::ICMP_EQ; |
| 4970 | Changed = true; |
| 4971 | break; |
| 4972 | } |
| 4973 | if (RA.isMaxSignedValue()) goto trivially_true; |
| 4974 | |
| 4975 | Pred = ICmpInst::ICMP_SLT; |
| 4976 | RHS = getConstant(RA + 1); |
| 4977 | Changed = true; |
| 4978 | break; |
| 4979 | case ICmpInst::ICMP_UGT: |
| 4980 | if (RA.isMinValue()) { |
| 4981 | Pred = ICmpInst::ICMP_NE; |
| 4982 | Changed = true; |
| 4983 | break; |
| 4984 | } |
| 4985 | if ((RA + 1).isMaxValue()) { |
| 4986 | Pred = ICmpInst::ICMP_EQ; |
| 4987 | RHS = getConstant(RA + 1); |
| 4988 | Changed = true; |
| 4989 | break; |
| 4990 | } |
| 4991 | if (RA.isMaxValue()) goto trivially_false; |
| 4992 | break; |
| 4993 | case ICmpInst::ICMP_ULT: |
| 4994 | if (RA.isMaxValue()) { |
| 4995 | Pred = ICmpInst::ICMP_NE; |
| 4996 | Changed = true; |
| 4997 | break; |
| 4998 | } |
| 4999 | if ((RA - 1).isMinValue()) { |
| 5000 | Pred = ICmpInst::ICMP_EQ; |
| 5001 | RHS = getConstant(RA - 1); |
| 5002 | Changed = true; |
| 5003 | break; |
| 5004 | } |
| 5005 | if (RA.isMinValue()) goto trivially_false; |
| 5006 | break; |
| 5007 | case ICmpInst::ICMP_SGT: |
| 5008 | if (RA.isMinSignedValue()) { |
| 5009 | Pred = ICmpInst::ICMP_NE; |
| 5010 | Changed = true; |
| 5011 | break; |
| 5012 | } |
| 5013 | if ((RA + 1).isMaxSignedValue()) { |
| 5014 | Pred = ICmpInst::ICMP_EQ; |
| 5015 | RHS = getConstant(RA + 1); |
| 5016 | Changed = true; |
| 5017 | break; |
| 5018 | } |
| 5019 | if (RA.isMaxSignedValue()) goto trivially_false; |
| 5020 | break; |
| 5021 | case ICmpInst::ICMP_SLT: |
| 5022 | if (RA.isMaxSignedValue()) { |
| 5023 | Pred = ICmpInst::ICMP_NE; |
| 5024 | Changed = true; |
| 5025 | break; |
| 5026 | } |
| 5027 | if ((RA - 1).isMinSignedValue()) { |
| 5028 | Pred = ICmpInst::ICMP_EQ; |
| 5029 | RHS = getConstant(RA - 1); |
| 5030 | Changed = true; |
| 5031 | break; |
| 5032 | } |
| 5033 | if (RA.isMinSignedValue()) goto trivially_false; |
| 5034 | break; |
| 5035 | } |
| 5036 | } |
| 5037 | |
| 5038 | // Check for obvious equality. |
| 5039 | if (HasSameValue(LHS, RHS)) { |
| 5040 | if (ICmpInst::isTrueWhenEqual(Pred)) |
| 5041 | goto trivially_true; |
| 5042 | if (ICmpInst::isFalseWhenEqual(Pred)) |
| 5043 | goto trivially_false; |
| 5044 | } |
| 5045 | |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 5046 | // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by |
| 5047 | // adding or subtracting 1 from one of the operands. |
| 5048 | switch (Pred) { |
| 5049 | case ICmpInst::ICMP_SLE: |
| 5050 | if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { |
| 5051 | RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, |
| 5052 | /*HasNUW=*/false, /*HasNSW=*/true); |
| 5053 | Pred = ICmpInst::ICMP_SLT; |
| 5054 | Changed = true; |
| 5055 | } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { |
Dan Gohman | f16c680 | 2010-05-03 20:23:47 +0000 | [diff] [blame] | 5056 | LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 5057 | /*HasNUW=*/false, /*HasNSW=*/true); |
| 5058 | Pred = ICmpInst::ICMP_SLT; |
| 5059 | Changed = true; |
| 5060 | } |
| 5061 | break; |
| 5062 | case ICmpInst::ICMP_SGE: |
| 5063 | if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { |
Dan Gohman | f16c680 | 2010-05-03 20:23:47 +0000 | [diff] [blame] | 5064 | RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 5065 | /*HasNUW=*/false, /*HasNSW=*/true); |
| 5066 | Pred = ICmpInst::ICMP_SGT; |
| 5067 | Changed = true; |
| 5068 | } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { |
| 5069 | LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, |
| 5070 | /*HasNUW=*/false, /*HasNSW=*/true); |
| 5071 | Pred = ICmpInst::ICMP_SGT; |
| 5072 | Changed = true; |
| 5073 | } |
| 5074 | break; |
| 5075 | case ICmpInst::ICMP_ULE: |
| 5076 | if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { |
Dan Gohman | f16c680 | 2010-05-03 20:23:47 +0000 | [diff] [blame] | 5077 | RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 5078 | /*HasNUW=*/true, /*HasNSW=*/false); |
| 5079 | Pred = ICmpInst::ICMP_ULT; |
| 5080 | Changed = true; |
| 5081 | } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { |
Dan Gohman | f16c680 | 2010-05-03 20:23:47 +0000 | [diff] [blame] | 5082 | LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 5083 | /*HasNUW=*/true, /*HasNSW=*/false); |
| 5084 | Pred = ICmpInst::ICMP_ULT; |
| 5085 | Changed = true; |
| 5086 | } |
| 5087 | break; |
| 5088 | case ICmpInst::ICMP_UGE: |
| 5089 | if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { |
Dan Gohman | f16c680 | 2010-05-03 20:23:47 +0000 | [diff] [blame] | 5090 | RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 5091 | /*HasNUW=*/true, /*HasNSW=*/false); |
| 5092 | Pred = ICmpInst::ICMP_UGT; |
| 5093 | Changed = true; |
| 5094 | } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { |
Dan Gohman | f16c680 | 2010-05-03 20:23:47 +0000 | [diff] [blame] | 5095 | LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, |
Dan Gohman | 03557dc | 2010-05-03 16:35:17 +0000 | [diff] [blame] | 5096 | /*HasNUW=*/true, /*HasNSW=*/false); |
| 5097 | Pred = ICmpInst::ICMP_UGT; |
| 5098 | Changed = true; |
| 5099 | } |
| 5100 | break; |
| 5101 | default: |
| 5102 | break; |
| 5103 | } |
| 5104 | |
Dan Gohman | e979650 | 2010-04-24 01:28:42 +0000 | [diff] [blame] | 5105 | // TODO: More simplifications are possible here. |
| 5106 | |
| 5107 | return Changed; |
| 5108 | |
| 5109 | trivially_true: |
| 5110 | // Return 0 == 0. |
| 5111 | LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0); |
| 5112 | Pred = ICmpInst::ICMP_EQ; |
| 5113 | return true; |
| 5114 | |
| 5115 | trivially_false: |
| 5116 | // Return 0 != 0. |
| 5117 | LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0); |
| 5118 | Pred = ICmpInst::ICMP_NE; |
| 5119 | return true; |
| 5120 | } |
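
// A few examples of the rewrites made by SimplifyICmpOperands above
// (x and y are hypothetical non-constant SCEVs):
//   5 s> x   becomes  x s< 5           (constant moved to the right)
//   x u>= 1  becomes  x != 0           (boundary case)
//   x u< 1   becomes  x == 0           (boundary case)
//   x u>= 0  becomes  0 == 0           (trivially true)
//   x s<= 7  becomes  x s< 8           (*-or-equal made strict)
//   x s<= y  becomes  x s< y+1<nsw>    (when y+1 provably cannot overflow)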
| 5121 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5122 | bool ScalarEvolution::isKnownNegative(const SCEV *S) { |
| 5123 | return getSignedRange(S).getSignedMax().isNegative(); |
| 5124 | } |
| 5125 | |
| 5126 | bool ScalarEvolution::isKnownPositive(const SCEV *S) { |
| 5127 | return getSignedRange(S).getSignedMin().isStrictlyPositive(); |
| 5128 | } |
| 5129 | |
| 5130 | bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { |
| 5131 | return !getSignedRange(S).getSignedMin().isNegative(); |
| 5132 | } |
| 5133 | |
| 5134 | bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { |
| 5135 | return !getSignedRange(S).getSignedMax().isStrictlyPositive(); |
| 5136 | } |
| 5137 | |
| 5138 | bool ScalarEvolution::isKnownNonZero(const SCEV *S) { |
| 5139 | return isKnownNegative(S) || isKnownPositive(S); |
| 5140 | } |
| 5141 | |
| 5142 | bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, |
| 5143 | const SCEV *LHS, const SCEV *RHS) { |
Dan Gohman | d19bba6 | 2010-04-24 01:38:36 +0000 | [diff] [blame] | 5144 | // Canonicalize the inputs first. |
| 5145 | (void)SimplifyICmpOperands(Pred, LHS, RHS); |
| 5146 | |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5147 | // If LHS or RHS is an addrec, check to see if the condition is true in |
| 5148 | // every iteration of the loop. |
| 5149 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) |
| 5150 | if (isLoopEntryGuardedByCond( |
| 5151 | AR->getLoop(), Pred, AR->getStart(), RHS) && |
| 5152 | isLoopBackedgeGuardedByCond( |
Dan Gohman | acd8cab | 2010-05-04 01:12:27 +0000 | [diff] [blame] | 5153 | AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS)) |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5154 | return true; |
| 5155 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) |
| 5156 | if (isLoopEntryGuardedByCond( |
| 5157 | AR->getLoop(), Pred, LHS, AR->getStart()) && |
| 5158 | isLoopBackedgeGuardedByCond( |
Dan Gohman | acd8cab | 2010-05-04 01:12:27 +0000 | [diff] [blame] | 5159 | AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this))) |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5160 | return true; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5161 | |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5162 | // Otherwise see what can be done with known constant ranges. |
| 5163 | return isKnownPredicateWithRanges(Pred, LHS, RHS); |
| 5164 | } |
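
// For example (hypothetical expressions): to show {1,+,1}<L> s> 0 it is
// enough that entry into L is guarded by 1 s> 0 and that the backedge is
// only taken when the post-increment value {2,+,1}<L> s> 0; together these
// cover every value the addrec takes inside L.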
| 5165 | |
| 5166 | bool |
| 5167 | ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred, |
| 5168 | const SCEV *LHS, const SCEV *RHS) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5169 | if (HasSameValue(LHS, RHS)) |
| 5170 | return ICmpInst::isTrueWhenEqual(Pred); |
| 5171 | |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5172 | // This code is split out from isKnownPredicate because it is called from |
| 5173 | // within isLoopEntryGuardedByCond. |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5174 | switch (Pred) { |
| 5175 | default: |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 5176 | llvm_unreachable("Unexpected ICmpInst::Predicate value!"); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5177 | break; |
| 5178 | case ICmpInst::ICMP_SGT: |
| 5179 | Pred = ICmpInst::ICMP_SLT; |
| 5180 | std::swap(LHS, RHS);    // fall through |
| 5181 | case ICmpInst::ICMP_SLT: { |
| 5182 | ConstantRange LHSRange = getSignedRange(LHS); |
| 5183 | ConstantRange RHSRange = getSignedRange(RHS); |
| 5184 | if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin())) |
| 5185 | return true; |
| 5186 | if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax())) |
| 5187 | return false; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5188 | break; |
| 5189 | } |
| 5190 | case ICmpInst::ICMP_SGE: |
| 5191 | Pred = ICmpInst::ICMP_SLE; |
| 5192 | std::swap(LHS, RHS);    // fall through |
| 5193 | case ICmpInst::ICMP_SLE: { |
| 5194 | ConstantRange LHSRange = getSignedRange(LHS); |
| 5195 | ConstantRange RHSRange = getSignedRange(RHS); |
| 5196 | if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin())) |
| 5197 | return true; |
| 5198 | if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax())) |
| 5199 | return false; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5200 | break; |
| 5201 | } |
| 5202 | case ICmpInst::ICMP_UGT: |
| 5203 | Pred = ICmpInst::ICMP_ULT; |
| 5204 | std::swap(LHS, RHS);    // fall through |
| 5205 | case ICmpInst::ICMP_ULT: { |
| 5206 | ConstantRange LHSRange = getUnsignedRange(LHS); |
| 5207 | ConstantRange RHSRange = getUnsignedRange(RHS); |
| 5208 | if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin())) |
| 5209 | return true; |
| 5210 | if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax())) |
| 5211 | return false; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5212 | break; |
| 5213 | } |
| 5214 | case ICmpInst::ICMP_UGE: |
| 5215 | Pred = ICmpInst::ICMP_ULE; |
| 5216 | std::swap(LHS, RHS);    // fall through |
| 5217 | case ICmpInst::ICMP_ULE: { |
| 5218 | ConstantRange LHSRange = getUnsignedRange(LHS); |
| 5219 | ConstantRange RHSRange = getUnsignedRange(RHS); |
| 5220 | if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin())) |
| 5221 | return true; |
| 5222 | if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax())) |
| 5223 | return false; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5224 | break; |
| 5225 | } |
| 5226 | case ICmpInst::ICMP_NE: { |
| 5227 | if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet()) |
| 5228 | return true; |
| 5229 | if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet()) |
| 5230 | return true; |
| 5231 | |
| 5232 | const SCEV *Diff = getMinusSCEV(LHS, RHS); |
| 5233 | if (isKnownNonZero(Diff)) |
| 5234 | return true; |
| 5235 | break; |
| 5236 | } |
| 5237 | case ICmpInst::ICMP_EQ: |
Dan Gohman | f117ed4 | 2009-07-20 23:54:43 +0000 | [diff] [blame] | 5238 | // The check at the top of the function catches the case where |
| 5239 | // the values are known to be equal. |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5240 | break; |
| 5241 | } |
| 5242 | return false; |
| 5243 | } |
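
// Example for the range checks above (hypothetical ranges): if
// getUnsignedRange(LHS) is [0,10) and getUnsignedRange(RHS) is [10,21),
// then LHS u< RHS is known to hold, because the largest value LHS can take
// (9) is u< the smallest value RHS can take (10).  A false return only
// means the predicate could not be proved.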
| 5244 | |
| 5245 | /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is |
| 5246 | /// protected by a conditional between LHS and RHS. This is used to |
| 5247 | /// eliminate casts. |
| 5248 | bool |
| 5249 | ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, |
| 5250 | ICmpInst::Predicate Pred, |
| 5251 | const SCEV *LHS, const SCEV *RHS) { |
| 5252 | // Interpret a null as meaning no loop, where there is obviously no guard |
| 5253 | // (interprocedural conditions notwithstanding). |
| 5254 | if (!L) return true; |
| 5255 | |
| 5256 | BasicBlock *Latch = L->getLoopLatch(); |
| 5257 | if (!Latch) |
| 5258 | return false; |
| 5259 | |
| 5260 | BranchInst *LoopContinuePredicate = |
| 5261 | dyn_cast<BranchInst>(Latch->getTerminator()); |
| 5262 | if (!LoopContinuePredicate || |
| 5263 | LoopContinuePredicate->isUnconditional()) |
| 5264 | return false; |
| 5265 | |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5266 | return isImpliedCond(Pred, LHS, RHS, |
| 5267 | LoopContinuePredicate->getCondition(), |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5268 | LoopContinuePredicate->getSuccessor(0) != L->getHeader()); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5269 | } |
| 5270 | |
Dan Gohman | 3948d0b | 2010-04-11 19:27:13 +0000 | [diff] [blame] | 5271 | /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5272 | /// by a conditional between LHS and RHS. This is used to help avoid max |
| 5273 | /// expressions in loop trip counts, and to eliminate casts. |
| 5274 | bool |
Dan Gohman | 3948d0b | 2010-04-11 19:27:13 +0000 | [diff] [blame] | 5275 | ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, |
| 5276 | ICmpInst::Predicate Pred, |
| 5277 | const SCEV *LHS, const SCEV *RHS) { |
Dan Gohman | 8ea9452 | 2009-05-18 16:03:58 +0000 | [diff] [blame] | 5278 | // Interpret a null as meaning no loop, where there is obviously no guard |
| 5279 | // (interprocedural conditions notwithstanding). |
| 5280 | if (!L) return false; |
| 5281 | |
Dan Gohman | 859b482 | 2009-05-18 15:36:09 +0000 | [diff] [blame] | 5282 | // Starting at the loop predecessor, climb up the predecessor chain, as long |
| 5283 | // as there are predecessors that can be found that have unique successors |
Dan Gohman | fd6edef | 2008-09-15 22:18:04 +0000 | [diff] [blame] | 5284 | // leading to the original header. |
Dan Gohman | 005752b | 2010-04-15 16:19:08 +0000 | [diff] [blame] | 5285 | for (std::pair<BasicBlock *, BasicBlock *> |
Dan Gohman | 605c14f | 2010-06-22 23:43:28 +0000 | [diff] [blame] | 5286 | Pair(L->getLoopPredecessor(), L->getHeader()); |
Dan Gohman | 005752b | 2010-04-15 16:19:08 +0000 | [diff] [blame] | 5287 | Pair.first; |
| 5288 | Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { |
Dan Gohman | 3837218 | 2008-08-12 20:17:31 +0000 | [diff] [blame] | 5289 | |
| 5290 | BranchInst *LoopEntryPredicate = |
Dan Gohman | 005752b | 2010-04-15 16:19:08 +0000 | [diff] [blame] | 5291 | dyn_cast<BranchInst>(Pair.first->getTerminator()); |
Dan Gohman | 3837218 | 2008-08-12 20:17:31 +0000 | [diff] [blame] | 5292 | if (!LoopEntryPredicate || |
| 5293 | LoopEntryPredicate->isUnconditional()) |
| 5294 | continue; |
| 5295 | |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5296 | if (isImpliedCond(Pred, LHS, RHS, |
| 5297 | LoopEntryPredicate->getCondition(), |
Dan Gohman | 005752b | 2010-04-15 16:19:08 +0000 | [diff] [blame] | 5298 | LoopEntryPredicate->getSuccessor(0) != Pair.second)) |
Dan Gohman | 3837218 | 2008-08-12 20:17:31 +0000 | [diff] [blame] | 5299 | return true; |
Nick Lewycky | 59cff12 | 2008-07-12 07:41:32 +0000 | [diff] [blame] | 5300 | } |
| 5301 | |
Dan Gohman | 3837218 | 2008-08-12 20:17:31 +0000 | [diff] [blame] | 5302 | return false; |
Nick Lewycky | 59cff12 | 2008-07-12 07:41:32 +0000 | [diff] [blame] | 5303 | } |
| 5304 | |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5305 | /// isImpliedCond - Test whether the condition described by Pred, LHS, |
| 5306 | /// and RHS is true whenever the given Cond value evaluates to true. |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5307 | bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5308 | const SCEV *LHS, const SCEV *RHS, |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5309 | Value *FoundCondValue, |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5310 | bool Inverse) { |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 5311 | // Recursively handle And and Or conditions. |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5312 | if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 5313 | if (BO->getOpcode() == Instruction::And) { |
| 5314 | if (!Inverse) |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5315 | return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || |
| 5316 | isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 5317 | } else if (BO->getOpcode() == Instruction::Or) { |
| 5318 | if (Inverse) |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5319 | return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || |
| 5320 | isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 5321 | } |
| 5322 | } |
| 5323 | |
Dan Gohman | af08a36 | 2010-08-10 23:46:30 +0000 | [diff] [blame] | 5324 | ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 5325 | if (!ICI) return false; |
| 5326 | |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5327 | // Bail if the ICmp's operands' types are wider than the needed type |
| 5328 | // before attempting to call getSCEV on them. This avoids infinite |
| 5329 | // recursion, since the analysis of widening casts can require loop |
| 5330 | // exit condition information for overflow checking, which would |
| 5331 | // lead back here. |
| 5332 | if (getTypeSizeInBits(LHS->getType()) < |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5333 | getTypeSizeInBits(ICI->getOperand(0)->getType())) |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5334 | return false; |
| 5335 | |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5336 | // Now that we have a usable condition in hand, check whether it is the |
| 5337 | // comparison we are looking for. |
| 5338 | ICmpInst::Predicate FoundPred; |
| 5339 | if (Inverse) |
| 5340 | FoundPred = ICI->getInversePredicate(); |
| 5341 | else |
| 5342 | FoundPred = ICI->getPredicate(); |
| 5343 | |
| 5344 | const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); |
| 5345 | const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5346 | |
| 5347 | // Balance the types. The case where FoundLHS' type is wider than |
| 5348 | // LHS' type is checked for above. |
| 5349 | if (getTypeSizeInBits(LHS->getType()) > |
| 5350 | getTypeSizeInBits(FoundLHS->getType())) { |
| 5351 | if (CmpInst::isSigned(Pred)) { |
| 5352 | FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); |
| 5353 | FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); |
| 5354 | } else { |
| 5355 | FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); |
| 5356 | FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); |
| 5357 | } |
| 5358 | } |
| 5359 | |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5360 | // Canonicalize the query to match the way instcombine will have |
| 5361 | // canonicalized the comparison. |
Dan Gohman | d4da5af | 2010-04-24 01:34:53 +0000 | [diff] [blame] | 5362 | if (SimplifyICmpOperands(Pred, LHS, RHS)) |
| 5363 | if (LHS == RHS) |
Dan Gohman | 34c3e36 | 2010-05-03 18:00:24 +0000 | [diff] [blame] | 5364 | return CmpInst::isTrueWhenEqual(Pred); |
Dan Gohman | d4da5af | 2010-04-24 01:34:53 +0000 | [diff] [blame] | 5365 | if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) |
| 5366 | if (FoundLHS == FoundRHS) |
Dan Gohman | 34c3e36 | 2010-05-03 18:00:24 +0000 | [diff] [blame] | 5367 | return CmpInst::isFalseWhenEqual(FoundPred); |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5368 | |
| 5369 | // Check to see if we can make the LHS or RHS match. |
| 5370 | if (LHS == FoundRHS || RHS == FoundLHS) { |
| 5371 | if (isa<SCEVConstant>(RHS)) { |
| 5372 | std::swap(FoundLHS, FoundRHS); |
| 5373 | FoundPred = ICmpInst::getSwappedPredicate(FoundPred); |
| 5374 | } else { |
| 5375 | std::swap(LHS, RHS); |
| 5376 | Pred = ICmpInst::getSwappedPredicate(Pred); |
| 5377 | } |
| 5378 | } |
| 5379 | |
| 5380 | // Check whether the found predicate is the same as the desired predicate. |
| 5381 | if (FoundPred == Pred) |
| 5382 | return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); |
| 5383 | |
| 5384 | // Check whether swapping the found predicate makes it the same as the |
| 5385 | // desired predicate. |
| 5386 | if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { |
| 5387 | if (isa<SCEVConstant>(RHS)) |
| 5388 | return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); |
| 5389 | else |
| 5390 | return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), |
| 5391 | RHS, LHS, FoundLHS, FoundRHS); |
| 5392 | } |
| 5393 | |
| 5394 | // Check whether the actual condition is beyond sufficient. |
| 5395 | if (FoundPred == ICmpInst::ICMP_EQ) |
| 5396 | if (ICmpInst::isTrueWhenEqual(Pred)) |
| 5397 | if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) |
| 5398 | return true; |
| 5399 | if (Pred == ICmpInst::ICMP_NE) |
| 5400 | if (!ICmpInst::isTrueWhenEqual(FoundPred)) |
| 5401 | if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) |
| 5402 | return true; |
| 5403 | |
| 5404 | // Otherwise assume the worst. |
| 5405 | return false; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5406 | } |
| 5407 | |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5408 | /// isImpliedCondOperands - Test whether the condition described by Pred, |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 5409 | /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS, |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5410 | /// and FoundRHS is true. |
| 5411 | bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, |
| 5412 | const SCEV *LHS, const SCEV *RHS, |
| 5413 | const SCEV *FoundLHS, |
| 5414 | const SCEV *FoundRHS) { |
| 5415 | return isImpliedCondOperandsHelper(Pred, LHS, RHS, |
| 5416 | FoundLHS, FoundRHS) || |
| 5417 | // ~x < ~y --> x > y |
| 5418 | isImpliedCondOperandsHelper(Pred, LHS, RHS, |
| 5419 | getNotSCEV(FoundRHS), |
| 5420 | getNotSCEV(FoundLHS)); |
| 5421 | } |
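
// A quick sanity check of the ~x identity above (hypothetical i8 values):
// getNotSCEV(X) is -1-X, so with x = 3 and y = 5 we get ~x = 252 and
// ~y = 250 (unsigned); x u< y holds exactly when ~y u< ~x does, and the
// same reversal holds for the signed and *-or-equal predicates, which is
// why the Found operands are negated and swapped.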
| 5422 | |
| 5423 | /// isImpliedCondOperandsHelper - Test whether the condition described by |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 5424 | /// Pred, LHS, and RHS is true whenever the condition described by Pred, |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5425 | /// FoundLHS, and FoundRHS is true. |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5426 | bool |
Dan Gohman | 0f4b285 | 2009-07-21 23:03:19 +0000 | [diff] [blame] | 5427 | ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, |
| 5428 | const SCEV *LHS, const SCEV *RHS, |
| 5429 | const SCEV *FoundLHS, |
| 5430 | const SCEV *FoundRHS) { |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5431 | switch (Pred) { |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 5432 | default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); |
| 5433 | case ICmpInst::ICMP_EQ: |
| 5434 | case ICmpInst::ICMP_NE: |
| 5435 | if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) |
| 5436 | return true; |
| 5437 | break; |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5438 | case ICmpInst::ICMP_SLT: |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 5439 | case ICmpInst::ICMP_SLE: |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5440 | if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) && |
| 5441 | isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS)) |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5442 | return true; |
| 5443 | break; |
| 5444 | case ICmpInst::ICMP_SGT: |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 5445 | case ICmpInst::ICMP_SGE: |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5446 | if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) && |
| 5447 | isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS)) |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5448 | return true; |
| 5449 | break; |
| 5450 | case ICmpInst::ICMP_ULT: |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 5451 | case ICmpInst::ICMP_ULE: |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5452 | if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) && |
| 5453 | isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS)) |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5454 | return true; |
| 5455 | break; |
| 5456 | case ICmpInst::ICMP_UGT: |
Dan Gohman | 850f791 | 2009-07-16 17:34:36 +0000 | [diff] [blame] | 5457 | case ICmpInst::ICMP_UGE: |
Dan Gohman | 53c66ea | 2010-04-11 22:16:48 +0000 | [diff] [blame] | 5458 | if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) && |
| 5459 | isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS)) |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5460 | return true; |
| 5461 | break; |
| 5462 | } |
| 5463 | |
| 5464 | return false; |
Dan Gohman | 40a5a1b | 2009-06-24 01:18:18 +0000 | [diff] [blame] | 5465 | } |
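
// The cases above simply chain inequalities (hypothetical values): to show
// LHS s< RHS from FoundLHS s< FoundRHS it suffices that LHS s<= FoundLHS
// and RHS s>= FoundRHS, e.g. 2 <= 3 < 7 <= 9 implies 2 < 9.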
| 5466 | |
Dan Gohman | 51f53b7 | 2009-06-21 23:46:38 +0000 | [diff] [blame] | 5467 | /// getBECount - Subtract the end and start values and divide by the step, |
| 5468 | /// rounding up, to get the number of times the backedge is executed. Return |
| 5469 | /// CouldNotCompute if an intermediate computation overflows. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5470 | const SCEV *ScalarEvolution::getBECount(const SCEV *Start, |
Dan Gohman | f5074ec | 2009-07-13 22:05:32 +0000 | [diff] [blame] | 5471 | const SCEV *End, |
Dan Gohman | 1f96e67 | 2009-09-17 18:05:20 +0000 | [diff] [blame] | 5472 | const SCEV *Step, |
| 5473 | bool NoWrap) { |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5474 | assert(!isKnownNegative(Step) && |
| 5475 | "This code doesn't handle negative strides yet!"); |
| 5476 | |
Dan Gohman | 51f53b7 | 2009-06-21 23:46:38 +0000 | [diff] [blame] | 5477 | const Type *Ty = Start->getType(); |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 5478 | const SCEV *NegOne = getConstant(Ty, (uint64_t)-1); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5479 | const SCEV *Diff = getMinusSCEV(End, Start); |
| 5480 | const SCEV *RoundUp = getAddExpr(Step, NegOne); |
Dan Gohman | 51f53b7 | 2009-06-21 23:46:38 +0000 | [diff] [blame] | 5481 | |
| 5482 | // Add an adjustment to the difference between End and Start so that |
| 5483 | // the division will effectively round up. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5484 | const SCEV *Add = getAddExpr(Diff, RoundUp); |
Dan Gohman | 51f53b7 | 2009-06-21 23:46:38 +0000 | [diff] [blame] | 5485 | |
Dan Gohman | 1f96e67 | 2009-09-17 18:05:20 +0000 | [diff] [blame] | 5486 | if (!NoWrap) { |
| 5487 | // Check Add for unsigned overflow. |
| 5488 | // TODO: More sophisticated things could be done here. |
| 5489 | const Type *WideTy = IntegerType::get(getContext(), |
| 5490 | getTypeSizeInBits(Ty) + 1); |
| 5491 | const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy); |
| 5492 | const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy); |
| 5493 | const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp); |
| 5494 | if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd) |
| 5495 | return getCouldNotCompute(); |
| 5496 | } |
Dan Gohman | 51f53b7 | 2009-06-21 23:46:38 +0000 | [diff] [blame] | 5497 | |
| 5498 | return getUDivExpr(Add, Step); |
| 5499 | } |
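
// A hedged sketch of the computation done by getBECount above (hypothetical
// constants and a made-up helper name; the NoWrap overflow check is elided):
// Start = 0, End = 10, Step = 3 gives Diff = 10, RoundUp = 2, Add = 12 and
// 12 /u 3 = 4, i.e. ceil(10/3).
#if 0
static void BECountExample(ScalarEvolution &SE, const Type *Ty) {
  const SCEV *Start = SE.getConstant(Ty, 0);
  const SCEV *End   = SE.getConstant(Ty, 10);
  const SCEV *Step  = SE.getConstant(Ty, 3);
  const SCEV *NegOne = SE.getConstant(Ty, (uint64_t)-1);
  const SCEV *Diff = SE.getMinusSCEV(End, Start);                      // 10
  const SCEV *Add  = SE.getAddExpr(Diff, SE.getAddExpr(Step, NegOne)); // 12
  const SCEV *BECount = SE.getUDivExpr(Add, Step);                     // 4
  (void)BECount;
}
#endif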
| 5500 | |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5501 | /// HowManyLessThans - Return the number of times a backedge containing the |
| 5502 | /// specified less-than comparison will execute. If not computable, return |
Dan Gohman | 86fbf2f | 2009-06-06 14:37:11 +0000 | [diff] [blame] | 5503 | /// CouldNotCompute. |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 5504 | ScalarEvolution::BackedgeTakenInfo |
| 5505 | ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, |
| 5506 | const Loop *L, bool isSigned) { |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5507 | // Only handle: "ADDREC < LoopInvariant". |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 5508 | if (!RHS->isLoopInvariant(L)) return getCouldNotCompute(); |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5509 | |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5510 | const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS); |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5511 | if (!AddRec || AddRec->getLoop() != L) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 5512 | return getCouldNotCompute(); |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5513 | |
Dan Gohman | 1f96e67 | 2009-09-17 18:05:20 +0000 | [diff] [blame] | 5514 | // Check to see if we have a flag which makes analysis easy. |
| 5515 | bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() : |
| 5516 | AddRec->hasNoUnsignedWrap(); |
| 5517 | |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5518 | if (AddRec->isAffine()) { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5519 | unsigned BitWidth = getTypeSizeInBits(AddRec->getType()); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5520 | const SCEV *Step = AddRec->getStepRecurrence(*this); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5521 | |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5522 | if (Step->isZero()) |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 5523 | return getCouldNotCompute(); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5524 | if (Step->isOne()) { |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5525 | // With unit stride, the iteration never steps past the limit value. |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5526 | } else if (isKnownPositive(Step)) { |
Dan Gohman | f451cb8 | 2010-02-10 16:03:48 +0000 | [diff] [blame] | 5527 | // Test whether a positive iteration can step past the limit |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5528 | // value and past the maximum value for its type in a single step. |
| 5529 | // Note that it's not sufficient to check NoWrap here, because even |
| 5530 | // though the value after a wrap is undefined, it's not undefined |
| 5531 | // behavior, so if wrap does occur, the loop could either terminate or |
Dan Gohman | 155eec7 | 2010-01-26 18:32:54 +0000 | [diff] [blame] | 5532 | // loop infinitely, but in either case, the loop is guaranteed to |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5533 | // iterate at least until the iteration where the wrapping occurs. |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 5534 | const SCEV *One = getConstant(Step->getType(), 1); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5535 | if (isSigned) { |
| 5536 | APInt Max = APInt::getSignedMaxValue(BitWidth); |
| 5537 | if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax()) |
| 5538 | .slt(getSignedRange(RHS).getSignedMax())) |
| 5539 | return getCouldNotCompute(); |
| 5540 | } else { |
| 5541 | APInt Max = APInt::getMaxValue(BitWidth); |
| 5542 | if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax()) |
| 5543 | .ult(getUnsignedRange(RHS).getUnsignedMax())) |
| 5544 | return getCouldNotCompute(); |
| 5545 | } |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5546 | } else |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5547 | // TODO: Handle negative strides here and below. |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 5548 | return getCouldNotCompute(); |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5549 | |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5550 | // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant |
| 5551 | // m. So, we count the number of iterations in which {n,+,s} < m is true. |
| 5552 | // Note that we cannot simply return max(m-n,0)/s because it's not safe to |
Wojciech Matyjewicz | a65ee03 | 2008-02-13 12:21:32 +0000 | [diff] [blame] | 5553 | // treat m-n as either signed or unsigned due to the possibility of overflow. |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5554 | |
Wojciech Matyjewicz | 3a4cbe2 | 2008-02-13 11:51:34 +0000 | [diff] [blame] | 5555 | // First, we get the value of the LHS in the first iteration: n |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5556 | const SCEV *Start = AddRec->getOperand(0); |
Wojciech Matyjewicz | 3a4cbe2 | 2008-02-13 11:51:34 +0000 | [diff] [blame] | 5557 | |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5558 | // Determine the minimum constant start value. |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5559 | const SCEV *MinStart = getConstant(isSigned ? |
| 5560 | getSignedRange(Start).getSignedMin() : |
| 5561 | getUnsignedRange(Start).getUnsignedMin()); |
Wojciech Matyjewicz | 3a4cbe2 | 2008-02-13 11:51:34 +0000 | [diff] [blame] | 5562 | |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5563 | // If we know that the condition is true in order to enter the loop, |
| 5564 | // then we know that it will run exactly (m-n)/s times. Otherwise, we |
Dan Gohman | 6c0866c | 2009-05-24 23:45:28 +0000 | [diff] [blame] | 5565 | // only know that it will execute (max(m,n)-n)/s times. In both cases, |
| 5566 | // the division must round up. |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5567 | const SCEV *End = RHS; |
Dan Gohman | 3948d0b | 2010-04-11 19:27:13 +0000 | [diff] [blame] | 5568 | if (!isLoopEntryGuardedByCond(L, |
| 5569 | isSigned ? ICmpInst::ICMP_SLT : |
| 5570 | ICmpInst::ICMP_ULT, |
| 5571 | getMinusSCEV(Start, Step), RHS)) |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5572 | End = isSigned ? getSMaxExpr(RHS, Start) |
| 5573 | : getUMaxExpr(RHS, Start); |
| 5574 | |
| 5575 | // Determine the maximum constant end value. |
Dan Gohman | 85b05a2 | 2009-07-13 21:35:55 +0000 | [diff] [blame] | 5576 | const SCEV *MaxEnd = getConstant(isSigned ? |
| 5577 | getSignedRange(End).getSignedMax() : |
| 5578 | getUnsignedRange(End).getUnsignedMax()); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5579 | |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5580 | // If MaxEnd is within a step of the maximum integer value in its type, |
| 5581 | // adjust it down to the minimum value which would produce the same effect. |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 5582 | // This allows the subsequent ceiling division of (N+(step-1))/step to |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5583 | // compute the correct value. |
| 5584 | const SCEV *StepMinusOne = getMinusSCEV(Step, |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 5585 | getConstant(Step->getType(), 1)); |
Dan Gohman | 52fddd3 | 2010-01-26 04:40:18 +0000 | [diff] [blame] | 5586 | MaxEnd = isSigned ? |
| 5587 | getSMinExpr(MaxEnd, |
| 5588 | getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)), |
| 5589 | StepMinusOne)) : |
| 5590 | getUMinExpr(MaxEnd, |
| 5591 | getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)), |
| 5592 | StepMinusOne)); |
| 5593 | |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5594 | // Finally, we subtract these two values and divide, rounding up, to get |
| 5595 | // the number of times the backedge is executed. |
Dan Gohman | 1f96e67 | 2009-09-17 18:05:20 +0000 | [diff] [blame] | 5596 | const SCEV *BECount = getBECount(Start, End, Step, NoWrap); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5597 | |
| 5598 | // The maximum backedge count is similar, except using the minimum start |
| 5599 | // value and the maximum end value. |
Dan Gohman | 1f96e67 | 2009-09-17 18:05:20 +0000 | [diff] [blame] | 5600 | const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap); |
Dan Gohman | a1af757 | 2009-04-30 20:47:05 +0000 | [diff] [blame] | 5601 | |
| 5602 | return BackedgeTakenInfo(BECount, MaxBECount); |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5603 | } |
| 5604 | |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 5605 | return getCouldNotCompute(); |
Chris Lattner | db25de4 | 2005-08-15 23:33:51 +0000 | [diff] [blame] | 5606 | } |
| 5607 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5608 | /// getNumIterationsInRange - Return the number of iterations of this loop that |
| 5609 | /// produce values in the specified constant range. Another way of looking at |
| 5610 | /// this is that it returns the first iteration number at which the value is no |
| 5611 | /// longer in the range, thus computing the exit count. If the iteration count can't |
| 5612 | /// be computed, an instance of SCEVCouldNotCompute is returned. |
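/// For example, the affine recurrence {0,+,2} takes the values 0, 2, 4, 6, 8
/// within the range [0,10) during iterations 0 through 4 and first leaves the
/// range on iteration 5, so 5 is returned.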
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5613 | const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 5614 | ScalarEvolution &SE) const { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5615 | if (Range.isFullSet()) // Infinite loop. |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 5616 | return SE.getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5617 | |
| 5618 | // If the start is a non-zero constant, shift the range to simplify things. |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 5619 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) |
Reid Spencer | cae5754 | 2007-03-02 00:28:52 +0000 | [diff] [blame] | 5620 | if (!SC->getValue()->isZero()) { |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5621 | SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 5622 | Operands[0] = SE.getConstant(SC->getType(), 0); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5623 | const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop()); |
Dan Gohman | 622ed67 | 2009-05-04 22:02:23 +0000 | [diff] [blame] | 5624 | if (const SCEVAddRecExpr *ShiftedAddRec = |
| 5625 | dyn_cast<SCEVAddRecExpr>(Shifted)) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5626 | return ShiftedAddRec->getNumIterationsInRange( |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5627 | Range.subtract(SC->getValue()->getValue()), SE); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5628 | // This is strange and shouldn't happen. |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 5629 | return SE.getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5630 | } |
| 5631 | |
| 5632 | // The only time we can solve this is when we have all constant indices. |
| 5633 | // Otherwise, we cannot determine the overflow conditions. |
| 5634 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) |
| 5635 | if (!isa<SCEVConstant>(getOperand(i))) |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 5636 | return SE.getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5637 | |
| 5638 | |
| 5639 | // Okay, at this point we know that all elements of the chrec are constants and |
| 5640 | // that the start element is zero. |
| 5641 | |
| 5642 | // First check to see if the range contains zero. If not, the first |
| 5643 | // iteration exits. |
Dan Gohman | af79fb5 | 2009-04-21 01:07:12 +0000 | [diff] [blame] | 5644 | unsigned BitWidth = SE.getTypeSizeInBits(getType()); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 5645 | if (!Range.contains(APInt(BitWidth, 0))) |
Dan Gohman | deff621 | 2010-05-03 22:09:21 +0000 | [diff] [blame] | 5646 | return SE.getConstant(getType(), 0); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 5647 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5648 | if (isAffine()) { |
| 5649 | // If this is an affine expression then we have this situation: |
| 5650 | // Solve {0,+,A} in Range === Ax in Range |
| 5651 | |
Nick Lewycky | eefdebe | 2007-07-16 02:08:00 +0000 | [diff] [blame] | 5652 | // We know that zero is in the range. If A is positive then we know that |
| 5653 | // the upper value of the range must be the first possible exit value. |
| 5654 | // If A is negative then the lower value of the range is the last possible loop |
| 5655 | // value. Also note that we already checked for a full range. |
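// For example, with A == 3 and Range == [0,10): End == 9, so
// ExitVal == (9 + 3) / 3 == 4; iteration 3 yields 9 (still in the range) and
// iteration 4 yields 12, the first value outside the range.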
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 5656 | APInt One(BitWidth,1); |
Nick Lewycky | eefdebe | 2007-07-16 02:08:00 +0000 | [diff] [blame] | 5657 | APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue(); |
| 5658 | APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5659 | |
Nick Lewycky | eefdebe | 2007-07-16 02:08:00 +0000 | [diff] [blame] | 5660 | // The exit value should be (End+A)/A. |
Nick Lewycky | 9a2f931 | 2007-09-27 14:12:54 +0000 | [diff] [blame] | 5661 | APInt ExitVal = (End + A).udiv(A); |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 5662 | ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5663 | |
| 5664 | // Evaluate at the exit value. If we really did fall out of the valid |
| 5665 | // range, then we computed our trip count; otherwise, wraparound or other |
| 5666 | // things must have happened. |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5667 | ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); |
Reid Spencer | a6e8a95 | 2007-03-01 07:54:15 +0000 | [diff] [blame] | 5668 | if (Range.contains(Val->getValue())) |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 5669 | return SE.getCouldNotCompute(); // Something strange happened |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5670 | |
| 5671 | // Ensure that the previous value is in the range. This is a sanity check. |
Reid Spencer | 581b0d4 | 2007-02-28 19:57:34 +0000 | [diff] [blame] | 5672 | assert(Range.contains( |
Dan Gohman | 64a845e | 2009-06-24 04:48:43 +0000 | [diff] [blame] | 5673 | EvaluateConstantChrecAtConstant(this, |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 5674 | ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) && |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5675 | "Linear scev computation is off in a bad way!"); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5676 | return SE.getConstant(ExitValue); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5677 | } else if (isQuadratic()) { |
| 5678 | // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the |
| 5679 | // quadratic equation to solve it. To do this, we must frame our problem in |
| 5680 | // terms of figuring out when zero is crossed, instead of when |
| 5681 | // Range.getUpper() is crossed. |
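// For example, the chrec {0,+,1,+,1} takes the values 0, 1, 3, 6, 10, ...;
// to find where it leaves [0,7) we instead solve {-7,+,1,+,1} for a zero
// crossing, and the off-by-one checks below should settle on iteration 4
// (value 10), the first iteration whose value lies outside the range.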
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5682 | SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5683 | NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5684 | const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5685 | |
| 5686 | // Next, solve the constructed addrec |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5687 | std::pair<const SCEV *,const SCEV *> Roots = |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5688 | SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5689 | const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); |
| 5690 | const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5691 | if (R1) { |
| 5692 | // Pick the smallest positive root value. |
Zhou Sheng | 6b6b6ef | 2007-01-11 12:24:14 +0000 | [diff] [blame] | 5693 | if (ConstantInt *CB = |
Owen Anderson | baf3c40 | 2009-07-29 18:55:55 +0000 | [diff] [blame] | 5694 | dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, |
Owen Anderson | 76f600b | 2009-07-06 22:37:39 +0000 | [diff] [blame] | 5695 | R1->getValue(), R2->getValue()))) { |
Reid Spencer | 579dca1 | 2007-01-12 04:24:46 +0000 | [diff] [blame] | 5696 | if (!CB->getZExtValue()) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5697 | std::swap(R1, R2); // R1 is the minimum root now. |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 5698 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5699 | // Make sure the root is not off by one. The returned iteration should |
| 5700 | // not be in the range, but the previous one should be. When solving |
| 5701 | // for "X*X < 5", for example, we should not return a root of 2. |
| 5702 | ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this, |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5703 | R1->getValue(), |
| 5704 | SE); |
Reid Spencer | a6e8a95 | 2007-03-01 07:54:15 +0000 | [diff] [blame] | 5705 | if (Range.contains(R1Val->getValue())) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5706 | // The next iteration must be out of the range... |
Owen Anderson | 76f600b | 2009-07-06 22:37:39 +0000 | [diff] [blame] | 5707 | ConstantInt *NextVal = |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 5708 | ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 5709 | |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5710 | R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); |
Reid Spencer | a6e8a95 | 2007-03-01 07:54:15 +0000 | [diff] [blame] | 5711 | if (!Range.contains(R1Val->getValue())) |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5712 | return SE.getConstant(NextVal); |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 5713 | return SE.getCouldNotCompute(); // Something strange happened |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5714 | } |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 5715 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5716 | // If R1 was not in the range, then it is a good return value. Make |
| 5717 | // sure that R1-1 WAS in the range though, just in case. |
Owen Anderson | 76f600b | 2009-07-06 22:37:39 +0000 | [diff] [blame] | 5718 | ConstantInt *NextVal = |
Owen Anderson | eed707b | 2009-07-24 23:12:02 +0000 | [diff] [blame] | 5719 | ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1); |
Dan Gohman | 246b256 | 2007-10-22 18:31:58 +0000 | [diff] [blame] | 5720 | R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); |
Reid Spencer | a6e8a95 | 2007-03-01 07:54:15 +0000 | [diff] [blame] | 5721 | if (Range.contains(R1Val->getValue())) |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5722 | return R1; |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 5723 | return SE.getCouldNotCompute(); // Something strange happened |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5724 | } |
| 5725 | } |
| 5726 | } |
| 5727 | |
Dan Gohman | f4ccfcb | 2009-04-18 17:58:19 +0000 | [diff] [blame] | 5728 | return SE.getCouldNotCompute(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5729 | } |
| 5730 | |
| 5731 | |
| 5732 | |
| 5733 | //===----------------------------------------------------------------------===// |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5734 | // SCEVCallbackVH Class Implementation |
| 5735 | //===----------------------------------------------------------------------===// |
| 5736 | |
Dan Gohman | 1959b75 | 2009-05-19 19:22:47 +0000 | [diff] [blame] | 5737 | void ScalarEvolution::SCEVCallbackVH::deleted() { |
Dan Gohman | ddf9f99 | 2009-07-13 22:20:53 +0000 | [diff] [blame] | 5738 | assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5739 | if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) |
| 5740 | SE->ConstantEvolutionLoopExitValue.erase(PN); |
| 5741 | SE->Scalars.erase(getValPtr()); |
| 5742 | // The erase above destroyed this SCEVCallbackVH, so 'this' now dangles! |
| 5743 | } |
| 5744 | |
Dan Gohman | 81f9121 | 2010-07-28 01:09:07 +0000 | [diff] [blame] | 5745 | void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { |
Dan Gohman | ddf9f99 | 2009-07-13 22:20:53 +0000 | [diff] [blame] | 5746 | assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); |
Eric Christopher | e6cbfa6 | 2010-07-29 01:25:38 +0000 | [diff] [blame] | 5747 | |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5748 | // Forget all the expressions associated with users of the old value, |
| 5749 | // so that future queries will recompute the expressions using the new |
| 5750 | // value. |
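// This is a simple worklist walk over the transitive users of the old value,
// erasing each visited user's cached SCEV along the way.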
Dan Gohman | ab37f50 | 2010-08-02 23:49:30 +0000 | [diff] [blame] | 5751 | Value *Old = getValPtr(); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5752 | SmallVector<User *, 16> Worklist; |
Dan Gohman | 69fcae9 | 2009-07-14 14:34:04 +0000 | [diff] [blame] | 5753 | SmallPtrSet<User *, 8> Visited; |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5754 | for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end(); |
| 5755 | UI != UE; ++UI) |
| 5756 | Worklist.push_back(*UI); |
| 5757 | while (!Worklist.empty()) { |
| 5758 | User *U = Worklist.pop_back_val(); |
| 5759 | // Deleting the Old value will cause this to dangle. Postpone |
| 5760 | // that until everything else is done. |
Dan Gohman | 59846ac | 2010-07-28 00:28:25 +0000 | [diff] [blame] | 5761 | if (U == Old) |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5762 | continue; |
Dan Gohman | 69fcae9 | 2009-07-14 14:34:04 +0000 | [diff] [blame] | 5763 | if (!Visited.insert(U)) |
| 5764 | continue; |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5765 | if (PHINode *PN = dyn_cast<PHINode>(U)) |
| 5766 | SE->ConstantEvolutionLoopExitValue.erase(PN); |
Dan Gohman | 69fcae9 | 2009-07-14 14:34:04 +0000 | [diff] [blame] | 5767 | SE->Scalars.erase(U); |
| 5768 | for (Value::use_iterator UI = U->use_begin(), UE = U->use_end(); |
| 5769 | UI != UE; ++UI) |
| 5770 | Worklist.push_back(*UI); |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5771 | } |
Dan Gohman | 59846ac | 2010-07-28 00:28:25 +0000 | [diff] [blame] | 5772 | // Delete the Old value. |
| 5773 | if (PHINode *PN = dyn_cast<PHINode>(Old)) |
| 5774 | SE->ConstantEvolutionLoopExitValue.erase(PN); |
| 5775 | SE->Scalars.erase(Old); |
| 5776 | // The erase above destroyed this SCEVCallbackVH, so 'this' now dangles! |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5777 | } |
| 5778 | |
Dan Gohman | 1959b75 | 2009-05-19 19:22:47 +0000 | [diff] [blame] | 5779 | ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) |
Dan Gohman | 35738ac | 2009-05-04 22:30:44 +0000 | [diff] [blame] | 5780 | : CallbackVH(V), SE(se) {} |
| 5781 | |
| 5782 | //===----------------------------------------------------------------------===// |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5783 | // ScalarEvolution Class Implementation |
| 5784 | //===----------------------------------------------------------------------===// |
| 5785 | |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5786 | ScalarEvolution::ScalarEvolution() |
Owen Anderson | 90c579d | 2010-08-06 18:33:48 +0000 | [diff] [blame] | 5787 | : FunctionPass(ID), FirstUnknown(0) { |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5788 | } |
| 5789 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5790 | bool ScalarEvolution::runOnFunction(Function &F) { |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5791 | this->F = &F; |
| 5792 | LI = &getAnalysis<LoopInfo>(); |
| 5793 | TD = getAnalysisIfAvailable<TargetData>(); |
Dan Gohman | 454d26d | 2010-02-22 04:11:59 +0000 | [diff] [blame] | 5794 | DT = &getAnalysis<DominatorTree>(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5795 | return false; |
| 5796 | } |
| 5797 | |
| 5798 | void ScalarEvolution::releaseMemory() { |
Dan Gohman | ab37f50 | 2010-08-02 23:49:30 +0000 | [diff] [blame] | 5799 | // Iterate through all the SCEVUnknown instances and call their |
| 5800 | // destructors, so that they release their references to their values. |
| 5801 | for (SCEVUnknown *U = FirstUnknown; U; U = U->Next) |
| 5802 | U->~SCEVUnknown(); |
| 5803 | FirstUnknown = 0; |
| 5804 | |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5805 | Scalars.clear(); |
| 5806 | BackedgeTakenCounts.clear(); |
| 5807 | ConstantEvolutionLoopExitValue.clear(); |
Dan Gohman | 6bce643 | 2009-05-08 20:47:27 +0000 | [diff] [blame] | 5808 | ValuesAtScopes.clear(); |
Dan Gohman | 1c34375 | 2009-06-27 21:21:31 +0000 | [diff] [blame] | 5809 | UniqueSCEVs.clear(); |
| 5810 | SCEVAllocator.Reset(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5811 | } |
| 5812 | |
| 5813 | void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const { |
| 5814 | AU.setPreservesAll(); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5815 | AU.addRequiredTransitive<LoopInfo>(); |
Dan Gohman | 1cd9275 | 2010-01-19 22:21:27 +0000 | [diff] [blame] | 5816 | AU.addRequiredTransitive<DominatorTree>(); |
Dan Gohman | 2d1be87 | 2009-04-16 03:18:22 +0000 | [diff] [blame] | 5817 | } |
| 5818 | |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5819 | bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 5820 | return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5821 | } |
| 5822 | |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5823 | static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5824 | const Loop *L) { |
| 5825 | // Print all inner loops first |
| 5826 | for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) |
| 5827 | PrintLoopInfo(OS, SE, *I); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 5828 | |
Dan Gohman | 3073329 | 2010-01-09 18:17:45 +0000 | [diff] [blame] | 5829 | OS << "Loop "; |
| 5830 | WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false); |
| 5831 | OS << ": "; |
Chris Lattner | f1ab4b4 | 2004-04-18 22:14:10 +0000 | [diff] [blame] | 5832 | |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 5833 | SmallVector<BasicBlock *, 8> ExitBlocks; |
Chris Lattner | f1ab4b4 | 2004-04-18 22:14:10 +0000 | [diff] [blame] | 5834 | L->getExitBlocks(ExitBlocks); |
| 5835 | if (ExitBlocks.size() != 1) |
Nick Lewycky | aeb5e5c | 2008-01-02 02:49:20 +0000 | [diff] [blame] | 5836 | OS << "<multiple exits> "; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5837 | |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 5838 | if (SE->hasLoopInvariantBackedgeTakenCount(L)) { |
| 5839 | OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5840 | } else { |
Dan Gohman | 46bdfb0 | 2009-02-24 18:55:53 +0000 | [diff] [blame] | 5841 | OS << "Unpredictable backedge-taken count. "; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5842 | } |
| 5843 | |
Dan Gohman | 3073329 | 2010-01-09 18:17:45 +0000 | [diff] [blame] | 5844 | OS << "\n" |
| 5845 | "Loop "; |
| 5846 | WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false); |
| 5847 | OS << ": "; |
Dan Gohman | aa551ae | 2009-06-24 00:33:16 +0000 | [diff] [blame] | 5848 | |
| 5849 | if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { |
| 5850 | OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); |
| 5851 | } else { |
| 5852 | OS << "Unpredictable max backedge-taken count. "; |
| 5853 | } |
| 5854 | |
| 5855 | OS << "\n"; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5856 | } |
| 5857 | |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 5858 | void ScalarEvolution::print(raw_ostream &OS, const Module *) const { |
Dan Gohman | 3f46a3a | 2010-03-01 17:49:51 +0000 | [diff] [blame] | 5859 | // ScalarEvolution's implementation of the print method is to print |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5860 | // out SCEV values of all instructions that are interesting. Doing |
| 5861 | // this potentially causes it to create new SCEV objects though, |
| 5862 | // which technically conflicts with the const qualifier. This isn't |
Dan Gohman | 1afdc5f | 2009-07-10 20:25:29 +0000 | [diff] [blame] | 5863 | // observable from outside the class though, so casting away the |
| 5864 | // const isn't dangerous. |
Dan Gohman | 5d98491 | 2009-12-18 01:14:11 +0000 | [diff] [blame] | 5865 | ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5866 | |
Dan Gohman | 3073329 | 2010-01-09 18:17:45 +0000 | [diff] [blame] | 5867 | OS << "Classifying expressions for: "; |
| 5868 | WriteAsOperand(OS, F, /*PrintType=*/false); |
| 5869 | OS << "\n"; |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5870 | for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) |
Dan Gohman | a189bae | 2010-05-03 17:03:23 +0000 | [diff] [blame] | 5871 | if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) { |
Dan Gohman | c902e13 | 2009-07-13 23:03:05 +0000 | [diff] [blame] | 5872 | OS << *I << '\n'; |
Dan Gohman | 8dae138 | 2008-09-14 17:21:12 +0000 | [diff] [blame] | 5873 | OS << " --> "; |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5874 | const SCEV *SV = SE.getSCEV(&*I); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5875 | SV->print(OS); |
Misha Brukman | 2b37d7c | 2005-04-21 21:13:18 +0000 | [diff] [blame] | 5876 | |
Dan Gohman | 0c689c5 | 2009-06-19 17:49:54 +0000 | [diff] [blame] | 5877 | const Loop *L = LI->getLoopFor((*I).getParent()); |
| 5878 | |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5879 | const SCEV *AtUse = SE.getSCEVAtScope(SV, L); |
Dan Gohman | 0c689c5 | 2009-06-19 17:49:54 +0000 | [diff] [blame] | 5880 | if (AtUse != SV) { |
| 5881 | OS << " --> "; |
| 5882 | AtUse->print(OS); |
| 5883 | } |
| 5884 | |
| 5885 | if (L) { |
Dan Gohman | 9e7d988 | 2009-06-18 00:37:45 +0000 | [diff] [blame] | 5886 | OS << "\t\t" "Exits: "; |
Dan Gohman | 0bba49c | 2009-07-07 17:06:11 +0000 | [diff] [blame] | 5887 | const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); |
Dan Gohman | d594e6f | 2009-05-24 23:25:42 +0000 | [diff] [blame] | 5888 | if (!ExitValue->isLoopInvariant(L)) { |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5889 | OS << "<<Unknown>>"; |
| 5890 | } else { |
| 5891 | OS << *ExitValue; |
| 5892 | } |
| 5893 | } |
| 5894 | |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5895 | OS << "\n"; |
| 5896 | } |
| 5897 | |
Dan Gohman | 3073329 | 2010-01-09 18:17:45 +0000 | [diff] [blame] | 5898 | OS << "Determining loop execution counts for: "; |
| 5899 | WriteAsOperand(OS, F, /*PrintType=*/false); |
| 5900 | OS << "\n"; |
Dan Gohman | f8a8be8 | 2009-04-21 23:15:49 +0000 | [diff] [blame] | 5901 | for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) |
| 5902 | PrintLoopInfo(OS, &SE, *I); |
Chris Lattner | 53e677a | 2004-04-02 20:23:17 +0000 | [diff] [blame] | 5903 | } |
Dan Gohman | b7ef729 | 2009-04-21 00:47:46 +0000 | [diff] [blame] | 5904 | |