//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;
  assert(isa<PointerType>(Ty) && "Expected a pointer type!");
  return TD ? TD->getPointerSizeInBits() : 0;
}

static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                    const APInt &Mask,
                                    APInt &KnownZero, APInt &KnownOne,
                                    APInt &KnownZero2, APInt &KnownOne2,
                                    const TargetData *TD, unsigned Depth) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
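      // A concrete 8-bit illustration: for C = 20 = 0b00010100, proving that
      // the top four bits of X are zero (i.e. X < 16) places 20-X in (4, 20],
      // so the top three bits of the result are known zero.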
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = Mask.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth because C is non-negative (its sign bit is
        // clear), so NLZ+1 never exceeds BitWidth.
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        llvm::ComputeMaskedBits(Op1, MaskV, KnownZero2, KnownOne2, TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // in the range [0, C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
        }
      }
    }
  }

  unsigned BitWidth = Mask.getBitWidth();

  // If one of the operands has trailing zeros, then the bits that the
  // other operand has in those bit positions will be preserved in the
  // result. For an add, this works with either operand. For a subtract,
  // this only works if the known zeros are in the right operand.
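  // For example, if Op1 is known to end in three zero bits, the low three
  // bits of Op0 appear unchanged in Op0+Op1 and, since Op1 is the right
  // operand, in Op0-Op1 as well.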
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt Mask2 = APInt::getLowBitsSet(BitWidth,
                                     BitWidth - Mask.countLeadingZeros());
  llvm::ComputeMaskedBits(Op0, Mask2, LHSKnownZero, LHSKnownOne, TD, Depth+1);
  assert((LHSKnownZero & LHSKnownOne) == 0 &&
         "Bits known to be one AND zero?");
  unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

  llvm::ComputeMaskedBits(Op1, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
  assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

  // Determine which operand has more trailing zeros, and use that
  // many bits from the other operand.
  if (LHSKnownZeroOut > RHSKnownZeroOut) {
    if (Add) {
      APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
      KnownZero |= KnownZero2 & Mask;
      KnownOne  |= KnownOne2 & Mask;
    } else {
      // If the known zeros are in the left operand for a subtract,
      // fall back to the minimum known zeros in both operands.
      KnownZero |= APInt::getLowBitsSet(BitWidth,
                                        std::min(LHSKnownZeroOut,
                                                 RHSKnownZeroOut));
    }
  } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
    APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
    KnownZero |= LHSKnownZero & Mask;
    KnownOne  |= LHSKnownOne & Mask;
  }

  // Are we still trying to solve for the sign bit?
  if (Mask.isNegative() && !KnownZero.isNegative() && !KnownOne.isNegative()) {
    if (NSW) {
      if (Add) {
        // Adding two non-negative numbers can't wrap to negative,
        if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // and adding two negative numbers can't wrap to non-negative.
        else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      } else {
        // Subtracting a negative number from a non-negative one can't wrap
        // to negative,
        if (LHSKnownZero.isNegative() && KnownOne2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // and subtracting a non-negative number from a negative one can't
        // wrap to non-negative.
        else if (LHSKnownOne.isNegative() && KnownZero2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      }
    }
  }
}

/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bit sets. This code only analyzes bits in Mask, in order to short-circuit
/// processing.
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
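///
/// For example, querying %v = shl i8 %x, 2 with Mask = 0xff sets at least the
/// two low bits in KnownZero (the shift fills them with zeros) and leaves
/// those bits clear in KnownOne.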
void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
                             APInt &KnownZero, APInt &KnownOne,
                             const TargetData *TD, unsigned Depth) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Mask.getBitWidth();
  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((!TD ||
          TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, Mask, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) ||
      isa<ConstantAggregateZero>(V)) {
    KnownOne.clearAllBits();
    KnownZero = Mask;
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element. There is no real need to handle ConstantVector here, because
  // we don't handle undef in any particularly useful way.
  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
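    // E.g. for <i8 5, i8 7> the intersection gives KnownOne = 0b00000101 and
    // KnownZero = 0b11111000: bit 1 differs between the elements, so it ends
    // up in neither set.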
    KnownZero.setAllBits(); KnownOne.setAllBits();
    APInt Elt(KnownZero.getBitWidth(), 0);
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      Elt = CDS->getElementAsInteger(i);
      KnownZero &= ~Elt;
      KnownOne &= Elt;
    }
    return;
  }

  // The address of an aligned GlobalValue has trailing zeros.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD) {
      if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
        Type *ObjectType = GVar->getType()->getElementType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
            Align = TD->getPreferredAlignment(GVar);
          else
            Align = TD->getABITypeAlignment(ObjectType);
        }
      }
    }
    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    else
      KnownZero.clearAllBits();
    KnownOne.clearAllBits();
    return;
  }
  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (GA->mayBeOverridden()) {
      KnownZero.clearAllBits(); KnownOne.clearAllBits();
    } else {
      ComputeMaskedBits(GA->getAliasee(), Mask, KnownZero, KnownOne,
                        TD, Depth+1);
    }
    return;
  }

  if (Argument *A = dyn_cast<Argument>(V)) {
    // Get alignment information off byval arguments if specified in the IR.
    if (A->hasByValAttr())
      if (unsigned Align = A->getParamAlignment())
        KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                                CountTrailingZeros_32(Align));
    return;
  }

  // Start out not knowing anything.
  KnownZero.clearAllBits(); KnownOne.clearAllBits();

  if (Depth == MaxDepth || Mask == 0)
    return;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownZero);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 bits are known to be clear if zero in either the LHS
    // or the RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownOne);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 bits are known to be set if set in either the LHS
    // or the RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 bits are known to be set if set in only one of the LHS
    // and the RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,
                      Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    bool isKnownNegative = false;
    bool isKnownNonNegative = false;
    // If the multiplication is known not to overflow, compute the sign bit.
    if (Mask.isNegative() &&
        cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap()) {
      Value *Op1 = I->getOperand(1), *Op2 = I->getOperand(0);
      if (Op1 == Op2) {
        // The product of a number with itself is non-negative.
        isKnownNonNegative = true;
      } else {
        bool isKnownNonNegative1 = KnownZero.isNegative();
        bool isKnownNonNegative2 = KnownZero2.isNegative();
        bool isKnownNegative1 = KnownOne.isNegative();
        bool isKnownNegative2 = KnownOne2.isNegative();
        // The product of two numbers with the same sign is non-negative.
        isKnownNonNegative = (isKnownNegative1 && isKnownNegative2) ||
                             (isKnownNonNegative1 && isKnownNonNegative2);
        // The product of a negative number and a non-negative number is either
        // negative or zero.
        if (!isKnownNonNegative)
          isKnownNegative = (isKnownNegative1 && isKnownNonNegative2 &&
                             isKnownNonZero(Op2, TD, Depth)) ||
                            (isKnownNegative2 && isKnownNonNegative1 &&
                             isKnownNonZero(Op1, TD, Depth));
      }
    }

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
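    // For example, if one operand ends in 00 and the other ends in 0, the
    // product ends in at least 000: the trailing zero counts add (capped at
    // the bit width).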
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    KnownZero &= Mask;

    // Only make use of no-wrap flags if we failed to compute the sign bit
    // directly. This matters if the multiplication always overflows, in
    // which case we prefer to follow the result of the direct computation,
    // though as the program is invoking undefined behaviour we can choose
    // whatever we like here.
    if (isKnownNonNegative && !KnownOne.isNegative())
      KnownZero.setBit(BitWidth - 1);
    else if (isKnownNegative && !KnownZero.isNegative())
      KnownOne.setBit(BitWidth - 1);

    return;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
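    // E.g. in 32 bits, if the numerator's top 8 bits are known zero and the
    // denominator has bit 4 known set (so it is >= 16), the quotient's top
    // 8 + 4 = 12 bits are known zero.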
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(I->getOperand(1),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    if (SrcTy->isPointerTy())
      SrcBitWidth = TD->getTypeSizeInBits(SrcTy);
    else
      SrcBitWidth = SrcTy->getScalarSizeInBits();

    APInt MaskIn = Mask.zextOrTrunc(SrcBitWidth);
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
                        Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt MaskIn = Mask.trunc(SrcBitWidth);
    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
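    // E.g. for sext i8 %x to i32, if the input's bit 7 is known zero the top
    // 24 bits of the result are known zero; if it is known one they are
    // known one.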
    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])         // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt Mask2(Mask.lshr(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
      // high bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])      // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1])  // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                            Mask, KnownZero, KnownOne, KnownZero2, KnownOne2,
                            TD, Depth);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                            Mask, KnownZero, KnownOne, KnownZero2, KnownOne2,
                            TD, Depth);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                          Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;
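        // E.g. for %x srem 8 the result lies in [-7, 7]: its low three bits
        // come straight from %x, and the bits above them are all zero when %x
        // is known non-negative and all one when %x is known negative with a
        // nonzero remainder.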

        KnownZero &= Mask;
        KnownOne &= Mask;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (Mask.isNegative() && KnownZero.isNonNegative()) {
      APInt Mask2 = APInt::getSignBit(BitWidth);
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
                        Depth+1);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero |= LHSKnownZero;
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        APInt Mask2 = LowBits & Mask;
        KnownZero |= ~LowBits & Mask;
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
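    // E.g. x urem y is at most x and (for a nonzero y) strictly less than y,
    // so the larger of the two operands' known-leading-zero counts carries
    // over to the result.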
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
                      TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
                      TD, Depth+1);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
    break;
  }

  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD)
      Align = TD->getABITypeAlignment(AI->getType()->getElementType());

    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
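    // For example (assuming TD gives i32 a 4-byte size), a gep into an
    // [N x i32] array with an arbitrary index contributes two trailing zero
    // bits from the element size; the result keeps the minimum of that and
    // the base pointer's own trailing zeros.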
    APInt LocalMask = APInt::getAllOnesValue(BitWidth);
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalMask,
                      LocalKnownZero, LocalKnownOne, TD, Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD) return;
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min(TrailZ,
                          CountTrailingZeros_64(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
        LocalMask = APInt::getAllOnesValue(GEPOpiBits);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalMask,
                          LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          unsigned(CountTrailingZeros_64(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
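    // E.g. for %i = phi [ 0, %entry ], [ %i.next, %loop ] with
    // %i.next = add i32 %i, 4, both the start value and the step have at
    // least two trailing zeros, so the low two bits of %i are known zero.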
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R.  Check for low
          // zero bits.
          APInt Mask2 = APInt::getAllOnesValue(BitWidth);
          ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
          Mask2 = APInt::getLowBitsSet(BitWidth,
                                       KnownZero2.countTrailingOnes());

          // We need to take the minimum number of known low-zero bits from
          // the two inputs of the recurrence.
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          ComputeMaskedBits(L, Mask2, KnownZero3, KnownOne3, TD, Depth+1);

          KnownZero = Mask &
                      APInt::getLowBitsSet(BitWidth,
                                           std::min(KnownZero2.countTrailingOnes(),
                                                    KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      return;

    // Otherwise combine the known bits from all the incoming values, keeping
    // only the bits known in every one of them, and taking conservative care
    // to avoid excessive recursion.
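    // E.g. a phi of the constants 8 and 12 (0b1000 and 0b1100) keeps bit 3 as
    // known one and the low two bits as known zero, since those facts hold
    // for every incoming value; bit 2 differs and stays unknown.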
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value refers back to this PHI itself.
      if (P->hasConstantValue() == P)
        break;

      KnownZero = Mask;
      KnownOne = Mask;
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
        // Skip direct self references.
        if (P->getIncomingValue(i) == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
                          KnownZero2, KnownOne2, TD, MaxDepth-1);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero = Mask & APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = Mask & APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_8:
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero = Mask & APInt::getHighBitsSet(64, 32);
        break;
      }
    }
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          ComputeMaskedBitsAddSub(true, II->getArgOperand(0),
                                  II->getArgOperand(1), false, Mask,
                                  KnownZero, KnownOne, KnownZero2, KnownOne2,
                                  TD, Depth);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          ComputeMaskedBitsAddSub(false, II->getArgOperand(0),
                                  II->getArgOperand(1), false, Mask,
                                  KnownZero, KnownOne, KnownZero2, KnownOne2,
                                  TD, Depth);
          break;
        }
      }
    }
  }
}

/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one.  Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const TargetData *TD, unsigned Depth) {
  unsigned BitWidth = getBitWidth(V->getType(), TD);
  if (!BitWidth) {
    KnownZero = false;
    KnownOne = false;
    return;
  }
  APInt ZeroBits(BitWidth, 0);
  APInt OneBits(BitWidth, 0);
  ComputeMaskedBits(V, APInt::getSignBit(BitWidth), ZeroBits, OneBits, TD,
                    Depth);
  KnownOne = OneBits[BitWidth - 1];
  KnownZero = ZeroBits[BitWidth - 1];
}

/// isPowerOfTwo - Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
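/// For example, a constant 16 qualifies, 12 does not, and 0 qualifies only
/// when OrZero is true.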
bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
                        unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return OrZero;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return CI->getValue().isPowerOf2();
    // TODO: Handle vector constants.
  }

  // 1 << X is clearly a power of two if the one is not shifted off the end.
  // If it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signbit) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignBit(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = 0, *Y = 0;
  // A shift of a power of two is a power of two or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_Shr(m_Value(X), m_Value()))))
    return isPowerOfTwo(X, TD, /*OrZero*/true, Depth);

  if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isPowerOfTwo(ZI->getOperand(0), TD, OrZero, Depth);

  if (SelectInst *SI = dyn_cast<SelectInst>(V))
    return isPowerOfTwo(SI->getTrueValue(), TD, OrZero, Depth) &&
           isPowerOfTwo(SI->getFalseValue(), TD, OrZero, Depth);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isPowerOfTwo(X, TD, /*OrZero*/true, Depth) ||
        isPowerOfTwo(Y, TD, /*OrZero*/true, Depth))
      return true;
    // X & (-X) is always a power of two or zero.
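    // E.g. X = 0b101100: -X = 0b010100 in two's complement, and X & -X =
    // 0b000100, which isolates X's lowest set bit (and is 0 when X is 0).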
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isPowerOfTwo(cast<Operator>(V)->getOperand(0), TD, OrZero, Depth);
  }

  return false;
}

/// isKnownNonZero - Return true if the given value is known to be non-zero
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;
    // TODO: Handle vectors
    return false;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  unsigned BitWidth = getBitWidth(V->getType(), TD);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = 0, *Y = 0;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth);

  // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (BO->hasNoUnsignedWrap())
      return isKnownNonZero(X, TD, Depth);

    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(X, APInt(BitWidth, 1), KnownZero, KnownOne, TD, Depth);
    if (KnownOne[0])
      return true;
  }
  // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, TD, Depth);

    bool XKnownNonNegative, XKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    if (XKnownNegative)
      return true;
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, TD, Depth);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    bool XKnownNonNegative, XKnownNegative;
    bool YKnownNonNegative, YKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnownNonNegative && YKnownNonNegative)
      if (isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
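    // E.g. in 8 bits, (-128) + (-128) wraps to 0, so both operands being
    // INT_MIN is the one case that has to be excluded here.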
    if (BitWidth && XKnownNegative && YKnownNegative) {
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set.  If some other bit is set then X is not
      // equal to INT_MIN.
      ComputeMaskedBits(X, Mask, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
      // The sign bit of Y is set.  If some other bit is set then Y is not
      // equal to INT_MIN.
      ComputeMaskedBits(Y, Mask, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnownNonNegative && isPowerOfTwo(Y, TD, /*OrZero*/false, Depth))
      return true;
    if (YKnownNonNegative && isPowerOfTwo(X, TD, /*OrZero*/false, Depth))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), TD, Depth) &&
        isKnownNonZero(SI->getFalseValue(), TD, Depth))
      return true;
  }

  if (!BitWidth) return false;
  APInt KnownZero(BitWidth, 0);
  APInt KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, APInt::getAllOnesValue(BitWidth), KnownZero, KnownOne,
                    TD, Depth);
  return KnownOne != 0;
}

Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 989 | /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use |
| 990 | /// this predicate to simplify operations downstream. Mask is known to be zero |
| 991 | /// for bits that V cannot have. |
Chris Lattner | cf5128e | 2009-09-08 00:06:16 +0000 | [diff] [blame] | 992 | /// |
| 993 | /// This function is defined on values with integer type, values with pointer |
| 994 | /// type (but only if TD is non-null), and vectors of integers. In the case |
| 995 | /// where V is a vector, the mask, known zero, and known one values are the |
| 996 | /// same width as the vector element, and the bit is set only if it is true |
| 997 | /// for all of the elements in the vector. |
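| | /// |
| | /// A small illustrative sketch (hypothetical values, not taken from a test): |
| | /// if V is "and i32 %X, 255", ComputeMaskedBits reports the top 24 bits of V |
| | /// as zero, so MaskedValueIsZero(V, APInt::getHighBitsSet(32, 24), TD) holds. |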
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 998 | bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, |
Dan Gohman | 846a2f2 | 2009-08-27 17:51:25 +0000 | [diff] [blame] | 999 | const TargetData *TD, unsigned Depth) { |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1000 | APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); |
| 1001 | ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth); |
| 1002 | assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); |
| 1003 | return (KnownZero & Mask) == Mask; |
| 1004 | } |
| 1005 | |
| 1006 | |
| 1007 | |
| 1008 | /// ComputeNumSignBits - Return the number of times the sign bit of the |
| 1009 | /// register is replicated into the other bits. We know that at least 1 bit |
| 1010 | /// is always equal to the sign bit (itself), but other cases can give us |
| 1011 | /// information. For example, immediately after an "ashr X, 2", we know that |
| 1012 | /// the top 3 bits are all equal to each other, so we return 3. |
| 1013 | /// |
| 1014 | /// 'V' must have an integer type or a vector of integers; when TD is |
| | /// non-null, pointer types are handled as well. |
| 1015 | /// |
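| | /// Another illustrative case (a sketch, not an exhaustive specification): for |
| | /// "%t = sext i8 %x to i32", ComputeNumSignBits(%t) returns at least 25, |
| | /// because sign-extension copies the i8 sign bit into the top 24 bits. |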
Dan Gohman | 846a2f2 | 2009-08-27 17:51:25 +0000 | [diff] [blame] | 1016 | unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD, |
| 1017 | unsigned Depth) { |
Duncan Sands | b0bc6c3 | 2010-02-15 16:12:20 +0000 | [diff] [blame] | 1018 | assert((TD || V->getType()->isIntOrIntVectorTy()) && |
Dan Gohman | bd5ce52 | 2009-06-22 22:02:32 +0000 | [diff] [blame] | 1019 | "ComputeNumSignBits requires a TargetData object to operate " |
| 1020 | "on non-integer values!"); |
Chris Lattner | db125cf | 2011-07-18 04:54:35 +0000 | [diff] [blame] | 1021 | Type *Ty = V->getType(); |
Dan Gohman | bd5ce52 | 2009-06-22 22:02:32 +0000 | [diff] [blame] | 1022 | unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) : |
| 1023 | Ty->getScalarSizeInBits(); |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1024 | unsigned Tmp, Tmp2; |
| 1025 | unsigned FirstAnswer = 1; |
| 1026 | |
Chris Lattner | d82e511 | 2008-06-02 18:39:07 +0000 | [diff] [blame] | 1027 | // Note that ConstantInt is handled by the general ComputeMaskedBits case |
| 1028 | // below. |
| 1029 | |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1030 | if (Depth == 6) |
| 1031 | return 1; // Limit search depth. |
| 1032 | |
Dan Gohman | ca17890 | 2009-07-17 20:47:02 +0000 | [diff] [blame] | 1033 | Operator *U = dyn_cast<Operator>(V); |
| 1034 | switch (Operator::getOpcode(V)) { |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1035 | default: break; |
| 1036 | case Instruction::SExt: |
Mon P Wang | 69a0080 | 2009-12-02 04:59:58 +0000 | [diff] [blame] | 1037 | Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1038 | return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp; |
| 1039 | |
Chris Lattner | 6b0dc92 | 2012-01-26 21:37:55 +0000 | [diff] [blame] | 1040 | case Instruction::AShr: { |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1041 | Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1); |
Chris Lattner | 6b0dc92 | 2012-01-26 21:37:55 +0000 | [diff] [blame] | 1042 | // ashr X, C -> adds C sign bits. Vectors too. |
| 1043 | const APInt *ShAmt; |
| 1044 | if (match(U->getOperand(1), m_APInt(ShAmt))) { |
| 1045 | Tmp += ShAmt->getZExtValue(); |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1046 | if (Tmp > TyBits) Tmp = TyBits; |
| 1047 | } |
| 1048 | return Tmp; |
Chris Lattner | 6b0dc92 | 2012-01-26 21:37:55 +0000 | [diff] [blame] | 1049 | } |
| 1050 | case Instruction::Shl: { |
| 1051 | const APInt *ShAmt; |
| 1052 | if (match(U->getOperand(1), m_APInt(ShAmt))) { |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1053 | // shl destroys sign bits. |
| 1054 | Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1); |
Chris Lattner | 6b0dc92 | 2012-01-26 21:37:55 +0000 | [diff] [blame] | 1055 | Tmp2 = ShAmt->getZExtValue(); |
| 1056 | if (Tmp2 >= TyBits || // Bad shift. |
| 1057 | Tmp2 >= Tmp) break; // Shifted all sign bits out. |
| 1058 | return Tmp - Tmp2; |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1059 | } |
| 1060 | break; |
Chris Lattner | 6b0dc92 | 2012-01-26 21:37:55 +0000 | [diff] [blame] | 1061 | } |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1062 | case Instruction::And: |
| 1063 | case Instruction::Or: |
| 1064 | case Instruction::Xor: // NOT is handled here. |
| 1065 | // Logical binary ops preserve the number of sign bits at the worst. |
| 1066 | Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1); |
| 1067 | if (Tmp != 1) { |
| 1068 | Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1); |
| 1069 | FirstAnswer = std::min(Tmp, Tmp2); |
| 1070 | // We computed what we know about the sign bits as our first |
| 1071 | // answer. Now proceed to the generic code that uses |
| 1072 | // ComputeMaskedBits, and pick whichever answer is better. |
| 1073 | } |
| 1074 | break; |
| 1075 | |
| 1076 | case Instruction::Select: |
| 1077 | Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1); |
| 1078 | if (Tmp == 1) return 1; // Early out. |
| 1079 | Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1); |
| 1080 | return std::min(Tmp, Tmp2); |
| 1081 | |
| 1082 | case Instruction::Add: |
| 1083 | // Add can have at most one carry bit. Thus we know that the output |
| 1084 | // is, at worst, one more bit than the inputs. |
| 1085 | Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1); |
| 1086 | if (Tmp == 1) return 1; // Early out. |
| 1087 | |
| 1088 | // Special case decrementing a value (ADD X, -1): |
Dan Gohman | 0001e56 | 2009-02-24 02:00:40 +0000 | [diff] [blame] | 1089 | if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1))) |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1090 | if (CRHS->isAllOnesValue()) { |
| 1091 | APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); |
| 1092 | APInt Mask = APInt::getAllOnesValue(TyBits); |
| 1093 | ComputeMaskedBits(U->getOperand(0), Mask, KnownZero, KnownOne, TD, |
| 1094 | Depth+1); |
| 1095 | |
| 1096 | // If the input is known to be 0 or 1, the output is 0/-1, which is all |
| 1097 | // sign bits set. |
| 1098 | if ((KnownZero | APInt(TyBits, 1)) == Mask) |
| 1099 | return TyBits; |
| 1100 | |
| 1101 | // If we are subtracting one from a positive number, there is no carry |
| 1102 | // out of the result. |
| 1103 | if (KnownZero.isNegative()) |
| 1104 | return Tmp; |
| 1105 | } |
| 1106 | |
| 1107 | Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1); |
| 1108 | if (Tmp2 == 1) return 1; |
Chris Lattner | 8d10f9d | 2010-01-07 23:44:37 +0000 | [diff] [blame] | 1109 | return std::min(Tmp, Tmp2)-1; |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1110 | |
| 1111 | case Instruction::Sub: |
| 1112 | Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1); |
| 1113 | if (Tmp2 == 1) return 1; |
| 1114 | |
| 1115 | // Handle NEG. |
| 1116 | if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0))) |
| 1117 | if (CLHS->isNullValue()) { |
| 1118 | APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); |
| 1119 | APInt Mask = APInt::getAllOnesValue(TyBits); |
| 1120 | ComputeMaskedBits(U->getOperand(1), Mask, KnownZero, KnownOne, |
| 1121 | TD, Depth+1); |
| 1122 | // If the input is known to be 0 or 1, the output is 0/-1, which is all |
| 1123 | // sign bits set. |
| 1124 | if ((KnownZero | APInt(TyBits, 1)) == Mask) |
| 1125 | return TyBits; |
| 1126 | |
| 1127 | // If the input is known to be positive (the sign bit is known clear), |
| 1128 | // the output of the NEG has the same number of sign bits as the input. |
| 1129 | if (KnownZero.isNegative()) |
| 1130 | return Tmp2; |
| 1131 | |
| 1132 | // Otherwise, we treat this like a SUB. |
| 1133 | } |
| 1134 | |
| 1135 | // Sub can have at most one carry bit. Thus we know that the output |
| 1136 | // is, at worst, one more bit than the inputs. |
| 1137 | Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1); |
| 1138 | if (Tmp == 1) return 1; // Early out. |
Chris Lattner | 8d10f9d | 2010-01-07 23:44:37 +0000 | [diff] [blame] | 1139 | return std::min(Tmp, Tmp2)-1; |
| 1140 | |
| 1141 | case Instruction::PHI: { |
| 1142 | PHINode *PN = cast<PHINode>(U); |
| 1143 | // Don't analyze large in-degree PHIs. |
| 1144 | if (PN->getNumIncomingValues() > 4) break; |
| 1145 | |
| 1146 | // Take the minimum of all incoming values. This can't infinitely loop |
| 1147 | // because of our depth threshold. |
| 1148 | Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1); |
| 1149 | for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) { |
| 1150 | if (Tmp == 1) return Tmp; |
| 1151 | Tmp = std::min(Tmp, |
Evan Cheng | 0af20d8 | 2010-03-13 02:20:29 +0000 | [diff] [blame] | 1152 | ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1)); |
Chris Lattner | 8d10f9d | 2010-01-07 23:44:37 +0000 | [diff] [blame] | 1153 | } |
| 1154 | return Tmp; |
| 1155 | } |
| 1156 | |
Chris Lattner | 173234a | 2008-06-02 01:18:21 +0000 | [diff] [blame] | 1157 | case Instruction::Trunc: |
| 1158 | // FIXME: it's tricky to do anything useful for this, but it is an important |
| 1159 | // case for targets like X86. |
| 1160 | break; |
| 1161 | } |
| 1162 | |
| 1163 | // Finally, if we can prove that the top bits of the result are 0's or 1's, |
| 1164 | // use this information. |
| 1165 | APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); |
| 1166 | APInt Mask = APInt::getAllOnesValue(TyBits); |
| 1167 | ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth); |
| 1168 | |
| 1169 | if (KnownZero.isNegative()) { // sign bit is 0 |
| 1170 | Mask = KnownZero; |
| 1171 | } else if (KnownOne.isNegative()) { // sign bit is 1; |
| 1172 | Mask = KnownOne; |
| 1173 | } else { |
| 1174 | // Nothing known. |
| 1175 | return FirstAnswer; |
| 1176 | } |
| 1177 | |
| 1178 | // Okay, we know that the sign bit in Mask is set. Use CLZ to determine |
| 1179 | // the number of identical bits in the top of the input value. |
| 1180 | Mask = ~Mask; |
| 1181 | Mask <<= Mask.getBitWidth()-TyBits; |
| 1182 | // Return # leading zeros. We use 'min' here in case the value was zero |
| 1183 | // before shifting. We don't want to return '64' for an i32 "0". |
| 1184 | return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros())); |
| 1185 | } |
Chris Lattner | 833f25d | 2008-06-02 01:29:46 +0000 | [diff] [blame] | 1186 | |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1187 | /// ComputeMultiple - This function computes the integer multiple of Base that |
| 1188 | /// equals V. If successful, it returns true and returns the multiple in |
Dan Gohman | 3dbb9e6 | 2009-11-18 00:58:27 +0000 | [diff] [blame] | 1189 | /// Multiple. If unsuccessful, it returns false. It looks |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1190 | /// through SExt instructions only if LookThroughSExt is true. |
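| | /// |
| | /// For example (an illustrative sketch using hypothetical IR): for |
| | /// V = "shl i32 %n, 3" and Base = 8, the shift is rewritten as a multiply |
| | /// by 8, so this returns true with Multiple set to %n. |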
| 1191 | bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, |
Dan Gohman | 3dbb9e6 | 2009-11-18 00:58:27 +0000 | [diff] [blame] | 1192 | bool LookThroughSExt, unsigned Depth) { |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1193 | const unsigned MaxDepth = 6; |
| 1194 | |
Dan Gohman | 3dbb9e6 | 2009-11-18 00:58:27 +0000 | [diff] [blame] | 1195 | assert(V && "No Value?"); |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1196 | assert(Depth <= MaxDepth && "Limit Search Depth"); |
Duncan Sands | b0bc6c3 | 2010-02-15 16:12:20 +0000 | [diff] [blame] | 1197 | assert(V->getType()->isIntegerTy() && "Not an integer type!"); |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1198 | |
Chris Lattner | db125cf | 2011-07-18 04:54:35 +0000 | [diff] [blame] | 1199 | Type *T = V->getType(); |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1200 | |
Dan Gohman | 3dbb9e6 | 2009-11-18 00:58:27 +0000 | [diff] [blame] | 1201 | ConstantInt *CI = dyn_cast<ConstantInt>(V); |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1202 | |
| 1203 | if (Base == 0) |
| 1204 | return false; |
| 1205 | |
| 1206 | if (Base == 1) { |
| 1207 | Multiple = V; |
| 1208 | return true; |
| 1209 | } |
| 1210 | |
| 1211 | ConstantExpr *CO = dyn_cast<ConstantExpr>(V); |
| 1212 | Constant *BaseVal = ConstantInt::get(T, Base); |
| 1213 | if (CO && CO == BaseVal) { |
| 1214 | // Multiple is 1. |
| 1215 | Multiple = ConstantInt::get(T, 1); |
| 1216 | return true; |
| 1217 | } |
| 1218 | |
| 1219 | if (CI && CI->getZExtValue() % Base == 0) { |
| 1220 | Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); |
| 1221 | return true; |
| 1222 | } |
| 1223 | |
| 1224 | if (Depth == MaxDepth) return false; // Limit search depth. |
| 1225 | |
| 1226 | Operator *I = dyn_cast<Operator>(V); |
| 1227 | if (!I) return false; |
| 1228 | |
| 1229 | switch (I->getOpcode()) { |
| 1230 | default: break; |
Chris Lattner | 11fe726 | 2009-11-26 01:50:12 +0000 | [diff] [blame] | 1231 | case Instruction::SExt: |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1232 | if (!LookThroughSExt) return false; |
| 1233 | // otherwise fall through to ZExt |
Chris Lattner | 11fe726 | 2009-11-26 01:50:12 +0000 | [diff] [blame] | 1234 | case Instruction::ZExt: |
Dan Gohman | 3dbb9e6 | 2009-11-18 00:58:27 +0000 | [diff] [blame] | 1235 | return ComputeMultiple(I->getOperand(0), Base, Multiple, |
| 1236 | LookThroughSExt, Depth+1); |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1237 | case Instruction::Shl: |
| 1238 | case Instruction::Mul: { |
| 1239 | Value *Op0 = I->getOperand(0); |
| 1240 | Value *Op1 = I->getOperand(1); |
| 1241 | |
| 1242 | if (I->getOpcode() == Instruction::Shl) { |
| 1243 | ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); |
| 1244 | if (!Op1CI) return false; |
| 1245 | // Turn Op0 << Op1 into Op0 * 2^Op1 |
| 1246 | APInt Op1Int = Op1CI->getValue(); |
| 1247 | uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); |
Jay Foad | a99793c | 2010-11-30 09:02:01 +0000 | [diff] [blame] | 1248 | APInt API(Op1Int.getBitWidth(), 0); |
Jay Foad | 7a874dd | 2010-12-01 08:53:58 +0000 | [diff] [blame] | 1249 | API.setBit(BitToSet); |
Jay Foad | a99793c | 2010-11-30 09:02:01 +0000 | [diff] [blame] | 1250 | Op1 = ConstantInt::get(V->getContext(), API); |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1251 | } |
| 1252 | |
| 1253 | Value *Mul0 = NULL; |
Chris Lattner | e971131 | 2010-09-05 17:20:46 +0000 | [diff] [blame] | 1254 | if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { |
| 1255 | if (Constant *Op1C = dyn_cast<Constant>(Op1)) |
| 1256 | if (Constant *MulC = dyn_cast<Constant>(Mul0)) { |
| 1257 | if (Op1C->getType()->getPrimitiveSizeInBits() < |
| 1258 | MulC->getType()->getPrimitiveSizeInBits()) |
| 1259 | Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); |
| 1260 | if (Op1C->getType()->getPrimitiveSizeInBits() > |
| 1261 | MulC->getType()->getPrimitiveSizeInBits()) |
| 1262 | MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); |
| 1263 | |
| 1264 | // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) |
| 1265 | Multiple = ConstantExpr::getMul(MulC, Op1C); |
| 1266 | return true; |
| 1267 | } |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1268 | |
| 1269 | if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) |
| 1270 | if (Mul0CI->getValue() == 1) { |
| 1271 | // V == Base * Op1, so return Op1 |
| 1272 | Multiple = Op1; |
| 1273 | return true; |
| 1274 | } |
| 1275 | } |
| 1276 | |
Chris Lattner | e971131 | 2010-09-05 17:20:46 +0000 | [diff] [blame] | 1277 | Value *Mul1 = NULL; |
| 1278 | if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { |
| 1279 | if (Constant *Op0C = dyn_cast<Constant>(Op0)) |
| 1280 | if (Constant *MulC = dyn_cast<Constant>(Mul1)) { |
| 1281 | if (Op0C->getType()->getPrimitiveSizeInBits() < |
| 1282 | MulC->getType()->getPrimitiveSizeInBits()) |
| 1283 | Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); |
| 1284 | if (Op0C->getType()->getPrimitiveSizeInBits() > |
| 1285 | MulC->getType()->getPrimitiveSizeInBits()) |
| 1286 | MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); |
| 1287 | |
| 1288 | // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) |
| 1289 | Multiple = ConstantExpr::getMul(MulC, Op0C); |
| 1290 | return true; |
| 1291 | } |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1292 | |
| 1293 | if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) |
| 1294 | if (Mul1CI->getValue() == 1) { |
| 1295 | // V == Base * Op0, so return Op0 |
| 1296 | Multiple = Op0; |
| 1297 | return true; |
| 1298 | } |
| 1299 | } |
Victor Hernandez | 2b6705f | 2009-11-10 08:28:35 +0000 | [diff] [blame] | 1300 | } |
| 1301 | } |
| 1302 | |
| 1303 | // We could not determine if V is a multiple of Base. |
| 1304 | return false; |
| 1305 | } |
| 1306 | |
Chris Lattner | 833f25d | 2008-06-02 01:29:46 +0000 | [diff] [blame] | 1307 | /// CannotBeNegativeZero - Return true if we can prove that the specified FP |
| 1308 | /// value is never equal to -0.0. |
| 1309 | /// |
| 1310 | /// NOTE: this function will need to be revisited when we support non-default |
| 1311 | /// rounding modes! |
| 1312 | /// |
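| | /// For instance (illustrative, and assuming the default rounding mode): for |
| | /// "%r = fadd float %x, 0.0" this returns true, since adding +0.0 cannot |
| | /// produce -0.0, while for a plain function argument it conservatively |
| | /// returns false. |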
| 1313 | bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) { |
| 1314 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) |
| 1315 | return !CFP->getValueAPF().isNegZero(); |
| 1316 | |
| 1317 | if (Depth == 6) |
| 1318 | return false; // Limit search depth; conservatively assume it could be -0.0. |
| 1319 | |
Dan Gohman | ca17890 | 2009-07-17 20:47:02 +0000 | [diff] [blame] | 1320 | const Operator *I = dyn_cast<Operator>(V); |
Chris Lattner | 833f25d | 2008-06-02 01:29:46 +0000 | [diff] [blame] | 1321 | if (I == 0) return false; |
| 1322 | |
| 1323 | // (add x, 0.0) is guaranteed to return +0.0, not -0.0. |
Dan Gohman | ae3a0be | 2009-06-04 22:49:04 +0000 | [diff] [blame] | 1324 | if (I->getOpcode() == Instruction::FAdd && |
Chris Lattner | 833f25d | 2008-06-02 01:29:46 +0000 | [diff] [blame] | 1325 | isa<ConstantFP>(I->getOperand(1)) && |
| 1326 | cast<ConstantFP>(I->getOperand(1))->isNullValue()) |
| 1327 | return true; |
| 1328 | |
| 1329 | // sitofp and uitofp turn into +0.0 for zero. |
| 1330 | if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I)) |
| 1331 | return true; |
| 1332 | |
| 1333 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) |
| 1334 | // sqrt(-0.0) = -0.0, no other negative results are possible. |
| 1335 | if (II->getIntrinsicID() == Intrinsic::sqrt) |
Gabor Greif | 71339c9 | 2010-06-23 23:38:07 +0000 | [diff] [blame] | 1336 | return CannotBeNegativeZero(II->getArgOperand(0), Depth+1); |
Chris Lattner | 833f25d | 2008-06-02 01:29:46 +0000 | [diff] [blame] | 1337 | |
| 1338 | if (const CallInst *CI = dyn_cast<CallInst>(I)) |
| 1339 | if (const Function *F = CI->getCalledFunction()) { |
| 1340 | if (F->isDeclaration()) { |
Daniel Dunbar | f0443c1 | 2009-07-26 08:34:35 +0000 | [diff] [blame] | 1341 | // abs(x) != -0.0 |
| 1342 | if (F->getName() == "abs") return true; |
Dale Johannesen | 9d06175 | 2009-09-25 20:54:50 +0000 | [diff] [blame] | 1343 | // fabs[lf](x) != -0.0 |
| 1344 | if (F->getName() == "fabs") return true; |
| 1345 | if (F->getName() == "fabsf") return true; |
| 1346 | if (F->getName() == "fabsl") return true; |
| 1347 | if (F->getName() == "sqrt" || F->getName() == "sqrtf" || |
| 1348 | F->getName() == "sqrtl") |
Gabor Greif | 71339c9 | 2010-06-23 23:38:07 +0000 | [diff] [blame] | 1349 | return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1); |
Chris Lattner | 833f25d | 2008-06-02 01:29:46 +0000 | [diff] [blame] | 1350 | } |
| 1351 | } |
| 1352 | |
| 1353 | return false; |
| 1354 | } |
| 1355 | |
Chris Lattner | bb89710 | 2010-12-26 20:15:01 +0000 | [diff] [blame] | 1356 | /// isBytewiseValue - If the specified value can be set by repeating the same |
| 1357 | /// byte in memory, return the i8 value that it is represented with. This is |
| 1358 | /// true for all i8 values obviously, but is also true for i32 0, i32 -1, |
| 1359 | /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated |
| 1360 | /// byte store (e.g. i16 0x1234), return null. |
| 1361 | Value *llvm::isBytewiseValue(Value *V) { |
| 1362 | // All byte-wide stores are splatable, even of arbitrary variables. |
| 1363 | if (V->getType()->isIntegerTy(8)) return V; |
Chris Lattner | 41bfbb0 | 2011-02-19 19:35:49 +0000 | [diff] [blame] | 1364 | |
| 1365 | // Handle 'null' ConstantAggregateZero etc. |
| 1366 | if (Constant *C = dyn_cast<Constant>(V)) |
| 1367 | if (C->isNullValue()) |
| 1368 | return Constant::getNullValue(Type::getInt8Ty(V->getContext())); |
Chris Lattner | bb89710 | 2010-12-26 20:15:01 +0000 | [diff] [blame] | 1369 | |
| 1370 | // Constant float and double values can be handled as integer values if the |
| 1371 | // corresponding integer value is "byteable". An important case is 0.0. |
| 1372 | if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { |
| 1373 | if (CFP->getType()->isFloatTy()) |
| 1374 | V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext())); |
| 1375 | if (CFP->getType()->isDoubleTy()) |
| 1376 | V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext())); |
| 1377 | // Don't handle long double formats, which have strange constraints. |
| 1378 | } |
| 1379 | |
| 1380 | // We can handle constant integers that are power of two in size and a |
| 1381 | // multiple of 8 bits. |
| 1382 | if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { |
| 1383 | unsigned Width = CI->getBitWidth(); |
| 1384 | if (isPowerOf2_32(Width) && Width > 8) { |
| 1385 | // We can handle this value if the recursive binary decomposition is the |
| 1386 | // same at all levels. |
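| | // For instance (an illustrative walk-through): i32 0xF0F0F0F0 splits into |
| | // 0xF0F0 / 0xF0F0 and then 0xF0 / 0xF0, so the splat byte 0xF0 is returned, |
| | // while i32 0x12341234 fails at the second split (0x12 != 0x34) and gives null. |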
| 1387 | APInt Val = CI->getValue(); |
| 1388 | APInt Val2; |
| 1389 | while (Val.getBitWidth() != 8) { |
| 1390 | unsigned NextWidth = Val.getBitWidth()/2; |
| 1391 | Val2 = Val.lshr(NextWidth); |
| 1392 | Val2 = Val2.trunc(Val.getBitWidth()/2); |
| 1393 | Val = Val.trunc(Val.getBitWidth()/2); |
| 1394 | |
| 1395 | // If the top/bottom halves aren't the same, reject it. |
| 1396 | if (Val != Val2) |
| 1397 | return 0; |
| 1398 | } |
| 1399 | return ConstantInt::get(V->getContext(), Val); |
| 1400 | } |
| 1401 | } |
| 1402 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1403 | // A ConstantDataArray/Vector is splatable if all its members are equal and |
| 1404 | // also splatable. |
| 1405 | if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { |
| 1406 | Value *Elt = CA->getElementAsConstant(0); |
| 1407 | Value *Val = isBytewiseValue(Elt); |
Chris Lattner | bb89710 | 2010-12-26 20:15:01 +0000 | [diff] [blame] | 1408 | if (!Val) |
| 1409 | return 0; |
| 1410 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1411 | for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) |
| 1412 | if (CA->getElementAsConstant(I) != Elt) |
Chris Lattner | bb89710 | 2010-12-26 20:15:01 +0000 | [diff] [blame] | 1413 | return 0; |
| 1414 | |
| 1415 | return Val; |
| 1416 | } |
Chad Rosier | dce42b7 | 2011-12-06 00:19:08 +0000 | [diff] [blame] | 1417 | |
Chris Lattner | bb89710 | 2010-12-26 20:15:01 +0000 | [diff] [blame] | 1418 | // Conceptually, we could handle things like: |
| 1419 | // %a = zext i8 %X to i16 |
| 1420 | // %b = shl i16 %a, 8 |
| 1421 | // %c = or i16 %a, %b |
| 1422 | // but until there is an example that actually needs this, it doesn't seem |
| 1423 | // worth worrying about. |
| 1424 | return 0; |
| 1425 | } |
| 1426 | |
| 1427 | |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1428 | // This is the recursive version of BuildSubAggregate. It takes a few different |
| 1429 | // arguments. Idxs is the index within the nested struct From that we are |
| 1430 | // looking at now (which is of type IndexedType). IdxSkip is the number of |
| 1431 | // indices from Idxs that should be left out when inserting into the resulting |
| 1432 | // struct. To is the result struct built so far, new insertvalue instructions |
| 1433 | // build on that. |
Chris Lattner | db125cf | 2011-07-18 04:54:35 +0000 | [diff] [blame] | 1434 | static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, |
Dan Gohman | 7db949d | 2009-08-07 01:32:21 +0000 | [diff] [blame] | 1435 | SmallVector<unsigned, 10> &Idxs, |
| 1436 | unsigned IdxSkip, |
Dan Gohman | 7db949d | 2009-08-07 01:32:21 +0000 | [diff] [blame] | 1437 | Instruction *InsertBefore) { |
Chris Lattner | db125cf | 2011-07-18 04:54:35 +0000 | [diff] [blame] | 1438 | llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1439 | if (STy) { |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1440 | // Save the original To argument so we can modify it |
| 1441 | Value *OrigTo = To; |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1442 | // General case, the type indexed by Idxs is a struct |
| 1443 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
| 1444 | // Process each struct element recursively |
| 1445 | Idxs.push_back(i); |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1446 | Value *PrevTo = To; |
Matthijs Kooijman | 710eb23 | 2008-06-16 12:57:37 +0000 | [diff] [blame] | 1447 | To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, |
Nick Lewycky | ae3d802 | 2009-11-23 03:29:18 +0000 | [diff] [blame] | 1448 | InsertBefore); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1449 | Idxs.pop_back(); |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1450 | if (!To) { |
| 1451 | // Couldn't find any inserted value for this index? Cleanup |
| 1452 | while (PrevTo != OrigTo) { |
| 1453 | InsertValueInst* Del = cast<InsertValueInst>(PrevTo); |
| 1454 | PrevTo = Del->getAggregateOperand(); |
| 1455 | Del->eraseFromParent(); |
| 1456 | } |
| 1457 | // Stop processing elements |
| 1458 | break; |
| 1459 | } |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1460 | } |
Chris Lattner | 7a2bdde | 2011-04-15 05:18:47 +0000 | [diff] [blame] | 1461 | // If we successfully found a value for each of our subaggregates |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1462 | if (To) |
| 1463 | return To; |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1464 | } |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1465 | // Base case, the type indexed by SourceIdxs is not a struct, or not all of |
| 1466 | // the struct's elements had a value that was inserted directly. In the latter |
| 1467 | // case, perhaps we can't determine each of the subelements individually, but |
| 1468 | // we might be able to find the complete struct somewhere. |
| 1469 | |
| 1470 | // Find the value that is at that particular spot |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1471 | Value *V = FindInsertedValue(From, Idxs); |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1472 | |
| 1473 | if (!V) |
| 1474 | return NULL; |
| 1475 | |
| 1476 | // Insert the value into the new (sub) aggregate. |
Frits van Bommel | 39b5abf | 2011-07-18 12:00:32 +0000 | [diff] [blame] | 1477 | return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1478 | "tmp", InsertBefore); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1479 | } |
| 1480 | |
| 1481 | // This helper takes a nested struct and extracts a part of it (which is again a |
| 1482 | // struct) into a new value. For example, given the struct: |
| 1483 | // { a, { b, { c, d }, e } } |
| 1484 | // and the indices "1, 1" this returns |
| 1485 | // { c, d }. |
| 1486 | // |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1487 | // It does this by inserting an insertvalue for each element in the resulting |
| 1488 | // struct, as opposed to just inserting a single struct. This will only work if |
| 1489 | // each of the elements of the substruct are known (ie, inserted into From by an |
| 1490 | // insertvalue instruction somewhere). |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1491 | // |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1492 | // All inserted insertvalue instructions are inserted before InsertBefore |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1493 | static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, |
Dan Gohman | 7db949d | 2009-08-07 01:32:21 +0000 | [diff] [blame] | 1494 | Instruction *InsertBefore) { |
Matthijs Kooijman | 9772891 | 2008-06-16 13:28:31 +0000 | [diff] [blame] | 1495 | assert(InsertBefore && "Must have someplace to insert!"); |
Chris Lattner | db125cf | 2011-07-18 04:54:35 +0000 | [diff] [blame] | 1496 | Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1497 | idx_range); |
Owen Anderson | 9e9a0d5 | 2009-07-30 23:03:37 +0000 | [diff] [blame] | 1498 | Value *To = UndefValue::get(IndexedType); |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1499 | SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1500 | unsigned IdxSkip = Idxs.size(); |
| 1501 | |
Nick Lewycky | ae3d802 | 2009-11-23 03:29:18 +0000 | [diff] [blame] | 1502 | return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1503 | } |
| 1504 | |
Matthijs Kooijman | 710eb23 | 2008-06-16 12:57:37 +0000 | [diff] [blame] | 1505 | /// FindInsertedValue - Given an aggregate and a sequence of indices, see if |
| 1506 | /// the scalar value indexed is already around as a register, for example if it |
| 1507 | /// were inserted directly into the aggregate. |
Matthijs Kooijman | 0a9aaf4 | 2008-06-16 14:13:46 +0000 | [diff] [blame] | 1508 | /// |
| 1509 | /// If InsertBefore is not null, this function will duplicate (modified) |
| 1510 | /// insertvalues when a part of a nested struct is extracted. |
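| | /// |
| | /// A small illustration (hypothetical IR): given |
| | /// %agg = insertvalue {i32, i32} undef, i32 42, 1 |
| | /// FindInsertedValue(%agg, 1) returns the i32 42 directly, without creating |
| | /// any new instructions. |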
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1511 | Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, |
| 1512 | Instruction *InsertBefore) { |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1513 | // Nothing to index? Just return V then (this is useful at the end of our |
Chris Lattner | df39028 | 2012-01-24 07:54:10 +0000 | [diff] [blame] | 1514 | // recursion). |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1515 | if (idx_range.empty()) |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1516 | return V; |
Chris Lattner | df39028 | 2012-01-24 07:54:10 +0000 | [diff] [blame] | 1517 | // We have indices, so V should have an indexable type. |
| 1518 | assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && |
| 1519 | "Not looking at a struct or array?"); |
| 1520 | assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && |
| 1521 | "Invalid indices for type?"); |
Owen Anderson | 76f600b | 2009-07-06 22:37:39 +0000 | [diff] [blame] | 1522 | |
Chris Lattner | a1f00f4 | 2012-01-25 06:48:06 +0000 | [diff] [blame] | 1523 | if (Constant *C = dyn_cast<Constant>(V)) { |
| 1524 | C = C->getAggregateElement(idx_range[0]); |
| 1525 | if (C == 0) return 0; |
| 1526 | return FindInsertedValue(C, idx_range.slice(1), InsertBefore); |
| 1527 | } |
Chris Lattner | df39028 | 2012-01-24 07:54:10 +0000 | [diff] [blame] | 1528 | |
| 1529 | if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1530 | // Loop the indices for the insertvalue instruction in parallel with the |
| 1531 | // requested indices |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1532 | const unsigned *req_idx = idx_range.begin(); |
Matthijs Kooijman | 710eb23 | 2008-06-16 12:57:37 +0000 | [diff] [blame] | 1533 | for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); |
| 1534 | i != e; ++i, ++req_idx) { |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1535 | if (req_idx == idx_range.end()) { |
Chris Lattner | df39028 | 2012-01-24 07:54:10 +0000 | [diff] [blame] | 1536 | // We can't handle this without inserting insertvalues |
| 1537 | if (!InsertBefore) |
Matthijs Kooijman | 9772891 | 2008-06-16 13:28:31 +0000 | [diff] [blame] | 1538 | return 0; |
Chris Lattner | df39028 | 2012-01-24 07:54:10 +0000 | [diff] [blame] | 1539 | |
| 1540 | // The requested index identifies a part of a nested aggregate. Handle |
| 1541 | // this specially. For example, |
| 1542 | // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 |
| 1543 | // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 |
| 1544 | // %C = extractvalue {i32, { i32, i32 } } %B, 1 |
| 1545 | // This can be changed into |
| 1546 | // %A = insertvalue {i32, i32 } undef, i32 10, 0 |
| 1547 | // %C = insertvalue {i32, i32 } %A, i32 11, 1 |
| 1548 | // which allows the unused 0,0 element from the nested struct to be |
| 1549 | // removed. |
| 1550 | return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), |
| 1551 | InsertBefore); |
Duncan Sands | 9954c76 | 2008-06-19 08:47:31 +0000 | [diff] [blame] | 1552 | } |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1553 | |
| 1554 | // This insertvalue inserts something other than what we are looking for. |
| 1555 | // See if the (aggregate) value being inserted into has the value we are |
| 1556 | // looking for, then. |
| 1557 | if (*req_idx != *i) |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1558 | return FindInsertedValue(I->getAggregateOperand(), idx_range, |
Nick Lewycky | ae3d802 | 2009-11-23 03:29:18 +0000 | [diff] [blame] | 1559 | InsertBefore); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1560 | } |
| 1561 | // If we end up here, the indices of the insertvalue match with those |
| 1562 | // requested (though possibly only partially). Now we recursively look at |
| 1563 | // the inserted value, passing any remaining indices. |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1564 | return FindInsertedValue(I->getInsertedValueOperand(), |
Frits van Bommel | 39b5abf | 2011-07-18 12:00:32 +0000 | [diff] [blame] | 1565 | makeArrayRef(req_idx, idx_range.end()), |
Nick Lewycky | ae3d802 | 2009-11-23 03:29:18 +0000 | [diff] [blame] | 1566 | InsertBefore); |
Chris Lattner | df39028 | 2012-01-24 07:54:10 +0000 | [diff] [blame] | 1567 | } |
| 1568 | |
| 1569 | if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1570 | // If we're extracting a value from an aggregate that was extracted from |
| 1571 | // something else, we can extract from that something else directly instead. |
| 1572 | // However, we will need to chain I's indices with the requested indices. |
| 1573 | |
| 1574 | // Calculate the number of indices required |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1575 | unsigned size = I->getNumIndices() + idx_range.size(); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1576 | // Allocate some space to put the new indices in |
Matthijs Kooijman | 3faf9df | 2008-06-17 08:24:37 +0000 | [diff] [blame] | 1577 | SmallVector<unsigned, 5> Idxs; |
| 1578 | Idxs.reserve(size); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1579 | // Add indices from the extract value instruction |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1580 | Idxs.append(I->idx_begin(), I->idx_end()); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1581 | |
| 1582 | // Add requested indices |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1583 | Idxs.append(idx_range.begin(), idx_range.end()); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1584 | |
Matthijs Kooijman | 3faf9df | 2008-06-17 08:24:37 +0000 | [diff] [blame] | 1585 | assert(Idxs.size() == size |
Matthijs Kooijman | 710eb23 | 2008-06-16 12:57:37 +0000 | [diff] [blame] | 1586 | && "Number of indices added not correct?"); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1587 | |
Jay Foad | fc6d3a4 | 2011-07-13 10:26:04 +0000 | [diff] [blame] | 1588 | return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); |
Matthijs Kooijman | b23d5ad | 2008-06-16 12:48:21 +0000 | [diff] [blame] | 1589 | } |
| 1590 | // Otherwise, we don't know (such as, extracting from a function return value |
| 1591 | // or load instruction) |
| 1592 | return 0; |
| 1593 | } |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1594 | |
Chris Lattner | ed58a6f | 2010-11-30 22:25:26 +0000 | [diff] [blame] | 1595 | /// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if |
| 1596 | /// it can be expressed as a base pointer plus a constant offset. Return the |
| 1597 | /// base and offset to the caller. |
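| | /// |
| | /// For example (an illustrative sketch with a hypothetical global @G): for |
| | /// %p = getelementptr inbounds [10 x i32]* @G, i32 0, i32 3 |
| | /// this returns @G and adds 12 to Offset (four bytes per i32 element). |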
| 1598 | Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, |
| 1599 | const TargetData &TD) { |
| 1600 | Operator *PtrOp = dyn_cast<Operator>(Ptr); |
Nadav Rotem | 1608769 | 2011-12-05 06:29:09 +0000 | [diff] [blame] | 1601 | if (PtrOp == 0 || Ptr->getType()->isVectorTy()) |
| 1602 | return Ptr; |
Chris Lattner | ed58a6f | 2010-11-30 22:25:26 +0000 | [diff] [blame] | 1603 | |
| 1604 | // Just look through bitcasts. |
| 1605 | if (PtrOp->getOpcode() == Instruction::BitCast) |
| 1606 | return GetPointerBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD); |
| 1607 | |
| 1608 | // If this is a GEP with constant indices, we can look through it. |
| 1609 | GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp); |
| 1610 | if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr; |
| 1611 | |
| 1612 | gep_type_iterator GTI = gep_type_begin(GEP); |
| 1613 | for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E; |
| 1614 | ++I, ++GTI) { |
| 1615 | ConstantInt *OpC = cast<ConstantInt>(*I); |
| 1616 | if (OpC->isZero()) continue; |
| 1617 | |
| 1618 | // Handle a struct and array indices which add their offset to the pointer. |
Chris Lattner | db125cf | 2011-07-18 04:54:35 +0000 | [diff] [blame] | 1619 | if (StructType *STy = dyn_cast<StructType>(*GTI)) { |
Chris Lattner | ed58a6f | 2010-11-30 22:25:26 +0000 | [diff] [blame] | 1620 | Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue()); |
| 1621 | } else { |
| 1622 | uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()); |
| 1623 | Offset += OpC->getSExtValue()*Size; |
| 1624 | } |
| 1625 | } |
| 1626 | |
| 1627 | // Re-sign extend from the pointer size if needed to get overflow edge cases |
| 1628 | // right. |
| 1629 | unsigned PtrSize = TD.getPointerSizeInBits(); |
| 1630 | if (PtrSize < 64) |
| 1631 | Offset = (Offset << (64-PtrSize)) >> (64-PtrSize); |
| 1632 | |
| 1633 | return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD); |
| 1634 | } |
| 1635 | |
| 1636 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1637 | /// getConstantStringInfo - This function extracts the bytes of the constant |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1638 | /// string pointed to by V, starting at byte Offset. If successful, it returns |
| 1639 | /// true and returns the string in Str; if unsuccessful, it returns false. |
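| | /// |
| | /// A hypothetical example: for @s = constant [6 x i8] c"hello\00" and V a GEP |
| | /// to element 1 of @s, this returns true with Str == "ello" (the terminating |
| | /// nul is dropped when TrimAtNul is true). |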
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1640 | bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, |
| 1641 | uint64_t Offset, bool TrimAtNul) { |
| 1642 | assert(V); |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1643 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1644 | // Look through bitcast instructions and geps. |
| 1645 | V = V->stripPointerCasts(); |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1646 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1647 | // If the value is a GEP instruction or constant expression, treat it as an |
| 1648 | // offset. |
| 1649 | if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1650 | // Make sure the GEP has exactly three arguments. |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1651 | if (GEP->getNumOperands() != 3) |
| 1652 | return false; |
| 1653 | |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1654 | // Make sure the pointer operand is a pointer to an array of i8. |
Chris Lattner | db125cf | 2011-07-18 04:54:35 +0000 | [diff] [blame] | 1655 | PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType()); |
| 1656 | ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType()); |
Duncan Sands | b0bc6c3 | 2010-02-15 16:12:20 +0000 | [diff] [blame] | 1657 | if (AT == 0 || !AT->getElementType()->isIntegerTy(8)) |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1658 | return false; |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1659 | |
| 1660 | // Check to make sure that the first operand of the GEP is an integer and |
| 1661 | // has value 0 so that we are sure we're indexing into the initializer. |
Dan Gohman | 0a60fa3 | 2010-04-14 22:20:45 +0000 | [diff] [blame] | 1662 | const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1663 | if (FirstIdx == 0 || !FirstIdx->isZero()) |
| 1664 | return false; |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1665 | |
| 1666 | // If the second index isn't a ConstantInt, then this is a variable index |
| 1667 | // into the array. If this occurs, we can't say anything meaningful about |
| 1668 | // the string. |
| 1669 | uint64_t StartIdx = 0; |
Dan Gohman | 0a60fa3 | 2010-04-14 22:20:45 +0000 | [diff] [blame] | 1670 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1671 | StartIdx = CI->getZExtValue(); |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1672 | else |
| 1673 | return false; |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1674 | return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset); |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1675 | } |
Nick Lewycky | 0cd0fee | 2011-10-20 00:34:35 +0000 | [diff] [blame] | 1676 | |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1677 | // The GEP instruction, constant or instruction, must reference a global |
| 1678 | // variable that is a constant and is initialized. The referenced constant |
| 1679 | // initializer is the array that we'll use for optimization. |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1680 | const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); |
Dan Gohman | 8255573 | 2009-08-19 18:20:44 +0000 | [diff] [blame] | 1681 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1682 | return false; |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1683 | |
Nick Lewycky | 0cd0fee | 2011-10-20 00:34:35 +0000 | [diff] [blame] | 1684 | // Handle the all-zeros case |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1685 | if (GV->getInitializer()->isNullValue()) { |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1686 | // This is a degenerate case. The initializer is constant zero so the |
| 1687 | // length of the string must be zero. |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1688 | Str = ""; |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1689 | return true; |
| 1690 | } |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1691 | |
| 1692 | // The initializer must be a ConstantDataArray containing a string. |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1693 | const ConstantDataArray *Array = |
| 1694 | dyn_cast<ConstantDataArray>(GV->getInitializer()); |
| 1695 | if (Array == 0 || !Array->isString()) |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1696 | return false; |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1697 | |
| 1698 | // Get the number of elements in the array |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1699 | uint64_t NumElts = Array->getType()->getArrayNumElements(); |
| 1700 | |
| 1701 | // Start out with the entire array in the StringRef. |
| 1702 | Str = Array->getAsString(); |
| 1703 | |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1704 | if (Offset > NumElts) |
| 1705 | return false; |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1706 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1707 | // Skip over 'offset' bytes. |
| 1708 | Str = Str.substr(Offset); |
Argyrios Kyrtzidis | 91766fe | 2012-02-01 04:51:17 +0000 | [diff] [blame] | 1709 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1710 | if (TrimAtNul) { |
| 1711 | // Trim off the \0 and anything after it. If the array is not nul |
| 1712 | // terminated, we just return the whole end of string. The client may know |
| 1713 | // some other way that the string is length-bound. |
| 1714 | Str = Str.substr(0, Str.find('\0')); |
| 1715 | } |
Bill Wendling | 0582ae9 | 2009-03-13 04:39:26 +0000 | [diff] [blame] | 1716 | return true; |
Evan Cheng | 0ff39b3 | 2008-06-30 07:31:25 +0000 | [diff] [blame] | 1717 | } |
Eric Christopher | 25ec483 | 2010-03-05 06:58:57 +0000 | [diff] [blame] | 1718 | |
| 1719 | // These next two are very similar to the above, but also look through PHI |
| 1720 | // nodes. |
| 1721 | // TODO: See if we can integrate these two together. |
| 1722 | |
| 1723 | /// GetStringLengthH - If we can compute the length of the string pointed to by |
| 1724 | /// the specified pointer, return 'len+1'. If we can't, return 0. |
| 1725 | static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) { |
| 1726 | // Look through noop bitcast instructions. |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1727 | V = V->stripPointerCasts(); |
Eric Christopher | 25ec483 | 2010-03-05 06:58:57 +0000 | [diff] [blame] | 1728 | |
| 1729 | // If this is a PHI node, there are two cases: either we have already seen it |
| 1730 | // or we haven't. |
| 1731 | if (PHINode *PN = dyn_cast<PHINode>(V)) { |
| 1732 | if (!PHIs.insert(PN)) |
| 1733 | return ~0ULL; // already in the set. |
| 1734 | |
| 1735 | // If it was new, see if all the input strings are the same length. |
| 1736 | uint64_t LenSoFar = ~0ULL; |
| 1737 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
| 1738 | uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs); |
| 1739 | if (Len == 0) return 0; // Unknown length -> unknown. |
| 1740 | |
| 1741 | if (Len == ~0ULL) continue; |
| 1742 | |
| 1743 | if (Len != LenSoFar && LenSoFar != ~0ULL) |
| 1744 | return 0; // Disagree -> unknown. |
| 1745 | LenSoFar = Len; |
| 1746 | } |
| 1747 | |
| 1748 | // Success, all agree. |
| 1749 | return LenSoFar; |
| 1750 | } |
| 1751 | |
| 1752 | // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) |
| 1753 | if (SelectInst *SI = dyn_cast<SelectInst>(V)) { |
| 1754 | uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs); |
| 1755 | if (Len1 == 0) return 0; |
| 1756 | uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs); |
| 1757 | if (Len2 == 0) return 0; |
| 1758 | if (Len1 == ~0ULL) return Len2; |
| 1759 | if (Len2 == ~0ULL) return Len1; |
| 1760 | if (Len1 != Len2) return 0; |
| 1761 | return Len1; |
| 1762 | } |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1763 | |
| 1764 | // Otherwise, see if we can read the string. |
| 1765 | StringRef StrData; |
| 1766 | if (!getConstantStringInfo(V, StrData)) |
Eric Christopher | 25ec483 | 2010-03-05 06:58:57 +0000 | [diff] [blame] | 1767 | return 0; |
| 1768 | |
Chris Lattner | 18c7f80 | 2012-02-05 02:29:43 +0000 | [diff] [blame] | 1769 | return StrData.size()+1; |
Eric Christopher | 25ec483 | 2010-03-05 06:58:57 +0000 | [diff] [blame] | 1770 | } |
| 1771 | |
| 1772 | /// GetStringLength - If we can compute the length of the string pointed to by |
| 1773 | /// the specified pointer, return 'len+1'. If we can't, return 0. |
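| | /// For example (illustrative): for a pointer to the constant [6 x i8] |
| | /// c"hello\00", this returns 6, i.e. strlen("hello") + 1. |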
| 1774 | uint64_t llvm::GetStringLength(Value *V) { |
| 1775 | if (!V->getType()->isPointerTy()) return 0; |
| 1776 | |
| 1777 | SmallPtrSet<PHINode*, 32> PHIs; |
| 1778 | uint64_t Len = GetStringLengthH(V, PHIs); |
| 1779 | // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return |
| 1780 | // an empty string as a length. |
| 1781 | return Len == ~0ULL ? 1 : Len; |
| 1782 | } |
Dan Gohman | 5034dd3 | 2010-12-15 20:02:24 +0000 | [diff] [blame] | 1783 | |
Dan Gohman | bd1801b | 2011-01-24 18:53:32 +0000 | [diff] [blame] | 1784 | Value * |
| 1785 | llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) { |
Dan Gohman | 5034dd3 | 2010-12-15 20:02:24 +0000 | [diff] [blame] | 1786 | if (!V->getType()->isPointerTy()) |
| 1787 | return V; |
| 1788 | for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { |
| 1789 | if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { |
| 1790 | V = GEP->getPointerOperand(); |
| 1791 | } else if (Operator::getOpcode(V) == Instruction::BitCast) { |
| 1792 | V = cast<Operator>(V)->getOperand(0); |
| 1793 | } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { |
| 1794 | if (GA->mayBeOverridden()) |
| 1795 | return V; |
| 1796 | V = GA->getAliasee(); |
| 1797 | } else { |
Dan Gohman | c01895c | 2010-12-15 20:49:55 +0000 | [diff] [blame] | 1798 | // See if InstructionSimplify knows any relevant tricks. |
| 1799 | if (Instruction *I = dyn_cast<Instruction>(V)) |
Chris Lattner | 7a2bdde | 2011-04-15 05:18:47 +0000 | [diff] [blame] | 1800 | // TODO: Acquire a DominatorTree and use it. |
Dan Gohman | bd1801b | 2011-01-24 18:53:32 +0000 | [diff] [blame] | 1801 | if (Value *Simplified = SimplifyInstruction(I, TD, 0)) { |
Dan Gohman | c01895c | 2010-12-15 20:49:55 +0000 | [diff] [blame] | 1802 | V = Simplified; |
| 1803 | continue; |
| 1804 | } |
| 1805 | |
Dan Gohman | 5034dd3 | 2010-12-15 20:02:24 +0000 | [diff] [blame] | 1806 | return V; |
| 1807 | } |
| 1808 | assert(V->getType()->isPointerTy() && "Unexpected operand type!"); |
| 1809 | } |
| 1810 | return V; |
| 1811 | } |
Nick Lewycky | 99e0b2a | 2011-06-27 04:20:45 +0000 | [diff] [blame] | 1812 | |
| 1813 | /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer |
| 1814 | /// are lifetime markers. |
| 1815 | /// |
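| | /// (For instance, a pointer whose only users are llvm.lifetime.start and |
| | /// llvm.lifetime.end intrinsic calls satisfies this predicate.) |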
| 1816 | bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { |
| 1817 | for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); |
| 1818 | UI != UE; ++UI) { |
| 1819 | const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI); |
| 1820 | if (!II) return false; |
| 1821 | |
| 1822 | if (II->getIntrinsicID() != Intrinsic::lifetime_start && |
| 1823 | II->getIntrinsicID() != Intrinsic::lifetime_end) |
| 1824 | return false; |
| 1825 | } |
| 1826 | return true; |
| 1827 | } |
Dan Gohman | f042660 | 2011-12-14 23:49:11 +0000 | [diff] [blame] | 1828 | |
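| | // Illustrative examples of the cases handled below (a sketch, not an |
| | // exhaustive list): "udiv i32 %x, 7" is safe to speculate because the divisor |
| | // is known non-zero, while "udiv i32 %x, %y" is only safe when isKnownNonZero |
| | // can prove %y != 0. |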
Dan Gohman | febaf84 | 2012-01-04 23:01:09 +0000 | [diff] [blame] | 1829 | bool llvm::isSafeToSpeculativelyExecute(const Value *V, |
Dan Gohman | f042660 | 2011-12-14 23:49:11 +0000 | [diff] [blame] | 1830 | const TargetData *TD) { |
Dan Gohman | febaf84 | 2012-01-04 23:01:09 +0000 | [diff] [blame] | 1831 | const Operator *Inst = dyn_cast<Operator>(V); |
| 1832 | if (!Inst) |
| 1833 | return false; |
| 1834 | |
Dan Gohman | f042660 | 2011-12-14 23:49:11 +0000 | [diff] [blame] | 1835 | for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) |
| 1836 | if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) |
| 1837 | if (C->canTrap()) |
| 1838 | return false; |
| 1839 | |
| 1840 | switch (Inst->getOpcode()) { |
| 1841 | default: |
| 1842 | return true; |
| 1843 | case Instruction::UDiv: |
| 1844 | case Instruction::URem: |
| 1845 | // x / y is undefined if y == 0, but calculations like x / 3 are safe. |
| 1846 | return isKnownNonZero(Inst->getOperand(1), TD); |
| 1847 | case Instruction::SDiv: |
| 1848 | case Instruction::SRem: { |
| 1849 | Value *Op = Inst->getOperand(1); |
| 1850 | // x / y is undefined if y == 0 |
| 1851 | if (!isKnownNonZero(Op, TD)) |
| 1852 | return false; |
| 1853 | // x / y might be undefined if y == -1 |
| 1854 | unsigned BitWidth = getBitWidth(Op->getType(), TD); |
| 1855 | if (BitWidth == 0) |
| 1856 | return false; |
| 1857 | APInt KnownZero(BitWidth, 0); |
| 1858 | APInt KnownOne(BitWidth, 0); |
| 1859 | ComputeMaskedBits(Op, APInt::getAllOnesValue(BitWidth), |
| 1860 | KnownZero, KnownOne, TD); |
| 1861 | return !!KnownZero; |
| 1862 | } |
| 1863 | case Instruction::Load: { |
| 1864 | const LoadInst *LI = cast<LoadInst>(Inst); |
| 1865 | if (!LI->isUnordered()) |
| 1866 | return false; |
| 1867 | return LI->getPointerOperand()->isDereferenceablePointer(); |
| 1868 | } |
Nick Lewycky | 8369687 | 2011-12-21 05:52:02 +0000 | [diff] [blame] | 1869 | case Instruction::Call: { |
| 1870 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { |
| 1871 | switch (II->getIntrinsicID()) { |
| 1872 | case Intrinsic::bswap: |
| 1873 | case Intrinsic::ctlz: |
| 1874 | case Intrinsic::ctpop: |
| 1875 | case Intrinsic::cttz: |
| 1876 | case Intrinsic::objectsize: |
| 1877 | case Intrinsic::sadd_with_overflow: |
| 1878 | case Intrinsic::smul_with_overflow: |
| 1879 | case Intrinsic::ssub_with_overflow: |
| 1880 | case Intrinsic::uadd_with_overflow: |
| 1881 | case Intrinsic::umul_with_overflow: |
| 1882 | case Intrinsic::usub_with_overflow: |
| 1883 | return true; |
| 1884 | // TODO: some fp intrinsics are marked as having the same error handling |
| 1885 | // as libm. They're safe to speculate when they won't error. |
| 1886 | // TODO: are convert_{from,to}_fp16 safe? |
| 1887 | // TODO: can we list target-specific intrinsics here? |
| 1888 | default: break; |
| 1889 | } |
| 1890 | } |
Dan Gohman | f042660 | 2011-12-14 23:49:11 +0000 | [diff] [blame] | 1891 | return false; // The called function could have undefined behavior or |
Nick Lewycky | 8369687 | 2011-12-21 05:52:02 +0000 | [diff] [blame] | 1892 | // side-effects, even if marked readnone nounwind. |
| 1893 | } |
Dan Gohman | f042660 | 2011-12-14 23:49:11 +0000 | [diff] [blame] | 1894 | case Instruction::VAArg: |
| 1895 | case Instruction::Alloca: |
| 1896 | case Instruction::Invoke: |
| 1897 | case Instruction::PHI: |
| 1898 | case Instruction::Store: |
| 1899 | case Instruction::Ret: |
| 1900 | case Instruction::Br: |
| 1901 | case Instruction::IndirectBr: |
| 1902 | case Instruction::Switch: |
Dan Gohman | f042660 | 2011-12-14 23:49:11 +0000 | [diff] [blame] | 1903 | case Instruction::Unreachable: |
| 1904 | case Instruction::Fence: |
| 1905 | case Instruction::LandingPad: |
| 1906 | case Instruction::AtomicRMW: |
| 1907 | case Instruction::AtomicCmpXchg: |
| 1908 | case Instruction::Resume: |
| 1909 | return false; // Misc instructions which have effects |
| 1910 | } |
| 1911 | } |