//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to determine
  // if this is safe.  For example, the use could be in dynamically unreached
  // code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
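  // Illustrative IR (example added for exposition): a known-non-zero
  // %v = lshr (shl 1, %a), %b can be rewritten as %v = shl 1, (sub %a, %b).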
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact.  Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is used in a
    // non-zero context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      I->setOperand(0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

/// \brief A helper routine of InstCombiner::visitMul().
///
/// If C is a scalar/vector of known powers of 2, then this function returns
/// a new scalar/vector obtained from logBase2 of C.
/// Return a null pointer otherwise.
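/// For example (illustrative): a scalar i32 8 yields i32 3, and the vector
/// constant <i32 1, i32 2, i32 4, i32 8> yields <i32 0, i32 1, i32 2, i32 3>;
/// undef elements are passed through, and any other non-power-of-2 element
/// makes the whole call return null.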
static Constant *getLogBase2(Type *Ty, Constant *C) {
  const APInt *IVal;
  if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
    return ConstantInt::get(Ty, IVal->logBase2());

  if (!Ty->isVectorTy())
    return nullptr;

  SmallVector<Constant *, 4> Elts;
  for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) {
    Constant *Elt = C->getAggregateElement(I);
    if (!Elt)
      return nullptr;
    if (isa<UndefValue>(Elt)) {
      Elts.push_back(UndefValue::get(Ty->getScalarType()));
      continue;
    }
    if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
      return nullptr;
    Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
  }

  return ConstantVector::get(Elts);
}

/// \brief Return true if we can prove that:
///    (mul LHS, RHS)  === (mul nsw LHS, RHS)
bool InstCombiner::willNotOverflowSignedMul(const Value *LHS,
                                            const Value *RHS,
                                            const Instruction &CxtI) const {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading sign bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
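  // Worked example (added for exposition): for i16 operands that each have at
  // least 9 sign bits, both values lie in [-128, 127], so the product lies in
  // [-16384, 16384] and cannot overflow i16; SignBits = 9 + 9 = 18 > 16 + 1.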
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();

  // Note that underestimating the number of sign bits gives a more
  // conservative answer.
  unsigned SignBits =
      ComputeNumSignBits(LHS, 0, &CxtI) + ComputeNumSignBits(RHS, 0, &CxtI);

  // First handle the easy case: if we have enough sign bits there's
  // definitely no overflow.
  if (SignBits > BitWidth + 1)
    return true;

  // There are two ambiguous cases where there can be no overflow:
  //   SignBits == BitWidth + 1    and
  //   SignBits == BitWidth
  // The second case is difficult to check, therefore we only handle the
  // first case.
  if (SignBits == BitWidth + 1) {
    // It overflows only when both arguments are negative and the true
    // product is exactly the minimum negative number.
    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
    // For simplicity we just check if at least one side is not negative.
    KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI);
    KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI);
    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
      return true;
  }
  return false;
}

Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyMulInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // X * -1 == 0 - X
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
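      // For instance (illustrative): (X << 2) * 5 becomes X * 20, folding the
      // shift into the multiply constant.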
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
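      // Illustrative example: mul i32 %x, 8 --> shl i32 %x, 3, and likewise
      // per element for a vector constant whose elements are all powers of 2.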
      if (Constant *NewCst = getLogBase2(NewOp->getType(), C1)) {
        unsigned Width = NewCst->getType()->getPrimitiveSizeInBits();
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != Width - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
    // (Y + const) * (-(2**n)) -> (-const - Y) * (2**n), for positive nonzero n
    // The "* (2**n)" thus becomes a potential shifting opportunity.
    {
      const APInt &Val = CI->getValue();
      const APInt &PosVal = Val.abs();
      if (Val.isNegative() && PosVal.isPowerOf2()) {
        Value *X = nullptr, *Y = nullptr;
        if (Op0->hasOneUse()) {
          ConstantInt *C1;
          Value *Sub = nullptr;
          if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
            Sub = Builder.CreateSub(X, Y, "suba");
          else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
            Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
          if (Sub)
            return
              BinaryOperator::CreateMul(Sub,
                                        ConstantInt::get(Y->getType(), PosVal));
        }
      }
    }
  }

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    if (Instruction *FoldedMul = foldOpWithConstantIntoOperand(I))
      return FoldedMul;

    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
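    // For example (illustrative): (X + 3) * 5 --> X*5 + 15; the transform is
    // only kept when C1*CI folds to a plain constant (see the check below).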
    {
      Value *X;
      Constant *C1;
      if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
        Value *Mul = Builder.CreateMul(C1, Op1);
        // Only go forward with the transform if C1*CI simplifies to a tidier
        // constant.
        if (!match(Mul, m_Mul(m_Value(), m_Value())))
          return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
      }
    }
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
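  // Illustrative IR (added example): (udiv %x, %y) * %y becomes
  // %x - (urem %x, %y); with an exact udiv it is simply %x.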
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  // i1 mul -> i1 and.
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
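  // For example (illustrative): mul i32 (zext i1 %b to i32), %y becomes
  // select i1 %b, i32 %y, i32 0.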
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  //       more similar to what we're doing above.
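  // Illustrative example: (lshr i32 %x, 31) is 0 or 1, so the multiply yields
  // %y or 0; (ashr i32 %x, 31) is 0 or -1, so the 'and' produces the same.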
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  // Check for (mul (sext x), y), see if we can merge this into an
  // integer mul followed by a sext.
  if (SExtInst *Op0Conv = dyn_cast<SExtInst>(Op0)) {
    // (mul (sext x), cst) --> (sext (mul x, cst'))
    if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
      if (Op0Conv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
        if (ConstantExpr::getSExt(CI, I.getType()) == Op1C &&
            willNotOverflowSignedMul(Op0Conv->getOperand(0), CI, I)) {
          // Insert the new, smaller mul.
          Value *NewMul =
              Builder.CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv");
          return new SExtInst(NewMul, I.getType());
        }
      }
    }

    // (mul (sext x), (sext y)) --> (sext (mul int x, y))
    if (SExtInst *Op1Conv = dyn_cast<SExtInst>(Op1)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer mul will not overflow.
      if (Op0Conv->getOperand(0)->getType() ==
              Op1Conv->getOperand(0)->getType() &&
          (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
          willNotOverflowSignedMul(Op0Conv->getOperand(0),
                                   Op1Conv->getOperand(0), I)) {
        // Insert the new integer mul.
        Value *NewMul = Builder.CreateNSWMul(
            Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
        return new SExtInst(NewMul, I.getType());
      }
    }
  }

  // Check for (mul (zext x), y), see if we can merge this into an
  // integer mul followed by a zext.
  if (auto *Op0Conv = dyn_cast<ZExtInst>(Op0)) {
    // (mul (zext x), cst) --> (zext (mul x, cst'))
    if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
      if (Op0Conv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
        if (ConstantExpr::getZExt(CI, I.getType()) == Op1C &&
            willNotOverflowUnsignedMul(Op0Conv->getOperand(0), CI, I)) {
          // Insert the new, smaller mul.
          Value *NewMul =
              Builder.CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv");
          return new ZExtInst(NewMul, I.getType());
        }
      }
    }

    // (mul (zext x), (zext y)) --> (zext (mul int x, y))
    if (auto *Op1Conv = dyn_cast<ZExtInst>(Op1)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of zexts), and if the
      // integer mul will not overflow.
      if (Op0Conv->getOperand(0)->getType() ==
              Op1Conv->getOperand(0)->getType() &&
          (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
          willNotOverflowUnsignedMul(Op0Conv->getOperand(0),
                                     Op1Conv->getOperand(0), I)) {
        // Insert the new integer mul.
        Value *NewMul = Builder.CreateNUWMul(
            Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
        return new ZExtInst(NewMul, I.getType());
      }
    }
  }

  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

/// Detect pattern log2(Y * 0.5) with corresponding fast math flags.
static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) {
  if (!Op->hasOneUse())
    return;

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op);
  if (!II)
    return;
  if (II->getIntrinsicID() != Intrinsic::log2 || !II->isFast())
    return;
  Log2 = II;

  Value *OpLog2Of = II->getArgOperand(0);
  if (!OpLog2Of->hasOneUse())
    return;

  Instruction *I = dyn_cast<Instruction>(OpLog2Of);
  if (!I)
    return;

  if (I->getOpcode() != Instruction::FMul || !I->isFast())
    return;

  if (match(I->getOperand(0), m_SpecificFP(0.5)))
    Y = I->getOperand(1);
  else if (match(I->getOperand(1), m_SpecificFP(0.5)))
    Y = I->getOperand(0);
}

/// Helper function of InstCombiner::visitFMul(). Return true iff the given
/// value is FMul or FDiv with one and only one operand being a finite-non-zero
/// constant (i.e. not Zero/NaN/Infinity).
static bool isFMulOrFDivWithConstant(Value *V) {
  Constant *C;
  return (match(V, m_FMul(m_Value(), m_Constant(C))) ||
          match(V, m_FDiv(m_Value(), m_Constant(C))) ||
          match(V, m_FDiv(m_Constant(C), m_Value()))) && C->isFiniteNonZeroFP();
}

/// foldFMulConst() is a helper routine of InstCombiner::visitFMul().
/// The input \p FMulOrDiv is an FMul/FDiv with one and only one operand
/// being a constant (i.e. isFMulOrFDivWithConstant(FMulOrDiv) == true).
/// This function simplifies "FMulOrDiv * C" and returns the
/// resulting expression. Note that this function could return NULL in
/// case the constants cannot be folded into a normal floating-point value.
Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, Constant *C,
                                   Instruction *InsertBefore) {
  assert(isFMulOrFDivWithConstant(FMulOrDiv) && "V is invalid");

  Value *Opnd0 = FMulOrDiv->getOperand(0);
  Value *Opnd1 = FMulOrDiv->getOperand(1);

  Constant *C0 = dyn_cast<Constant>(Opnd0);
  Constant *C1 = dyn_cast<Constant>(Opnd1);

  BinaryOperator *R = nullptr;

  // (X * C0) * C => X * (C0*C)
  if (FMulOrDiv->getOpcode() == Instruction::FMul) {
    Constant *F = ConstantExpr::getFMul(C1 ? C1 : C0, C);
    if (F->isNormalFP())
      R = BinaryOperator::CreateFMul(C1 ? Opnd0 : Opnd1, F);
  } else {
    if (C0) {
      // (C0 / X) * C => (C0 * C) / X
      if (FMulOrDiv->hasOneUse()) {
        // It would otherwise introduce another div.
        Constant *F = ConstantExpr::getFMul(C0, C);
        if (F->isNormalFP())
          R = BinaryOperator::CreateFDiv(F, Opnd1);
      }
    } else {
      // (X / C1) * C => X * (C/C1) if C/C1 is not a denormal
      Constant *F = ConstantExpr::getFDiv(C, C1);
      if (F->isNormalFP()) {
        R = BinaryOperator::CreateFMul(Opnd0, F);
      } else {
        // (X / C1) * C => X / (C1/C)
        Constant *F = ConstantExpr::getFDiv(C1, C);
        if (F->isNormalFP())
          R = BinaryOperator::CreateFDiv(Opnd0, F);
      }
    }
  }

  if (R) {
    R->setFast(true);
    InsertNewInstWith(R, *InsertBefore);
  }

  return R;
}

Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (isa<Constant>(Op0))
    std::swap(Op0, Op1);

  if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  bool AllowReassociate = I.isFast();

  // Simplify mul instructions with a constant RHS.
  if (auto *C = dyn_cast<Constant>(Op1)) {
    if (Instruction *FoldedMul = foldOpWithConstantIntoOperand(I))
      return FoldedMul;

    // (fmul X, -1.0) --> (fsub -0.0, X)
    if (match(C, m_SpecificFP(-1.0))) {
      Constant *NegZero = ConstantFP::getNegativeZero(Op1->getType());
      Instruction *RI = BinaryOperator::CreateFSub(NegZero, Op0);
      RI->copyFastMathFlags(&I);
      return RI;
    }

    if (AllowReassociate && C->isFiniteNonZeroFP()) {
      // Let MDC denote an expression in one of these forms:
      // X * C, C/X, X/C, where C is a constant.
      //
      // Try to simplify "MDC * Constant"
      if (isFMulOrFDivWithConstant(Op0))
        if (Value *V = foldFMulConst(cast<Instruction>(Op0), C, &I))
          return replaceInstUsesWith(I, V);

      // (MDC +/- C1) * C => (MDC * C) +/- (C1 * C)
      Instruction *FAddSub = dyn_cast<Instruction>(Op0);
      if (FAddSub &&
          (FAddSub->getOpcode() == Instruction::FAdd ||
           FAddSub->getOpcode() == Instruction::FSub)) {
        Value *Opnd0 = FAddSub->getOperand(0);
        Value *Opnd1 = FAddSub->getOperand(1);
        Constant *C0 = dyn_cast<Constant>(Opnd0);
        Constant *C1 = dyn_cast<Constant>(Opnd1);
        bool Swap = false;
        if (C0) {
          std::swap(C0, C1);
          std::swap(Opnd0, Opnd1);
          Swap = true;
        }

        if (C1 && C1->isFiniteNonZeroFP() && isFMulOrFDivWithConstant(Opnd0)) {
          Value *M1 = ConstantExpr::getFMul(C1, C);
          Value *M0 = cast<Constant>(M1)->isNormalFP() ?
                          foldFMulConst(cast<Instruction>(Opnd0), C, &I) :
                          nullptr;
          if (M0 && M1) {
            if (Swap && FAddSub->getOpcode() == Instruction::FSub)
              std::swap(M0, M1);

            Instruction *RI = (FAddSub->getOpcode() == Instruction::FAdd)
                                  ? BinaryOperator::CreateFAdd(M0, M1)
                                  : BinaryOperator::CreateFSub(M0, M1);
            RI->copyFastMathFlags(&I);
            return RI;
          }
        }
      }
    }
  }

  if (Op0 == Op1) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op0)) {
      // sqrt(X) * sqrt(X) -> X
      if (AllowReassociate && II->getIntrinsicID() == Intrinsic::sqrt)
        return replaceInstUsesWith(I, II->getOperand(0));

      // fabs(X) * fabs(X) -> X * X
      if (II->getIntrinsicID() == Intrinsic::fabs) {
        Instruction *FMulVal = BinaryOperator::CreateFMul(II->getOperand(0),
                                                          II->getOperand(0),
                                                          I.getName());
        FMulVal->copyFastMathFlags(&I);
        return FMulVal;
      }
    }
  }

  // Under unsafe algebra do:
  // X * log2(0.5*Y) = X*log2(Y) - X
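  // Derivation (added for exposition): log2(0.5*Y) = log2(Y) + log2(0.5)
  // = log2(Y) - 1, so X * log2(0.5*Y) = X*log2(Y) - X under reassociation.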
  if (AllowReassociate) {
    Value *OpX = nullptr;
    Value *OpY = nullptr;
    IntrinsicInst *Log2;
    detectLog2OfHalf(Op0, OpY, Log2);
    if (OpY) {
      OpX = Op1;
    } else {
      detectLog2OfHalf(Op1, OpY, Log2);
      if (OpY) {
        OpX = Op0;
      }
    }
    // If the pattern is detected, emit the alternate sequence.
    if (OpX && OpY) {
      BuilderTy::FastMathFlagGuard Guard(Builder);
      Builder.setFastMathFlags(Log2->getFastMathFlags());
      Log2->setArgOperand(0, OpY);
      Value *FMulVal = Builder.CreateFMul(OpX, Log2);
      Value *FSub = Builder.CreateFSub(FMulVal, OpX);
      FSub->takeName(&I);
      return replaceInstUsesWith(I, FSub);
    }
  }

  // sqrt(a) * sqrt(b) -> sqrt(a * b)
  if (AllowReassociate && Op0->hasOneUse() && Op1->hasOneUse()) {
    Value *Opnd0 = nullptr;
    Value *Opnd1 = nullptr;
    if (match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(Opnd0))) &&
        match(Op1, m_Intrinsic<Intrinsic::sqrt>(m_Value(Opnd1)))) {
      BuilderTy::FastMathFlagGuard Guard(Builder);
      Builder.setFastMathFlags(I.getFastMathFlags());
      Value *FMulVal = Builder.CreateFMul(Opnd0, Opnd1);
      Value *Sqrt = Intrinsic::getDeclaration(I.getModule(),
                                              Intrinsic::sqrt, I.getType());
      Value *SqrtCall = Builder.CreateCall(Sqrt, FMulVal);
      return replaceInstUsesWith(I, SqrtCall);
    }
  }

  // Handle symmetric situation in a 2-iteration loop
  Value *Opnd0 = Op0;
  Value *Opnd1 = Op1;
  for (int i = 0; i < 2; i++) {
    bool IgnoreZeroSign = I.hasNoSignedZeros();
    if (BinaryOperator::isFNeg(Opnd0, IgnoreZeroSign)) {
      BuilderTy::FastMathFlagGuard Guard(Builder);
      Builder.setFastMathFlags(I.getFastMathFlags());

      Value *N0 = dyn_castFNegVal(Opnd0, IgnoreZeroSign);
      Value *N1 = dyn_castFNegVal(Opnd1, IgnoreZeroSign);

      // -X * -Y => X*Y
      if (N1) {
        Value *FMul = Builder.CreateFMul(N0, N1);
        FMul->takeName(&I);
        return replaceInstUsesWith(I, FMul);
      }

      if (Opnd0->hasOneUse()) {
        // -X * Y => -(X*Y) (Promote negation as high as possible)
        Value *T = Builder.CreateFMul(N0, Opnd1);
        Value *Neg = Builder.CreateFNeg(T);
        Neg->takeName(&I);
        return replaceInstUsesWith(I, Neg);
      }
    }

    // Handle special cases for FMul with selects feeding the operation
    if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
      return replaceInstUsesWith(I, V);

    // (X*Y) * X => (X*X) * Y where Y != X
    //  The purpose is two-fold:
    //   1) to form a power expression (of X).
    //   2) potentially shorten the critical path: After transformation, the
    //      latency of the instruction Y is amortized by the expression of X*X,
    //      and therefore Y is in a "less critical" position compared to what it
    //      was before the transformation.
    if (AllowReassociate) {
      Value *Opnd0_0, *Opnd0_1;
      if (Opnd0->hasOneUse() &&
          match(Opnd0, m_FMul(m_Value(Opnd0_0), m_Value(Opnd0_1)))) {
        Value *Y = nullptr;
        if (Opnd0_0 == Opnd1 && Opnd0_1 != Opnd1)
          Y = Opnd0_1;
        else if (Opnd0_1 == Opnd1 && Opnd0_0 != Opnd1)
          Y = Opnd0_0;

        if (Y) {
          BuilderTy::FastMathFlagGuard Guard(Builder);
          Builder.setFastMathFlags(I.getFastMathFlags());
          Value *T = Builder.CreateFMul(Opnd1, Opnd1);
          Value *R = Builder.CreateFMul(T, Y);
          R->takeName(&I);
          return replaceInstUsesWith(I, R);
        }
      }
    }

    if (!isa<Constant>(Op1))
      std::swap(Opnd0, Opnd1);
    else
      break;
  }

  return Changed ? &I : nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of the
/// select operands is zero. In that case, we can use the other select operand
/// because div/rem by zero is undefined.
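/// Illustrative IR (example added here, not part of the original comment):
///   %r = udiv i32 %x, (select i1 %c, i32 0, i32 %y)
/// can only avoid dividing by zero when %c is false, so it becomes
/// udiv i32 %x, %y; the routine also propagates the implied values of the
/// select and its condition to their other uses in the block.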
| 751 | bool InstCombiner::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) { |
| 752 | SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1)); |
| 753 | if (!SI) |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 754 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 755 | |
Sanjay Patel | ae2e3a4 | 2017-10-06 23:20:16 +0000 | [diff] [blame] | 756 | int NonNullOperand; |
| 757 | if (match(SI->getTrueValue(), m_Zero())) |
| 758 | // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y |
| 759 | NonNullOperand = 2; |
| 760 | else if (match(SI->getFalseValue(), m_Zero())) |
| 761 | // div/rem X, (Cond ? Y : 0) -> div/rem X, Y |
| 762 | NonNullOperand = 1; |
| 763 | else |
| 764 | return false; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 765 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 766 | // Change the div/rem to use 'Y' instead of the select. |
| 767 | I.setOperand(1, SI->getOperand(NonNullOperand)); |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 768 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 769 | // Okay, we know we replace the operand of the div/rem with 'Y' with no |
| 770 | // problem. However, the select, or the condition of the select may have |
| 771 | // multiple uses. Based on our knowledge that the operand must be non-zero, |
| 772 | // propagate the known value for the select into other uses of it, and |
| 773 | // propagate a known value of the condition into its other users. |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 774 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 775 | // If the select and condition only have a single use, don't bother with this, |
| 776 | // early exit. |
Sanjay Patel | ae2e3a4 | 2017-10-06 23:20:16 +0000 | [diff] [blame] | 777 | Value *SelectCond = SI->getCondition(); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 778 | if (SI->use_empty() && SelectCond->hasOneUse()) |
| 779 | return true; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 780 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 781 | // Scan the current block backward, looking for other uses of SI. |
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 782 | BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin(); |
Sanjay Patel | 72d339a | 2017-10-06 23:43:06 +0000 | [diff] [blame] | 783 | Type *CondTy = SelectCond->getType(); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 784 | while (BBI != BBFront) { |
| 785 | --BBI; |
| 786 | // If we found a call to a function, we can't assume it will return, so |
| 787 | // information from below it cannot be propagated above it. |
| 788 | if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI)) |
| 789 | break; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 790 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 791 | // Replace uses of the select or its condition with the known values. |
| 792 | for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end(); |
| 793 | I != E; ++I) { |
| 794 | if (*I == SI) { |
| 795 | *I = SI->getOperand(NonNullOperand); |
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 796 | Worklist.Add(&*BBI); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 797 | } else if (*I == SelectCond) { |
Sanjay Patel | 72d339a | 2017-10-06 23:43:06 +0000 | [diff] [blame] | 798 | *I = NonNullOperand == 1 ? ConstantInt::getTrue(CondTy) |
| 799 | : ConstantInt::getFalse(CondTy); |
Duncan P. N. Exon Smith | 9f8aaf2 | 2015-10-13 16:59:33 +0000 | [diff] [blame] | 800 | Worklist.Add(&*BBI); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 801 | } |
| 802 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 803 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 804 | // If we past the instruction, quit looking for it. |
| 805 | if (&*BBI == SI) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 806 | SI = nullptr; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 807 | if (&*BBI == SelectCond) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 808 | SelectCond = nullptr; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 809 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 810 | // If we ran out of things to eliminate, break out of the loop. |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 811 | if (!SelectCond && !SI) |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 812 | break; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 813 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 814 | } |
| 815 | return true; |
| 816 | } |
| 817 | |
Sanjay Patel | 1998cc6 | 2018-02-12 18:38:35 +0000 | [diff] [blame] | 818 | /// True if the multiply can not be expressed in an int this size. |
| 819 | static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product, |
| 820 | bool IsSigned) { |
| 821 | bool Overflow; |
| 822 | Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow); |
| 823 | return Overflow; |
| 824 | } |
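| | // Illustration (i8 constants, chosen for exposition): 16 * 16 = 256 does not
| | // fit in 8 bits, so smul_ov/umul_ov report overflow and callers skip their
| | // fold; 5 * 6 = 30 fits and Product holds 30.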
| 825 | |
| 826 | /// True if C1 is a multiple of C2. Quotient contains C1/C2.
| 827 | static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient, |
| 828 | bool IsSigned) { |
| 829 | assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal"); |
| 830 | |
| 831 | // Bail if we will divide by zero. |
| 832 | if (C2.isNullValue()) |
| 833 | return false; |
| 834 | |
| 835 | // Bail if we would divide INT_MIN by -1. |
| 836 | if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue()) |
| 837 | return false; |
| 838 | |
| 839 | APInt Remainder(C1.getBitWidth(), /*Val=*/0ULL, IsSigned); |
| 840 | if (IsSigned) |
| 841 | APInt::sdivrem(C1, C2, Quotient, Remainder); |
| 842 | else |
| 843 | APInt::udivrem(C1, C2, Quotient, Remainder); |
| 844 | |
| 845 | return Remainder.isMinValue(); |
| 846 | } |
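| | // Illustration: with C1 = 12 and C2 = 4 the quotient is 3 and the result is
| | // true; with C1 = 10 and C2 = 4 the remainder is 2, so the result is false.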
| 847 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 848 | /// This function implements the transforms common to both integer division |
| 849 | /// instructions (udiv and sdiv). It is called by the visitors to those integer |
| 850 | /// division instructions. |
| 851 | /// @brief Common integer divide transforms |
| 852 | Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) { |
| 853 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
Sanjay Patel | 9530f18 | 2018-01-21 16:14:51 +0000 | [diff] [blame] | 854 | bool IsSigned = I.getOpcode() == Instruction::SDiv; |
Sanjay Patel | 39059d2 | 2018-02-12 14:14:56 +0000 | [diff] [blame] | 855 | Type *Ty = I.getType(); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 856 | |
Chris Lattner | 7c99f19 | 2011-05-22 18:18:41 +0000 | [diff] [blame] | 857 | // The RHS is known non-zero. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 858 | if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) { |
Chris Lattner | 7c99f19 | 2011-05-22 18:18:41 +0000 | [diff] [blame] | 859 | I.setOperand(1, V); |
| 860 | return &I; |
| 861 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 862 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 863 | // Handle cases involving: [su]div X, (select Cond, Y, Z) |
| 864 | // This does not apply for fdiv. |
Sanjay Patel | ae2e3a4 | 2017-10-06 23:20:16 +0000 | [diff] [blame] | 865 | if (simplifyDivRemOfSelectWithZeroOp(I)) |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 866 | return &I; |
| 867 | |
Sanjay Patel | 1998cc6 | 2018-02-12 18:38:35 +0000 | [diff] [blame] | 868 | const APInt *C2; |
| 869 | if (match(Op1, m_APInt(C2))) { |
| 870 | Value *X; |
| 871 | const APInt *C1; |
David Majnemer | f9a095d | 2014-08-16 08:55:06 +0000 | [diff] [blame] | 872 | |
Sanjay Patel | 1998cc6 | 2018-02-12 18:38:35 +0000 | [diff] [blame] | 873 | // (X / C1) / C2 -> X / (C1*C2) |
| 874 | if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) || |
| 875 | (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) { |
| 876 | APInt Product(C1->getBitWidth(), /*Val=*/0ULL, IsSigned); |
| 877 | if (!multiplyOverflows(*C1, *C2, Product, IsSigned)) |
| 878 | return BinaryOperator::Create(I.getOpcode(), X, |
| 879 | ConstantInt::get(Ty, Product)); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 880 | } |
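| | // Illustration of the fold above (unsigned i8): (X /u 10) /u 6 --> X /u 60,
| | // but (X /u 20) /u 20 is left alone because 20 * 20 = 400 overflows i8.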
Sanjay Patel | 1998cc6 | 2018-02-12 18:38:35 +0000 | [diff] [blame] | 881 | |
| 882 | if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) || |
| 883 | (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) { |
| 884 | APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned); |
| 885 | |
| 886 | // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1. |
| 887 | if (isMultiple(*C2, *C1, Quotient, IsSigned)) { |
| 888 | auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X, |
| 889 | ConstantInt::get(Ty, Quotient)); |
| 890 | NewDiv->setIsExact(I.isExact()); |
| 891 | return NewDiv; |
| 892 | } |
| 893 | |
| 894 | // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2. |
| 895 | if (isMultiple(*C1, *C2, Quotient, IsSigned)) { |
| 896 | auto *Mul = BinaryOperator::Create(Instruction::Mul, X, |
| 897 | ConstantInt::get(Ty, Quotient)); |
| 898 | auto *OBO = cast<OverflowingBinaryOperator>(Op0); |
| 899 | Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap()); |
| 900 | Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap()); |
| 901 | return Mul; |
| 902 | } |
| 903 | } |
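| | // Illustration of the two folds above: (X *nuw 3) /u 12 --> X /u 4, and
| | // (X *nsw 6) /s 3 --> X *nsw 2.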
| 904 | |
| 905 | if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) && |
| 906 | *C1 != C1->getBitWidth() - 1) || |
| 907 | (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) { |
| 908 | APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned); |
| 909 | APInt C1Shifted = APInt::getOneBitSet( |
| 910 | C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue())); |
| 911 | |
| 912 | // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of (1 << C1).
| 913 | if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) { |
| 914 | auto *BO = BinaryOperator::Create(I.getOpcode(), X, |
| 915 | ConstantInt::get(Ty, Quotient)); |
| 916 | BO->setIsExact(I.isExact()); |
| 917 | return BO; |
| 918 | } |
| 919 | |
| 920 | // (X << C1) / C2 -> X * ((1 << C1) / C2) if (1 << C1) is a multiple of C2.
| 921 | if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) { |
| 922 | auto *Mul = BinaryOperator::Create(Instruction::Mul, X, |
| 923 | ConstantInt::get(Ty, Quotient)); |
| 924 | auto *OBO = cast<OverflowingBinaryOperator>(Op0); |
| 925 | Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap()); |
| 926 | Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap()); |
| 927 | return Mul; |
| 928 | } |
| 929 | } |
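| | // Illustration of the two folds above: (X <<nuw 1) /u 8 --> X /u 4 because
| | // 8 is a multiple of (1 << 1), and (X <<nuw 2) /u 2 --> X *nuw 2 because
| | // (1 << 2) is a multiple of 2.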
| 930 | |
| 931 | if (!C2->isNullValue()) // avoid X udiv 0 |
| 932 | if (Instruction *FoldedDiv = foldOpWithConstantIntoOperand(I)) |
| 933 | return FoldedDiv; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 934 | } |
| 935 | |
Craig Topper | 218a359 | 2017-04-17 03:41:47 +0000 | [diff] [blame] | 936 | if (match(Op0, m_One())) { |
Sanjay Patel | 39059d2 | 2018-02-12 14:14:56 +0000 | [diff] [blame] | 937 | assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?"); |
| 938 | if (IsSigned) { |
Craig Topper | 218a359 | 2017-04-17 03:41:47 +0000 | [diff] [blame] | 939 | // If Op1 is 0 then it's undefined behaviour; if Op1 is 1 then the
| 940 | // result is one; if Op1 is -1 then the result is minus one; otherwise
| 941 | // it's zero.
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 942 | Value *Inc = Builder.CreateAdd(Op1, Op0); |
Sanjay Patel | 39059d2 | 2018-02-12 14:14:56 +0000 | [diff] [blame] | 943 | Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3)); |
| 944 | return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0)); |
Craig Topper | 218a359 | 2017-04-17 03:41:47 +0000 | [diff] [blame] | 945 | } else { |
| 946 | // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
| 947 | // result is one; otherwise it's zero.
Sanjay Patel | 39059d2 | 2018-02-12 14:14:56 +0000 | [diff] [blame] | 948 | return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty); |
Nick Lewycky | f0cf8fa | 2014-05-14 03:03:05 +0000 | [diff] [blame] | 949 | } |
| 950 | } |
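| | // Illustration: 1 /s X --> (X + 1 u< 3) ? X : 0, which is 1 for X == 1,
| | // -1 for X == -1, and 0 for any other X (X == 0 is UB); 1 /u X --> zext(X == 1).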
| 951 | |
Benjamin Kramer | 57b3df5 | 2011-04-30 18:16:00 +0000 | [diff] [blame] | 952 | // See if we can fold away this div instruction. |
| 953 | if (SimplifyDemandedInstructionBits(I)) |
| 954 | return &I; |
| 955 | |
Duncan Sands | 771e82a | 2011-01-28 16:51:11 +0000 | [diff] [blame] | 956 | // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y |
Sanjay Patel | 9530f18 | 2018-01-21 16:14:51 +0000 | [diff] [blame] | 957 | Value *X, *Z; |
| 958 | if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1 |
| 959 | if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) || |
| 960 | (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1))))) |
Duncan Sands | 771e82a | 2011-01-28 16:51:11 +0000 | [diff] [blame] | 961 | return BinaryOperator::Create(I.getOpcode(), X, Op1); |
Sanjay Patel | 9530f18 | 2018-01-21 16:14:51 +0000 | [diff] [blame] | 962 | |
| 963 | // (X << Y) / X -> 1 << Y |
| 964 | Value *Y; |
| 965 | if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y)))) |
Sanjay Patel | 39059d2 | 2018-02-12 14:14:56 +0000 | [diff] [blame] | 966 | return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y); |
Sanjay Patel | 9530f18 | 2018-01-21 16:14:51 +0000 | [diff] [blame] | 967 | if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y)))) |
Sanjay Patel | 39059d2 | 2018-02-12 14:14:56 +0000 | [diff] [blame] | 968 | return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 969 | |
Sanjay Patel | 510d647 | 2018-02-11 17:20:32 +0000 | [diff] [blame] | 970 | // X / (X * Y) -> 1 / Y if the multiplication does not overflow. |
| 971 | if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) { |
| 972 | bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap(); |
| 973 | bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap(); |
| 974 | if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) { |
Sanjay Patel | 39059d2 | 2018-02-12 14:14:56 +0000 | [diff] [blame] | 975 | I.setOperand(0, ConstantInt::get(Ty, 1)); |
Sanjay Patel | 510d647 | 2018-02-11 17:20:32 +0000 | [diff] [blame] | 976 | I.setOperand(1, Y); |
| 977 | return &I; |
| 978 | } |
| 979 | } |
| 980 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 981 | return nullptr; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 982 | } |
| 983 | |
Eugene Zelenko | 7f0f9bc | 2017-10-24 21:24:53 +0000 | [diff] [blame] | 984 | static const unsigned MaxDepth = 6; |
| 985 | |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 986 | namespace { |
Eugene Zelenko | 7f0f9bc | 2017-10-24 21:24:53 +0000 | [diff] [blame] | 987 | |
| 988 | using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1, |
| 989 | const BinaryOperator &I, |
| 990 | InstCombiner &IC); |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 991 | |
| 992 | /// \brief Used to maintain state for visitUDivOperand(). |
| 993 | struct UDivFoldAction { |
Eugene Zelenko | 7f0f9bc | 2017-10-24 21:24:53 +0000 | [diff] [blame] | 994 | /// Informs visitUDiv() how to fold this operand. This can be zero if this |
| 995 | /// action joins two actions together. |
| 996 | FoldUDivOperandCb FoldAction; |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 997 | |
Eugene Zelenko | 7f0f9bc | 2017-10-24 21:24:53 +0000 | [diff] [blame] | 998 | /// Which operand to fold. |
| 999 | Value *OperandToFold; |
| 1000 | |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1001 | union { |
Eugene Zelenko | 7f0f9bc | 2017-10-24 21:24:53 +0000 | [diff] [blame] | 1002 | /// The instruction returned when FoldAction is invoked. |
| 1003 | Instruction *FoldResult; |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1004 | |
Eugene Zelenko | 7f0f9bc | 2017-10-24 21:24:53 +0000 | [diff] [blame] | 1005 | /// Stores the LHS action index if this action joins two actions together. |
| 1006 | size_t SelectLHSIdx; |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1007 | }; |
| 1008 | |
| 1009 | UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1010 | : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {} |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1011 | UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS) |
| 1012 | : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {} |
| 1013 | }; |
Eugene Zelenko | 7f0f9bc | 2017-10-24 21:24:53 +0000 | [diff] [blame] | 1014 | |
| 1015 | } // end anonymous namespace |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1016 | |
| 1017 | // X udiv 2^C -> X >> C |
| 1018 | static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1, |
| 1019 | const BinaryOperator &I, InstCombiner &IC) { |
Simon Pilgrim | 94cc89d | 2018-02-08 14:46:10 +0000 | [diff] [blame] | 1020 | Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1)); |
| 1021 | if (!C1) |
| 1022 | llvm_unreachable("Failed to constant fold udiv -> logbase2"); |
| 1023 | BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1); |
Suyog Sarda | 65f5ae9 | 2014-10-07 12:04:07 +0000 | [diff] [blame] | 1024 | if (I.isExact()) |
| 1025 | LShr->setIsExact(); |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1026 | return LShr; |
| 1027 | } |
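| | // For example: udiv i32 %x, 16 --> lshr i32 %x, 4, with the 'exact' flag
| | // carried over when the original udiv has it.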
| 1028 | |
| 1029 | // X udiv C, where C >= signbit |
| 1030 | static Instruction *foldUDivNegCst(Value *Op0, Value *Op1, |
| 1031 | const BinaryOperator &I, InstCombiner &IC) { |
Simon Pilgrim | 9620f4b | 2018-02-09 10:43:59 +0000 | [diff] [blame] | 1032 | Value *ICI = IC.Builder.CreateICmpULT(Op0, cast<Constant>(Op1)); |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1033 | return SelectInst::Create(ICI, Constant::getNullValue(I.getType()), |
| 1034 | ConstantInt::get(I.getType(), 1)); |
| 1035 | } |
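| | // For example (i8): udiv i8 %x, 200 --> select (icmp ult i8 %x, 200), i8 0, i8 1,
| | // since the quotient can only be 0 or 1 once the divisor has its sign bit set.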
| 1036 | |
| 1037 | // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2) |
Andrea Di Biagio | a82d52d | 2016-09-26 12:07:23 +0000 | [diff] [blame] | 1038 | // X udiv (zext (C1 << N)), where C1 is "1<<C2" --> X >> (N+C2) |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1039 | static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I, |
| 1040 | InstCombiner &IC) { |
Andrea Di Biagio | a82d52d | 2016-09-26 12:07:23 +0000 | [diff] [blame] | 1041 | Value *ShiftLeft; |
| 1042 | if (!match(Op1, m_ZExt(m_Value(ShiftLeft)))) |
| 1043 | ShiftLeft = Op1; |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1044 | |
Simon Pilgrim | 2a90acd | 2018-02-08 15:19:38 +0000 | [diff] [blame] | 1045 | Constant *CI; |
Andrea Di Biagio | a82d52d | 2016-09-26 12:07:23 +0000 | [diff] [blame] | 1046 | Value *N; |
Simon Pilgrim | 2a90acd | 2018-02-08 15:19:38 +0000 | [diff] [blame] | 1047 | if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N)))) |
Andrea Di Biagio | a82d52d | 2016-09-26 12:07:23 +0000 | [diff] [blame] | 1048 | llvm_unreachable("match should never fail here!"); |
Simon Pilgrim | 2a90acd | 2018-02-08 15:19:38 +0000 | [diff] [blame] | 1049 | Constant *Log2Base = getLogBase2(N->getType(), CI); |
| 1050 | if (!Log2Base) |
| 1051 | llvm_unreachable("getLogBase2 should never fail here!"); |
| 1052 | N = IC.Builder.CreateAdd(N, Log2Base); |
Andrea Di Biagio | a82d52d | 2016-09-26 12:07:23 +0000 | [diff] [blame] | 1053 | if (Op1 != ShiftLeft) |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1054 | N = IC.Builder.CreateZExt(N, Op1->getType()); |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1055 | BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N); |
Suyog Sarda | 65f5ae9 | 2014-10-07 12:04:07 +0000 | [diff] [blame] | 1056 | if (I.isExact()) |
| 1057 | LShr->setIsExact(); |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1058 | return LShr; |
| 1059 | } |
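| | // For example: udiv %x, (shl 4, %n) --> lshr %x, (%n + 2), since 4 == 1 << 2.
| | // In the zext form, (%n + 2) is computed in the narrow type and then
| | // zero-extended back to the type of the udiv.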
| 1060 | |
| 1061 | // \brief Recursively visits the possible right hand operands of a udiv |
| 1062 | // instruction, seeing through select instructions, to determine if we can |
| 1063 | // replace the udiv with something simpler. If we find that an operand is not |
| 1064 | // able to simplify the udiv, we abort the entire transformation. |
| 1065 | static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I, |
| 1066 | SmallVectorImpl<UDivFoldAction> &Actions, |
| 1067 | unsigned Depth = 0) { |
| 1068 | // Check to see if this is an unsigned division with an exact power of 2;
| 1069 | // if so, convert it to a right shift.
| 1070 | if (match(Op1, m_Power2())) { |
| 1071 | Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1)); |
| 1072 | return Actions.size(); |
| 1073 | } |
| 1074 | |
Simon Pilgrim | 9620f4b | 2018-02-09 10:43:59 +0000 | [diff] [blame] | 1075 | // X udiv C, where C >= signbit |
| 1076 | if (match(Op1, m_Negative())) { |
| 1077 | Actions.push_back(UDivFoldAction(foldUDivNegCst, Op1)); |
| 1078 | return Actions.size(); |
| 1079 | } |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1080 | |
| 1081 | // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2) |
| 1082 | if (match(Op1, m_Shl(m_Power2(), m_Value())) || |
| 1083 | match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) { |
| 1084 | Actions.push_back(UDivFoldAction(foldUDivShl, Op1)); |
| 1085 | return Actions.size(); |
| 1086 | } |
| 1087 | |
| 1088 | // The remaining tests are all recursive, so bail out if we hit the limit. |
| 1089 | if (Depth++ == MaxDepth) |
| 1090 | return 0; |
| 1091 | |
| 1092 | if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) |
David Majnemer | 492e612 | 2014-08-30 09:19:05 +0000 | [diff] [blame] | 1093 | if (size_t LHSIdx = |
| 1094 | visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth)) |
| 1095 | if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) { |
| 1096 | Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1)); |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1097 | return Actions.size(); |
| 1098 | } |
| 1099 | |
| 1100 | return 0; |
| 1101 | } |
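| | // Illustration: for %d = select i1 %c, i32 8, i32 32, both arms are powers of
| | // two, so two shift actions plus a joining select action are recorded, and
| | // udiv %x, %d can become select %c, (lshr %x, 3), (lshr %x, 5).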
| 1102 | |
Sanjay Patel | bb78938 | 2017-08-24 22:54:01 +0000 | [diff] [blame] | 1103 | /// If we have zero-extended operands of an unsigned div or rem, we may be able |
| 1104 | /// to narrow the operation (sink the zext below the math). |
| 1105 | static Instruction *narrowUDivURem(BinaryOperator &I, |
| 1106 | InstCombiner::BuilderTy &Builder) { |
| 1107 | Instruction::BinaryOps Opcode = I.getOpcode(); |
| 1108 | Value *N = I.getOperand(0); |
| 1109 | Value *D = I.getOperand(1); |
| 1110 | Type *Ty = I.getType(); |
| 1111 | Value *X, *Y; |
| 1112 | if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) && |
| 1113 | X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) { |
| 1114 | // udiv (zext X), (zext Y) --> zext (udiv X, Y) |
| 1115 | // urem (zext X), (zext Y) --> zext (urem X, Y) |
| 1116 | Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y); |
| 1117 | return new ZExtInst(NarrowOp, Ty); |
| 1118 | } |
| 1119 | |
| 1120 | Constant *C; |
| 1121 | if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) || |
| 1122 | (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) { |
| 1123 | // If the constant is the same in the smaller type, use the narrow version. |
| 1124 | Constant *TruncC = ConstantExpr::getTrunc(C, X->getType()); |
| 1125 | if (ConstantExpr::getZExt(TruncC, Ty) != C) |
| 1126 | return nullptr; |
| 1127 | |
| 1128 | // udiv (zext X), C --> zext (udiv X, C') |
| 1129 | // urem (zext X), C --> zext (urem X, C') |
| 1130 | // udiv C, (zext X) --> zext (udiv C', X) |
| 1131 | // urem C, (zext X) --> zext (urem C', X) |
| 1132 | Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC) |
| 1133 | : Builder.CreateBinOp(Opcode, TruncC, X); |
| 1134 | return new ZExtInst(NarrowOp, Ty); |
| 1135 | } |
| 1136 | |
| 1137 | return nullptr; |
| 1138 | } |
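| | // For example: udiv (zext i8 %x to i32), (zext i8 %y to i32)
| | //   --> zext (udiv i8 %x, %y) to i32, and
| | // urem (zext i8 %x to i32), 42 --> zext (urem i8 %x, 42) to i32, because the
| | // constant 42 survives the round-trip through i8.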
| 1139 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1140 | Instruction *InstCombiner::visitUDiv(BinaryOperator &I) { |
| 1141 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
| 1142 | |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1143 | if (Value *V = SimplifyVectorOp(I)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1144 | return replaceInstUsesWith(I, V); |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1145 | |
Craig Topper | a420562 | 2017-06-09 03:21:29 +0000 | [diff] [blame] | 1146 | if (Value *V = SimplifyUDivInst(Op0, Op1, SQ.getWithInstruction(&I))) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1147 | return replaceInstUsesWith(I, V); |
Duncan Sands | 771e82a | 2011-01-28 16:51:11 +0000 | [diff] [blame] | 1148 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1149 | // Handle the integer div common cases |
| 1150 | if (Instruction *Common = commonIDivTransforms(I)) |
| 1151 | return Common; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1152 | |
Benjamin Kramer | d4a6471 | 2012-08-30 15:07:40 +0000 | [diff] [blame] | 1153 | // (x lshr C1) udiv C2 --> x udiv (C2 << C1) |
David Majnemer | a252138 | 2014-10-13 21:48:30 +0000 | [diff] [blame] | 1154 | { |
Benjamin Kramer | 9c0a807 | 2012-08-28 13:08:13 +0000 | [diff] [blame] | 1155 | Value *X; |
David Majnemer | a252138 | 2014-10-13 21:48:30 +0000 | [diff] [blame] | 1156 | const APInt *C1, *C2; |
| 1157 | if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && |
| 1158 | match(Op1, m_APInt(C2))) { |
| 1159 | bool Overflow; |
| 1160 | APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow); |
David Majnemer | a3aeb15 | 2014-11-22 18:16:54 +0000 | [diff] [blame] | 1161 | if (!Overflow) { |
| 1162 | bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value())); |
| 1163 | BinaryOperator *BO = BinaryOperator::CreateUDiv( |
David Majnemer | a252138 | 2014-10-13 21:48:30 +0000 | [diff] [blame] | 1164 | X, ConstantInt::get(X->getType(), C2ShlC1)); |
David Majnemer | a3aeb15 | 2014-11-22 18:16:54 +0000 | [diff] [blame] | 1165 | if (IsExact) |
| 1166 | BO->setIsExact(); |
| 1167 | return BO; |
| 1168 | } |
David Majnemer | a252138 | 2014-10-13 21:48:30 +0000 | [diff] [blame] | 1169 | } |
Nadav Rotem | 11935b2 | 2012-08-28 10:01:43 +0000 | [diff] [blame] | 1170 | } |
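| | // Illustration: (%x lshr 2) udiv 10 --> %x udiv 40; in i8, (%x lshr 1) udiv 200
| | // is left alone because 200 << 1 overflows.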
| 1171 | |
Sanjay Patel | bb78938 | 2017-08-24 22:54:01 +0000 | [diff] [blame] | 1172 | if (Instruction *NarrowDiv = narrowUDivURem(I, Builder)) |
| 1173 | return NarrowDiv; |
Benjamin Kramer | 9aa91b1 | 2011-04-30 18:16:07 +0000 | [diff] [blame] | 1174 | |
David Majnemer | 37f8f44 | 2013-07-04 21:17:49 +0000 | [diff] [blame] | 1175 | // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...)))) |
| 1176 | SmallVector<UDivFoldAction, 6> UDivActions; |
| 1177 | if (visitUDivOperand(Op0, Op1, I, UDivActions)) |
| 1178 | for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) { |
| 1179 | FoldUDivOperandCb Action = UDivActions[i].FoldAction; |
| 1180 | Value *ActionOp1 = UDivActions[i].OperandToFold; |
| 1181 | Instruction *Inst; |
| 1182 | if (Action) |
| 1183 | Inst = Action(Op0, ActionOp1, I, *this); |
| 1184 | else { |
| 1185 | // This action joins two actions together. The RHS of this action is
| 1186 | // simply the last action we processed; we saved the LHS action index in
| 1187 | // the joining action.
| 1188 | size_t SelectRHSIdx = i - 1; |
| 1189 | Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult; |
| 1190 | size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx; |
| 1191 | Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult; |
| 1192 | Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(), |
| 1193 | SelectLHS, SelectRHS); |
| 1194 | } |
| 1195 | |
| 1196 | // If this is the last action to process, return it to the InstCombiner. |
| 1197 | // Otherwise, we insert it before the UDiv and record it so that we may |
| 1198 | // use it as part of a joining action (i.e., a SelectInst). |
| 1199 | if (e - i != 1) { |
| 1200 | Inst->insertBefore(&I); |
| 1201 | UDivActions[i].FoldResult = Inst; |
| 1202 | } else |
| 1203 | return Inst; |
| 1204 | } |
| 1205 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1206 | return nullptr; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1207 | } |
| 1208 | |
| 1209 | Instruction *InstCombiner::visitSDiv(BinaryOperator &I) { |
| 1210 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
| 1211 | |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1212 | if (Value *V = SimplifyVectorOp(I)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1213 | return replaceInstUsesWith(I, V); |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1214 | |
Craig Topper | a420562 | 2017-06-09 03:21:29 +0000 | [diff] [blame] | 1215 | if (Value *V = SimplifySDivInst(Op0, Op1, SQ.getWithInstruction(&I))) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1216 | return replaceInstUsesWith(I, V); |
Duncan Sands | 771e82a | 2011-01-28 16:51:11 +0000 | [diff] [blame] | 1217 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1218 | // Handle the integer div common cases |
| 1219 | if (Instruction *Common = commonIDivTransforms(I)) |
| 1220 | return Common; |
| 1221 | |
Sanjay Patel | c6ada53 | 2016-06-27 17:25:57 +0000 | [diff] [blame] | 1222 | const APInt *Op1C; |
Sanjay Patel | bedd1f9 | 2016-06-27 18:38:40 +0000 | [diff] [blame] | 1223 | if (match(Op1, m_APInt(Op1C))) { |
| 1224 | // sdiv X, -1 == -X |
| 1225 | if (Op1C->isAllOnesValue()) |
| 1226 | return BinaryOperator::CreateNeg(Op0); |
| 1227 | |
| 1228 | // sdiv exact X, C --> ashr exact X, log2(C) |
| 1229 | if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) { |
| 1230 | Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2()); |
| 1231 | return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName()); |
| 1232 | } |
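| | // For example: sdiv exact i32 %x, 8 --> ashr exact i32 %x, 3. Without 'exact'
| | // this would be wrong for negative %x, because sdiv rounds toward zero while
| | // ashr rounds toward negative infinity.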
Sanjay Patel | 59ed2ff | 2016-06-27 22:27:11 +0000 | [diff] [blame] | 1233 | |
| 1234 | // If the dividend is sign-extended and the constant divisor is small enough |
| 1235 | // to fit in the source type, shrink the division to the narrower type: |
| 1236 | // (sext X) sdiv C --> sext (X sdiv C) |
| 1237 | Value *Op0Src; |
| 1238 | if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) && |
| 1239 | Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) { |
| 1240 | |
| 1241 | // In the general case, we need to make sure that the dividend is not the |
| 1242 | // minimum signed value because dividing that by -1 is UB. But here, we |
| 1243 | // know that the -1 divisor case is already handled above. |
| 1244 | |
| 1245 | Constant *NarrowDivisor = |
| 1246 | ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType()); |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1247 | Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor); |
Sanjay Patel | 59ed2ff | 2016-06-27 22:27:11 +0000 | [diff] [blame] | 1248 | return new SExtInst(NarrowOp, Op0->getType()); |
| 1249 | } |
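| | // For example: (sext i8 %x to i32) sdiv 3 --> sext (sdiv i8 %x, 3) to i32;
| | // a divisor such as 1000 needs more than 8 bits, so that case is skipped.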
Benjamin Kramer | 72196f3 | 2014-01-19 15:24:22 +0000 | [diff] [blame] | 1250 | } |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1251 | |
Benjamin Kramer | 72196f3 | 2014-01-19 15:24:22 +0000 | [diff] [blame] | 1252 | if (Constant *RHS = dyn_cast<Constant>(Op1)) { |
David Majnemer | f28e2a4 | 2014-07-02 06:42:13 +0000 | [diff] [blame] | 1253 | // X / INT_MIN -> zext(X == INT_MIN)
| 1254 | if (RHS->isMinSignedValue()) |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1255 | return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType()); |
David Majnemer | f28e2a4 | 2014-07-02 06:42:13 +0000 | [diff] [blame] | 1256 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1257 | // -X/C --> X/-C provided the negation doesn't overflow. |
David Majnemer | fa4699e | 2014-11-22 20:00:34 +0000 | [diff] [blame] | 1258 | Value *X; |
| 1259 | if (match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) { |
| 1260 | auto *BO = BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(RHS)); |
| 1261 | BO->setIsExact(I.isExact()); |
| 1262 | return BO; |
| 1263 | } |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1264 | } |
| 1265 | |
| 1266 | // If the sign bits of both operands are zero (i.e. we can prove they are |
| 1267 | // unsigned inputs), turn this into a udiv. |
Craig Topper | bcfd2d1 | 2017-04-20 16:56:25 +0000 | [diff] [blame] | 1268 | APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits())); |
Craig Topper | f248468 | 2017-04-17 01:51:19 +0000 | [diff] [blame] | 1269 | if (MaskedValueIsZero(Op0, Mask, 0, &I)) { |
| 1270 | if (MaskedValueIsZero(Op1, Mask, 0, &I)) { |
| 1271 | // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set |
| 1272 | auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); |
| 1273 | BO->setIsExact(I.isExact()); |
| 1274 | return BO; |
| 1275 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1276 | |
Craig Topper | d4039f7 | 2017-05-25 21:51:12 +0000 | [diff] [blame] | 1277 | if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) { |
Craig Topper | f248468 | 2017-04-17 01:51:19 +0000 | [diff] [blame] | 1278 | // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y) |
| 1279 | // Safe because the only negative value (1 << Y) can take on is |
| 1280 | // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have |
| 1281 | // the sign bit set. |
| 1282 | auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); |
| 1283 | BO->setIsExact(I.isExact()); |
| 1284 | return BO; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1285 | } |
| 1286 | } |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1287 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1288 | return nullptr; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1289 | } |
| 1290 | |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1291 | /// Try to convert X/C into X * (1/C). |
| 1292 | static Instruction *foldFDivConstantDivisor(BinaryOperator &FDiv) { |
Sanjay Patel | 6a0f667 | 2018-02-15 13:55:52 +0000 | [diff] [blame] | 1293 | // TODO: Handle non-splat vector constants. |
| 1294 | const APFloat *C; |
| 1295 | if (!match(FDiv.getOperand(1), m_APFloat(C))) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1296 | return nullptr; |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1297 | |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1298 | // This returns false if the inverse would be a denormal. |
Sanjay Patel | 6a0f667 | 2018-02-15 13:55:52 +0000 | [diff] [blame] | 1299 | APFloat Reciprocal(C->getSemantics()); |
| 1300 | bool HasRecip = C->getExactInverse(&Reciprocal); |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1301 | // If the inverse is not exact, we may still be able to convert if we are |
| 1302 | // not operating with strict math. |
Sanjay Patel | 6a0f667 | 2018-02-15 13:55:52 +0000 | [diff] [blame] | 1303 | if (!HasRecip && FDiv.hasAllowReciprocal() && C->isFiniteNonZero()) { |
| 1304 | Reciprocal = APFloat(C->getSemantics(), 1.0f); |
| 1305 | Reciprocal.divide(*C, APFloat::rmNearestTiesToEven); |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1306 | // Disallow denormal constants because we don't know what would happen |
| 1307 | // on all targets. |
| 1308 | // TODO: Function attributes can tell us that denorms are flushed? |
| 1309 | HasRecip = !Reciprocal.isDenormal(); |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1310 | } |
| 1311 | |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1312 | if (!HasRecip) |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1313 | return nullptr; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1314 | |
Sanjay Patel | 6a0f667 | 2018-02-15 13:55:52 +0000 | [diff] [blame] | 1315 | auto *RecipCFP = ConstantFP::get(FDiv.getType(), Reciprocal); |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1316 | return BinaryOperator::CreateFMul(FDiv.getOperand(0), RecipCFP); |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1317 | } |
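| | // For example: fdiv float %x, 4.0 --> fmul float %x, 0.25 (the reciprocal is
| | // exact); fdiv float %x, 3.0 becomes an fmul only under 'arcp', since the
| | // reciprocal of 3.0 is inexact.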
| 1318 | |
Frits van Bommel | 2a55951 | 2011-01-29 17:50:27 +0000 | [diff] [blame] | 1319 | Instruction *InstCombiner::visitFDiv(BinaryOperator &I) { |
| 1320 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
| 1321 | |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1322 | if (Value *V = SimplifyVectorOp(I)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1323 | return replaceInstUsesWith(I, V); |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1324 | |
Craig Topper | a420562 | 2017-06-09 03:21:29 +0000 | [diff] [blame] | 1325 | if (Value *V = SimplifyFDivInst(Op0, Op1, I.getFastMathFlags(), |
| 1326 | SQ.getWithInstruction(&I))) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1327 | return replaceInstUsesWith(I, V); |
Frits van Bommel | 2a55951 | 2011-01-29 17:50:27 +0000 | [diff] [blame] | 1328 | |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1329 | if (Instruction *FMul = foldFDivConstantDivisor(I)) { |
| 1330 | FMul->copyFastMathFlags(&I); |
| 1331 | return FMul; |
| 1332 | } |
| 1333 | |
Stephen Lin | a9b57f6 | 2013-07-20 07:13:13 +0000 | [diff] [blame] | 1334 | if (isa<Constant>(Op0)) |
| 1335 | if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) |
| 1336 | if (Instruction *R = FoldOpIntoSelect(I, SI)) |
| 1337 | return R; |
| 1338 | |
Sanjay Patel | 629c411 | 2017-11-06 16:27:15 +0000 | [diff] [blame] | 1339 | bool AllowReassociate = I.isFast(); |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1340 | if (Constant *Op1C = dyn_cast<Constant>(Op1)) { |
Stephen Lin | a9b57f6 | 2013-07-20 07:13:13 +0000 | [diff] [blame] | 1341 | if (SelectInst *SI = dyn_cast<SelectInst>(Op0)) |
| 1342 | if (Instruction *R = FoldOpIntoSelect(I, SI)) |
| 1343 | return R; |
| 1344 | |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1345 | if (AllowReassociate) { |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1346 | Constant *C1 = nullptr; |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1347 | Constant *C2 = Op1C; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1348 | Value *X; |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1349 | Instruction *Res = nullptr; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1350 | |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1351 | if (match(Op0, m_FMul(m_Value(X), m_Constant(C1)))) { |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1352 | // (X*C1)/C2 => X * (C1/C2) |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1353 | Constant *C = ConstantExpr::getFDiv(C1, C2); |
Sanjay Patel | 08868e494 | 2018-02-16 22:32:54 +0000 | [diff] [blame^] | 1354 | if (C->isNormalFP()) |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1355 | Res = BinaryOperator::CreateFMul(X, C); |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1356 | } else if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) { |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1357 | // (X/C1)/C2 => X / (C2*C1)
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1358 | Constant *C = ConstantExpr::getFMul(C1, C2); |
Sanjay Patel | 08868e494 | 2018-02-16 22:32:54 +0000 | [diff] [blame^] | 1359 | if (C->isNormalFP()) |
Sanjay Patel | b39bcc0 | 2018-02-14 23:04:17 +0000 | [diff] [blame] | 1360 | Res = BinaryOperator::CreateFDiv(X, C); |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1361 | } |
| 1362 | |
| 1363 | if (Res) { |
| 1364 | Res->setFastMathFlags(I.getFastMathFlags()); |
| 1365 | return Res; |
| 1366 | } |
| 1367 | } |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1368 | return nullptr; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1369 | } |
| 1370 | |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1371 | if (AllowReassociate && isa<Constant>(Op0)) { |
| 1372 | Constant *C1 = cast<Constant>(Op0), *C2; |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1373 | Constant *Fold = nullptr; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1374 | Value *X; |
| 1375 | bool CreateDiv = true; |
| 1376 | |
| 1377 | // C1 / (X*C2) => (C1/C2) / X |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1378 | if (match(Op1, m_FMul(m_Value(X), m_Constant(C2)))) |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1379 | Fold = ConstantExpr::getFDiv(C1, C2); |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1380 | else if (match(Op1, m_FDiv(m_Value(X), m_Constant(C2)))) { |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1381 | // C1 / (X/C2) => (C1*C2) / X |
| 1382 | Fold = ConstantExpr::getFMul(C1, C2); |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1383 | } else if (match(Op1, m_FDiv(m_Constant(C2), m_Value(X)))) { |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1384 | // C1 / (C2/X) => (C1/C2) * X |
| 1385 | Fold = ConstantExpr::getFDiv(C1, C2); |
| 1386 | CreateDiv = false; |
| 1387 | } |
| 1388 | |
Sanjay Patel | 08868e494 | 2018-02-16 22:32:54 +0000 | [diff] [blame^] | 1389 | if (Fold && Fold->isNormalFP()) { |
Benjamin Kramer | 76b15d0 | 2014-01-19 13:36:27 +0000 | [diff] [blame] | 1390 | Instruction *R = CreateDiv ? BinaryOperator::CreateFDiv(Fold, X) |
| 1391 | : BinaryOperator::CreateFMul(X, Fold); |
| 1392 | R->setFastMathFlags(I.getFastMathFlags()); |
| 1393 | return R; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1394 | } |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1395 | return nullptr; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1396 | } |
| 1397 | |
| 1398 | if (AllowReassociate) { |
| 1399 | Value *X, *Y; |
Sanjay Patel | 91bb775 | 2018-02-16 17:52:32 +0000 | [diff] [blame] | 1400 | if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) && |
| 1401 | (!isa<Constant>(Y) || !isa<Constant>(Op1))) { |
| 1402 | // (X / Y) / Z => X / (Y * Z) |
| 1403 | Value *YZ = Builder.CreateFMul(Y, Op1); |
| 1404 | if (auto *YZInst = dyn_cast<Instruction>(YZ)) { |
| 1405 | FastMathFlags FMFIntersect = I.getFastMathFlags(); |
| 1406 | FMFIntersect &= cast<Instruction>(Op0)->getFastMathFlags(); |
| 1407 | YZInst->setFastMathFlags(FMFIntersect); |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1408 | } |
Sanjay Patel | 91bb775 | 2018-02-16 17:52:32 +0000 | [diff] [blame] | 1409 | Instruction *NewDiv = BinaryOperator::CreateFDiv(X, YZ); |
| 1410 | NewDiv->setFastMathFlags(I.getFastMathFlags()); |
| 1411 | return NewDiv; |
Shuxin Yang | 320f52a | 2013-01-14 22:48:41 +0000 | [diff] [blame] | 1412 | } |
Sanjay Patel | 91bb775 | 2018-02-16 17:52:32 +0000 | [diff] [blame] | 1413 | if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) && |
| 1414 | (!isa<Constant>(Y) || !isa<Constant>(Op0))) { |
| 1415 | // Z / (X / Y) => (Y * Z) / X |
| 1416 | Value *YZ = Builder.CreateFMul(Y, Op0); |
| 1417 | if (auto *YZInst = dyn_cast<Instruction>(YZ)) { |
| 1418 | FastMathFlags FMFIntersect = I.getFastMathFlags(); |
| 1419 | FMFIntersect &= cast<Instruction>(Op1)->getFastMathFlags(); |
| 1420 | YZInst->setFastMathFlags(FMFIntersect); |
| 1421 | } |
| 1422 | Instruction *NewDiv = BinaryOperator::CreateFDiv(YZ, X); |
| 1423 | NewDiv->setFastMathFlags(I.getFastMathFlags()); |
| 1424 | return NewDiv; |
Benjamin Kramer | 8564e0d | 2011-03-30 15:42:35 +0000 | [diff] [blame] | 1425 | } |
| 1426 | } |
| 1427 | |
Sanjay Patel | 339b4d3 | 2018-02-15 15:07:12 +0000 | [diff] [blame] | 1428 | if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) { |
Sanjay Patel | 65da14d | 2018-02-16 16:13:20 +0000 | [diff] [blame] | 1429 | // sin(X) / cos(X) -> tan(X) |
| 1430 | // cos(X) / sin(X) -> 1/tan(X) (cotangent) |
| 1431 | Value *X; |
| 1432 | bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) && |
| 1433 | match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X))); |
| 1434 | bool IsCot = |
| 1435 | !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) && |
| 1436 | match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X))); |
Dmitry Venikov | e5fbf59 | 2018-01-11 06:33:00 +0000 | [diff] [blame] | 1437 | |
Sanjay Patel | 65da14d | 2018-02-16 16:13:20 +0000 | [diff] [blame] | 1438 | if ((IsTan || IsCot) && hasUnaryFloatFn(&TLI, I.getType(), LibFunc_tan, |
| 1439 | LibFunc_tanf, LibFunc_tanl)) { |
| 1440 | IRBuilder<> B(&I); |
| 1441 | IRBuilder<>::FastMathFlagGuard FMFGuard(B); |
| 1442 | B.setFastMathFlags(I.getFastMathFlags()); |
| 1443 | AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes(); |
| 1444 | Value *Res = emitUnaryFloatFnCall(X, TLI.getName(LibFunc_tan), B, Attrs); |
| 1445 | if (IsCot) |
| 1446 | Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res); |
| 1447 | return replaceInstUsesWith(I, Res); |
Dmitry Venikov | e5fbf59 | 2018-01-11 06:33:00 +0000 | [diff] [blame] | 1448 | } |
| 1449 | } |
| 1450 | |
Sanjay Patel | 1998cc6 | 2018-02-12 18:38:35 +0000 | [diff] [blame] | 1451 | // -X / -Y -> X / Y |
| 1452 | Value *X, *Y; |
| 1453 | if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) { |
| 1454 | I.setOperand(0, X); |
| 1455 | I.setOperand(1, Y); |
Matt Arsenault | fdb78f8 | 2017-01-10 23:08:54 +0000 | [diff] [blame] | 1456 | return &I; |
| 1457 | } |
| 1458 | |
Sanjay Patel | 4a4f35f | 2018-02-12 19:39:21 +0000 | [diff] [blame] | 1459 | // X / (X * Y) --> 1.0 / Y |
| 1460 | // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
| 1461 | // We can ignore the possibility that X is infinity because INF/INF is NaN. |
| 1462 | if (I.hasNoNaNs() && I.hasAllowReassoc() && |
| 1463 | match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) { |
| 1464 | I.setOperand(0, ConstantFP::get(I.getType(), 1.0)); |
| 1465 | I.setOperand(1, Y); |
| 1466 | return &I; |
| 1467 | } |
| 1468 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1469 | return nullptr; |
Frits van Bommel | 2a55951 | 2011-01-29 17:50:27 +0000 | [diff] [blame] | 1470 | } |
| 1471 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1472 | /// This function implements the transforms common to both integer remainder |
| 1473 | /// instructions (urem and srem). It is called by the visitors to those integer |
| 1474 | /// remainder instructions. |
| 1475 | /// @brief Common integer remainder transforms |
| 1476 | Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) { |
| 1477 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
| 1478 | |
Chris Lattner | 7c99f19 | 2011-05-22 18:18:41 +0000 | [diff] [blame] | 1479 | // The RHS is known non-zero. |
Mehdi Amini | a28d91d | 2015-03-10 02:37:25 +0000 | [diff] [blame] | 1480 | if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) { |
Chris Lattner | 7c99f19 | 2011-05-22 18:18:41 +0000 | [diff] [blame] | 1481 | I.setOperand(1, V); |
| 1482 | return &I; |
| 1483 | } |
| 1484 | |
Duncan Sands | a3e3699 | 2011-05-02 16:27:02 +0000 | [diff] [blame] | 1485 | // Handle cases involving: rem X, (select Cond, Y, Z) |
Sanjay Patel | ae2e3a4 | 2017-10-06 23:20:16 +0000 | [diff] [blame] | 1486 | if (simplifyDivRemOfSelectWithZeroOp(I)) |
Duncan Sands | a3e3699 | 2011-05-02 16:27:02 +0000 | [diff] [blame] | 1487 | return &I; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1488 | |
Benjamin Kramer | 72196f3 | 2014-01-19 15:24:22 +0000 | [diff] [blame] | 1489 | if (isa<Constant>(Op1)) { |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1490 | if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) { |
| 1491 | if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) { |
| 1492 | if (Instruction *R = FoldOpIntoSelect(I, SI)) |
| 1493 | return R; |
Craig Topper | fb71b7d | 2017-04-14 19:20:12 +0000 | [diff] [blame] | 1494 | } else if (auto *PN = dyn_cast<PHINode>(Op0I)) { |
Sanjoy Das | b7e861a | 2016-06-05 21:17:04 +0000 | [diff] [blame] | 1495 | const APInt *Op1Int; |
| 1496 | if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() && |
| 1497 | (I.getOpcode() == Instruction::URem || |
| 1498 | !Op1Int->isMinSignedValue())) { |
Craig Topper | fb71b7d | 2017-04-14 19:20:12 +0000 | [diff] [blame] | 1499 | // foldOpIntoPhi will speculate instructions to the end of the PHI's |
Sanjoy Das | b7e861a | 2016-06-05 21:17:04 +0000 | [diff] [blame] | 1500 | // predecessor blocks, so do this only if we know the srem or urem |
| 1501 | // will not fault. |
Craig Topper | fb71b7d | 2017-04-14 19:20:12 +0000 | [diff] [blame] | 1502 | if (Instruction *NV = foldOpIntoPhi(I, PN)) |
Sanjoy Das | b7e861a | 2016-06-05 21:17:04 +0000 | [diff] [blame] | 1503 | return NV; |
| 1504 | } |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1505 | } |
| 1506 | |
| 1507 | // See if we can fold away this rem instruction. |
| 1508 | if (SimplifyDemandedInstructionBits(I)) |
| 1509 | return &I; |
| 1510 | } |
| 1511 | } |
| 1512 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1513 | return nullptr; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1514 | } |
| 1515 | |
| 1516 | Instruction *InstCombiner::visitURem(BinaryOperator &I) { |
| 1517 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
| 1518 | |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1519 | if (Value *V = SimplifyVectorOp(I)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1520 | return replaceInstUsesWith(I, V); |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1521 | |
Craig Topper | a420562 | 2017-06-09 03:21:29 +0000 | [diff] [blame] | 1522 | if (Value *V = SimplifyURemInst(Op0, Op1, SQ.getWithInstruction(&I))) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1523 | return replaceInstUsesWith(I, V); |
Duncan Sands | a3e3699 | 2011-05-02 16:27:02 +0000 | [diff] [blame] | 1524 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1525 | if (Instruction *common = commonIRemTransforms(I)) |
| 1526 | return common; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1527 | |
Sanjay Patel | bb78938 | 2017-08-24 22:54:01 +0000 | [diff] [blame] | 1528 | if (Instruction *NarrowRem = narrowUDivURem(I, Builder)) |
| 1529 | return NarrowRem; |
David Majnemer | 6c30f49 | 2013-05-12 00:07:05 +0000 | [diff] [blame] | 1530 | |
David Majnemer | 470b077 | 2013-05-11 09:01:28 +0000 | [diff] [blame] | 1531 | // X urem Y -> X & (Y - 1), where Y is a power of 2.
Craig Topper | d4039f7 | 2017-05-25 21:51:12 +0000 | [diff] [blame] | 1532 | if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) { |
Chris Lattner | 6b657ae | 2011-02-10 05:36:31 +0000 | [diff] [blame] | 1533 | Constant *N1 = Constant::getAllOnesValue(I.getType()); |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1534 | Value *Add = Builder.CreateAdd(Op1, N1); |
Chris Lattner | 6b657ae | 2011-02-10 05:36:31 +0000 | [diff] [blame] | 1535 | return BinaryOperator::CreateAnd(Op0, Add); |
| 1536 | } |
| 1537 | |
Nick Lewycky | 7459be6 | 2013-07-13 01:16:47 +0000 | [diff] [blame] | 1538 | // 1 urem X -> zext(X != 1) |
| 1539 | if (match(Op0, m_One())) { |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1540 | Value *Cmp = Builder.CreateICmpNE(Op1, Op0); |
| 1541 | Value *Ext = Builder.CreateZExt(Cmp, I.getType()); |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1542 | return replaceInstUsesWith(I, Ext); |
Nick Lewycky | 7459be6 | 2013-07-13 01:16:47 +0000 | [diff] [blame] | 1543 | } |
| 1544 | |
Sanjay Patel | 30ef70b | 2016-09-22 22:36:26 +0000 | [diff] [blame] | 1545 | // X urem C -> X < C ? X : X - C, where C >= signbit. |
Simon Pilgrim | 1889f26 | 2018-02-08 18:36:01 +0000 | [diff] [blame] | 1546 | if (match(Op1, m_Negative())) { |
Craig Topper | bb4069e | 2017-07-07 23:16:26 +0000 | [diff] [blame] | 1547 | Value *Cmp = Builder.CreateICmpULT(Op0, Op1); |
| 1548 | Value *Sub = Builder.CreateSub(Op0, Op1); |
Sanjay Patel | 30ef70b | 2016-09-22 22:36:26 +0000 | [diff] [blame] | 1549 | return SelectInst::Create(Cmp, Op0, Sub); |
| 1550 | } |
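| | // For example (i8): urem i8 %x, 200 --> (%x u< 200) ? %x : %x - 200, since
| | // the quotient is either 0 or 1 when the divisor has its sign bit set.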
| 1551 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1552 | return nullptr; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1553 | } |
| 1554 | |
| 1555 | Instruction *InstCombiner::visitSRem(BinaryOperator &I) { |
| 1556 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
| 1557 | |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1558 | if (Value *V = SimplifyVectorOp(I)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1559 | return replaceInstUsesWith(I, V); |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1560 | |
Craig Topper | a420562 | 2017-06-09 03:21:29 +0000 | [diff] [blame] | 1561 | if (Value *V = SimplifySRemInst(Op0, Op1, SQ.getWithInstruction(&I))) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1562 | return replaceInstUsesWith(I, V); |
Duncan Sands | a3e3699 | 2011-05-02 16:27:02 +0000 | [diff] [blame] | 1563 | |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1564 | // Handle the integer rem common cases |
| 1565 | if (Instruction *Common = commonIRemTransforms(I)) |
| 1566 | return Common; |
Jim Grosbach | bdbd734 | 2013-04-05 21:20:12 +0000 | [diff] [blame] | 1567 | |
David Majnemer | db07730 | 2014-10-13 22:37:51 +0000 | [diff] [blame] | 1568 | { |
| 1569 | const APInt *Y; |
| 1570 | // X % -Y -> X % Y |
Simon Pilgrim | a54e8e4 | 2018-02-08 19:00:45 +0000 | [diff] [blame] | 1571 | if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue()) { |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1572 | Worklist.AddValue(I.getOperand(1)); |
David Majnemer | db07730 | 2014-10-13 22:37:51 +0000 | [diff] [blame] | 1573 | I.setOperand(1, ConstantInt::get(I.getType(), -*Y)); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1574 | return &I; |
| 1575 | } |
David Majnemer | db07730 | 2014-10-13 22:37:51 +0000 | [diff] [blame] | 1576 | } |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1577 | |
| 1578 | // If the sign bits of both operands are zero (i.e. we can prove they are |
| 1579 | // unsigned inputs), turn this into a urem. |
Craig Topper | bcfd2d1 | 2017-04-20 16:56:25 +0000 | [diff] [blame] | 1580 | APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits())); |
Craig Topper | 1a18a7c | 2017-04-17 01:51:24 +0000 | [diff] [blame] | 1581 | if (MaskedValueIsZero(Op1, Mask, 0, &I) && |
| 1582 | MaskedValueIsZero(Op0, Mask, 0, &I)) { |
| 1583 | // X srem Y -> X urem Y, iff X and Y don't have sign bit set |
| 1584 | return BinaryOperator::CreateURem(Op0, Op1, I.getName()); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1585 | } |
| 1586 | |
| 1587 | // If it's a constant vector, flip any negative values positive. |
Chris Lattner | 0256be9 | 2012-01-27 03:08:05 +0000 | [diff] [blame] | 1588 | if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) { |
| 1589 | Constant *C = cast<Constant>(Op1); |
| 1590 | unsigned VWidth = C->getType()->getVectorNumElements(); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1591 | |
| 1592 | bool hasNegative = false; |
Chris Lattner | 0256be9 | 2012-01-27 03:08:05 +0000 | [diff] [blame] | 1593 | bool hasMissing = false; |
| 1594 | for (unsigned i = 0; i != VWidth; ++i) { |
| 1595 | Constant *Elt = C->getAggregateElement(i); |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1596 | if (!Elt) { |
Chris Lattner | 0256be9 | 2012-01-27 03:08:05 +0000 | [diff] [blame] | 1597 | hasMissing = true; |
| 1598 | break; |
| 1599 | } |
| 1600 | |
| 1601 | if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt)) |
Chris Lattner | b1a1512 | 2011-07-15 06:08:15 +0000 | [diff] [blame] | 1602 | if (RHS->isNegative()) |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1603 | hasNegative = true; |
Chris Lattner | 0256be9 | 2012-01-27 03:08:05 +0000 | [diff] [blame] | 1604 | } |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1605 | |
Chris Lattner | 0256be9 | 2012-01-27 03:08:05 +0000 | [diff] [blame] | 1606 | if (hasNegative && !hasMissing) { |
Chris Lattner | 47a86bd | 2012-01-25 06:02:56 +0000 | [diff] [blame] | 1607 | SmallVector<Constant *, 16> Elts(VWidth); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1608 | for (unsigned i = 0; i != VWidth; ++i) { |
Chris Lattner | 8213c8a | 2012-02-06 21:56:39 +0000 | [diff] [blame] | 1609 | Elts[i] = C->getAggregateElement(i); // Handle undef, etc. |
Chris Lattner | 0256be9 | 2012-01-27 03:08:05 +0000 | [diff] [blame] | 1610 | if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) { |
Chris Lattner | b1a1512 | 2011-07-15 06:08:15 +0000 | [diff] [blame] | 1611 | if (RHS->isNegative()) |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1612 | Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS)); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1613 | } |
| 1614 | } |
| 1615 | |
| 1616 | Constant *NewRHSV = ConstantVector::get(Elts); |
Chris Lattner | 0256be9 | 2012-01-27 03:08:05 +0000 | [diff] [blame] | 1617 | if (NewRHSV != C) { // Don't loop on -MININT |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1618 | Worklist.AddValue(I.getOperand(1)); |
| 1619 | I.setOperand(1, NewRHSV); |
| 1620 | return &I; |
| 1621 | } |
| 1622 | } |
| 1623 | } |
| 1624 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1625 | return nullptr; |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1626 | } |
| 1627 | |
| 1628 | Instruction *InstCombiner::visitFRem(BinaryOperator &I) { |
Duncan Sands | a3e3699 | 2011-05-02 16:27:02 +0000 | [diff] [blame] | 1629 | Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); |
Chris Lattner | dc054bf | 2010-01-05 06:09:35 +0000 | [diff] [blame] | 1630 | |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1631 | if (Value *V = SimplifyVectorOp(I)) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1632 | return replaceInstUsesWith(I, V); |
Serge Pavlov | 9ef66a8 | 2014-05-11 08:46:12 +0000 | [diff] [blame] | 1633 | |
Craig Topper | a420562 | 2017-06-09 03:21:29 +0000 | [diff] [blame] | 1634 | if (Value *V = SimplifyFRemInst(Op0, Op1, I.getFastMathFlags(), |
| 1635 | SQ.getWithInstruction(&I))) |
Sanjay Patel | 4b19880 | 2016-02-01 22:23:39 +0000 | [diff] [blame] | 1636 | return replaceInstUsesWith(I, V); |
Duncan Sands | a3e3699 | 2011-05-02 16:27:02 +0000 | [diff] [blame] | 1637 | |
Craig Topper | f40110f | 2014-04-25 05:29:35 +0000 | [diff] [blame] | 1638 | return nullptr; |
Duncan Sands | a3e3699 | 2011-05-02 16:27:02 +0000 | [diff] [blame] | 1639 | } |