[InstCombine] fold (X >>u C) << C --> X & (-1 << C)
We already have this fold when the lshr has one use, but it doesn't need that
restriction. We may be able to remove some code from foldShiftedShift().
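For reference, the identity behind the fold is easy to sanity-check
exhaustively at a small bit width. This is only an illustrative sketch in
standalone C++ (not part of the patch), using 8-bit values to stand in for
the IR type:

  #include <cassert>
  #include <cstdint>

  // (X >>u C) << C  ==  X & (-1 << C): both sides clear the low C bits.
  int main() {
    for (unsigned C = 0; C < 8; ++C) {
      uint8_t Mask = uint8_t(0xFFu << C); // -1 << C, truncated to 8 bits
      for (unsigned V = 0; V < 256; ++V) {
        uint8_t X = uint8_t(V);
        assert(uint8_t((X >> C) << C) == uint8_t(X & Mask));
      }
    }
    return 0;
  }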
Also, move the similar fold:
(X << C) >>u C --> X & (-1 >>u C)
...directly into visitLShr to help clean up foldShiftByConstOfShiftByConst().
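The "-1 << C" and "-1 >>u C" masks above map directly onto the APInt helpers
used in the new code (APInt::getHighBitsSet / APInt::getLowBitsSet). The
snippet below is only an illustrative sanity check, assuming LLVM's APInt
header is available to build against; checkMasks() and its driver are
made-up names for this example and are not part of the change:

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using namespace llvm;

  // Illustration only: the masks built via getHighBitsSet/getLowBitsSet are
  // exactly the "-1 << C" and "-1 >>u C" values named in the fold comments.
  void checkMasks(unsigned BitWidth, unsigned ShAmt) {
    APInt AllOnes = APInt::getAllOnesValue(BitWidth); // -1 in BitWidth bits
    // -1 << C has the high (BitWidth - C) bits set.
    assert(AllOnes.shl(ShAmt) ==
           APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
    // -1 >>u C has the low (BitWidth - C) bits set.
    assert(AllOnes.lshr(ShAmt) ==
           APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
  }

  int main() {
    const unsigned Widths[] = {8, 16, 32, 64};
    for (unsigned BitWidth : Widths)
      for (unsigned ShAmt = 0; ShAmt < BitWidth; ++ShAmt)
        checkMasks(BitWidth, ShAmt);
    return 0;
  }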
foldShiftByConstOfShiftByConst() itself seems questionable: it is called from
commonShiftTransforms(), but there is not much actually in common if every
fold has to check the shift opcodes anyway.
llvm-svn: 293215
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index a4ff5f7..6507bb9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -360,21 +360,8 @@
   if (ShiftAmt1 == 0)
     return nullptr; // Will be simplified in the future.
 
-  if (ShiftAmt1 == ShiftAmt2) {
-    // FIXME: This repeats a fold that exists in foldShiftedShift(), but we're
-    // not handling the related fold here:
-    // (X >>u C) << C --> X & (-1 << C).
-    // foldShiftedShift() is always called before this, but it is restricted to
-    // only handle cases where the ShiftOp has one use. We don't have that
-    // restriction here.
-    if (I.getOpcode() != Instruction::LShr ||
-        ShiftOp->getOpcode() != Instruction::Shl)
-      return nullptr;
-
-    // (X << C) >>u C --> X & (-1 >>u C).
-    APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
-    return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
-  }
+  if (ShiftAmt1 == ShiftAmt2)
+    return nullptr;
 
   // FIXME: Everything under here should be extended to work with vector types.
@@ -714,6 +701,7 @@
   const APInt *ShAmtAPInt;
   if (match(Op1, m_APInt(ShAmtAPInt))) {
     unsigned ShAmt = ShAmtAPInt->getZExtValue();
+    unsigned BitWidth = I.getType()->getScalarSizeInBits();
 
     // shl (zext X), ShAmt --> zext (shl X, ShAmt)
     // This is only valid if X would have zeros shifted out.
@@ -725,11 +713,15 @@
         return new ZExtInst(Builder->CreateShl(X, ShAmt), I.getType());
     }
 
+    // (X >>u C) << C --> X & (-1 << C)
+    if (match(Op0, m_LShr(m_Value(X), m_Specific(Op1)))) {
+      APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
+      return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
+    }
+
     // If the shifted-out value is known-zero, then this is a NUW shift.
     if (!I.hasNoUnsignedWrap() &&
-        MaskedValueIsZero(
-            Op0, APInt::getHighBitsSet(ShAmtAPInt->getBitWidth(), ShAmt), 0,
-            &I)) {
+        MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmt), 0, &I)) {
       I.setHasNoUnsignedWrap();
       return &I;
     }
@@ -780,6 +772,13 @@
       return new ZExtInst(Cmp, II->getType());
     }
 
+    // (X << C) >>u C --> X & (-1 >>u C)
+    Value *X;
+    if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1)))) {
+      APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
+      return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
+    }
+
     // If the shifted-out value is known-zero, then this is an exact shift.
     if (!I.isExact() &&
         MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {