InstCombine: Restrict computeKnownBits() on all Values to OptLevel > 2
As part of r251146, InstCombine was extended to call computeKnownBits on
every value in the function to determine whether it happens to be
constant. This increases typical compile time by 1-3% (5% in irgen+opt
time) in my measurements. On the other hand, this case did not trigger
even once across the whole llvm-testsuite.
This patch introduces the notion of ExpensiveCombines, which are only
enabled for OptLevel > 2. I removed the check in InstructionSimplify, as
it is called from various places where the OptLevel is not known, but
given the rarity of the situation I think a check in InstCombine is
enough.
Differential Revision: http://reviews.llvm.org/D16835
llvm-svn: 263047
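
For context, here is a minimal self-contained sketch of the gating pattern the
patch uses. The FunctionPass/PassManager stubs below are stand-ins for
illustration only, not the real LLVM classes; only the ExpensiveCombines flag
and the OptLevel > 2 threshold come from this change:

    #include <memory>
    #include <vector>

    // Stand-in for llvm::FunctionPass, for illustration only.
    struct FunctionPass { virtual ~FunctionPass() = default; };

    // After this change, the real createInstructionCombiningPass() takes the
    // ExpensiveCombines flag; this stub merely records it.
    struct InstCombinePass : FunctionPass {
      explicit InstCombinePass(bool ExpensiveCombines)
          : ExpensiveCombines(ExpensiveCombines) {}
      bool ExpensiveCombines; // Gates the whole-value computeKnownBits() fold.
    };

    // Stand-in for legacy::PassManagerBase.
    struct PassManager {
      std::vector<std::unique_ptr<FunctionPass>> Passes;
      void add(std::unique_ptr<FunctionPass> P) {
        Passes.push_back(std::move(P));
      }
    };

    // Mirrors PassManagerBuilder::addInstructionCombiningPass() from the
    // diff below: expensive combines are only requested above -O2.
    void addInstructionCombiningPass(PassManager &PM, unsigned OptLevel) {
      bool ExpensiveCombines = OptLevel > 2;
      PM.add(std::make_unique<InstCombinePass>(ExpensiveCombines));
    }

Inside InstCombine itself, the expensive case then reduces to an early
"if (!ExpensiveCombines) skip the computeKnownBits() fold" check, which is
why a single flag on the pass construction is sufficient.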
diff --git a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index c490e06..ae4d802 100644
--- a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -185,6 +185,12 @@
PM.add(createScopedNoAliasAAWrapperPass());
}
+void PassManagerBuilder::addInstructionCombiningPass(
+ legacy::PassManagerBase &PM) const {
+ bool ExpensiveCombines = OptLevel > 2;
+ PM.add(createInstructionCombiningPass(ExpensiveCombines));
+}
+
void PassManagerBuilder::populateFunctionPassManager(
legacy::FunctionPassManager &FPM) {
addExtensionsToPM(EP_EarlyAsPossible, FPM);
@@ -230,7 +236,8 @@
MPM.add(createJumpThreadingPass()); // Thread jumps.
MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
- MPM.add(createInstructionCombiningPass()); // Combine silly seq's
+ // Combine silly seq's
+ addInstructionCombiningPass(MPM);
addExtensionsToPM(EP_Peephole, MPM);
MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
@@ -238,7 +245,7 @@
MPM.add(createReassociatePass()); // Reassociate expressions
if (PrepareForThinLTO) {
MPM.add(createAggressiveDCEPass()); // Delete dead instructions
- MPM.add(createInstructionCombiningPass()); // Combine silly seq's
+ addInstructionCombiningPass(MPM); // Combine silly seq's
return;
}
// Rotate Loop - disable header duplication at -Oz
@@ -246,7 +253,7 @@
MPM.add(createLICMPass()); // Hoist loop invariants
MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
MPM.add(createCFGSimplificationPass());
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars
MPM.add(createLoopIdiomPass()); // Recognize idioms like memset.
MPM.add(createLoopDeletionPass()); // Delete dead loops
@@ -273,7 +280,7 @@
// Run instcombine after redundancy elimination to exploit opportunities
// opened up by them.
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
addExtensionsToPM(EP_Peephole, MPM);
MPM.add(createJumpThreadingPass()); // Thread jumps
MPM.add(createCorrelatedValuePropagationPass());
@@ -290,7 +297,7 @@
if (BBVectorize) {
MPM.add(createBBVectorizePass());
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
addExtensionsToPM(EP_Peephole, MPM);
if (OptLevel > 1 && UseGVNAfterVectorization)
MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
@@ -308,7 +315,8 @@
MPM.add(createAggressiveDCEPass()); // Delete dead instructions
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
- MPM.add(createInstructionCombiningPass()); // Clean up after everything.
+ // Clean up after everything.
+ addInstructionCombiningPass(MPM);
addExtensionsToPM(EP_Peephole, MPM);
}
@@ -359,7 +367,7 @@
MPM.add(createDeadArgEliminationPass()); // Dead argument elimination
- MPM.add(createInstructionCombiningPass()); // Clean up after IPCP & DAE
+ addInstructionCombiningPass(MPM); // Clean up after IPCP & DAE
addExtensionsToPM(EP_Peephole, MPM);
MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
}
@@ -480,7 +488,7 @@
// on -O1 and no #pragma is found). Would be good to have these two passes
// as function calls, so that we can only pass them when the vectorizer
// changed the code.
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
if (OptLevel > 1 && ExtraVectorizerPasses) {
// At higher optimization levels, try to clean up any runtime overlap and
// alignment checks inserted by the vectorizer. We want to track correllated
@@ -490,11 +498,11 @@
// dead (or speculatable) control flows or more combining opportunities.
MPM.add(createEarlyCSEPass());
MPM.add(createCorrelatedValuePropagationPass());
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
MPM.add(createLICMPass());
MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
MPM.add(createCFGSimplificationPass());
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
}
if (RunSLPAfterLoopVectorization) {
@@ -507,7 +515,7 @@
if (BBVectorize) {
MPM.add(createBBVectorizePass());
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
addExtensionsToPM(EP_Peephole, MPM);
if (OptLevel > 1 && UseGVNAfterVectorization)
MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
@@ -522,13 +530,13 @@
addExtensionsToPM(EP_Peephole, MPM);
MPM.add(createCFGSimplificationPass());
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
if (!DisableUnrollLoops) {
MPM.add(createLoopUnrollPass()); // Unroll small loops
// LoopUnroll may generate some redundency to cleanup.
- MPM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(MPM);
// Runtime unrolling will introduce runtime check in loop prologue. If the
// unrolled loop is a inner loop, then the prologue will be inside the
@@ -595,7 +603,7 @@
// simplification opportunities, and both can propagate functions through
// function pointers. When this happens, we often have to resolve varargs
// calls, etc, so let instcombine do this.
- PM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(PM);
addExtensionsToPM(EP_Peephole, PM);
// Inline small functions
@@ -617,7 +625,7 @@
PM.add(createArgumentPromotionPass());
// The IPO passes may leave cruft around. Clean up after them.
- PM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(PM);
addExtensionsToPM(EP_Peephole, PM);
PM.add(createJumpThreadingPass());
@@ -656,10 +664,10 @@
// Now that we've optimized loops (in particular loop induction variables),
// we may have exposed more scalar opportunities. Run parts of the scalar
// optimizer again at this point.
- PM.add(createInstructionCombiningPass()); // Initial cleanup
+ addInstructionCombiningPass(PM); // Initial cleanup
PM.add(createCFGSimplificationPass()); // if-convert
PM.add(createSCCPPass()); // Propagate exposed constants
- PM.add(createInstructionCombiningPass()); // Clean up again
+ addInstructionCombiningPass(PM); // Clean up again
PM.add(createBitTrackingDCEPass());
// More scalar chains could be vectorized due to more alias information
@@ -675,7 +683,7 @@
PM.add(createLoadCombinePass());
// Cleanup and simplify the code after the scalar optimizations.
- PM.add(createInstructionCombiningPass());
+ addInstructionCombiningPass(PM);
addExtensionsToPM(EP_Peephole, PM);
PM.add(createJumpThreadingPass());