Revert "Reland "r364412 [ExpandMemCmp][MergeICmps] Move passes out of CodeGen into opt pipeline.""

This reverts commit r371502 because it broke a test
(clang/test/CodeGenCXX/auto-var-init.cpp).

llvm-svn: 371507
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index 63e553f..3cf0c60 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -21,6 +21,7 @@
   EarlyIfConversion.cpp
   EdgeBundles.cpp
   ExecutionDomainFix.cpp
+  ExpandMemCmp.cpp
   ExpandPostRAPseudos.cpp
   ExpandReductions.cpp
   FaultMaps.cpp
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index 095f86e..d369160 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -31,6 +31,7 @@
   initializeEarlyIfPredicatorPass(Registry);
   initializeEarlyMachineLICMPass(Registry);
   initializeEarlyTailDuplicatePass(Registry);
+  initializeExpandMemCmpPassPass(Registry);
   initializeExpandPostRAPass(Registry);
   initializeFEntryInserterPass(Registry);
   initializeFinalizeISelPass(Registry);
diff --git a/llvm/lib/Transforms/Scalar/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
similarity index 89%
rename from llvm/lib/Transforms/Scalar/ExpandMemCmp.cpp
rename to llvm/lib/CodeGen/ExpandMemCmp.cpp
index 801df77..9916f2d 100644
--- a/llvm/lib/Transforms/Scalar/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -13,15 +13,13 @@
 
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/DomTreeUpdater.h"
-#include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
-#include "llvm/IR/Dominators.h"
 #include "llvm/IR/IRBuilder.h"
-#include "llvm/Transforms/Scalar.h"
 
 using namespace llvm;
 
@@ -48,6 +46,7 @@
 
 namespace {
 
+
 // This class provides helper functions to expand a memcmp library call into an
 // inline expansion.
 class MemCmpExpansion {
@@ -66,18 +65,18 @@
   uint64_t NumLoadsNonOneByte;
   const uint64_t NumLoadsPerBlockForZeroCmp;
   std::vector<BasicBlock *> LoadCmpBlocks;
-  BasicBlock *EndBlock = nullptr;
+  BasicBlock *EndBlock;
   PHINode *PhiRes;
   const bool IsUsedForZeroCmp;
   const DataLayout &DL;
   IRBuilder<> Builder;
-  DomTreeUpdater DTU;
   // Represents the decomposition in blocks of the expansion. For example,
   // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
   // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {32, 1}.
   struct LoadEntry {
     LoadEntry(unsigned LoadSize, uint64_t Offset)
-        : LoadSize(LoadSize), Offset(Offset) {}
+        : LoadSize(LoadSize), Offset(Offset) {
+    }
 
     // The size of the load for this block, in bytes.
     unsigned LoadSize;
@@ -114,8 +113,7 @@
 public:
   MemCmpExpansion(CallInst *CI, uint64_t Size,
                   const TargetTransformInfo::MemCmpExpansionOptions &Options,
-                  const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
-                  DominatorTree *DT);
+                  const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout);
 
   unsigned getNumBlocks();
   uint64_t getNumLoads() const { return LoadSequence.size(); }
@@ -204,13 +202,10 @@
 MemCmpExpansion::MemCmpExpansion(
     CallInst *const CI, uint64_t Size,
     const TargetTransformInfo::MemCmpExpansionOptions &Options,
-    const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
-    DominatorTree *DT)
+    const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout)
     : CI(CI), Size(Size), MaxLoadSize(0), NumLoadsNonOneByte(0),
       NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
-      IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI),
-      DTU(DT, /*PostDominator*/ nullptr,
-          DomTreeUpdater::UpdateStrategy::Eager) {
+      IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI) {
   assert(Size > 0 && "zero blocks");
   // Scale the max size down if the target can load more bytes than we need.
   llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
@@ -250,7 +245,6 @@
 }
 
 void MemCmpExpansion::createLoadCmpBlocks() {
-  assert(ResBlock.BB && "ResBlock must be created before LoadCmpBlocks");
   for (unsigned i = 0; i < getNumBlocks(); i++) {
     BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
                                         EndBlock->getParent(), EndBlock);
@@ -259,7 +253,6 @@
 }
 
 void MemCmpExpansion::createResultBlock() {
-  assert(EndBlock && "EndBlock must be created before ResultBlock");
   ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
                                    EndBlock->getParent(), EndBlock);
 }
@@ -284,8 +277,7 @@
 // final phi node for selecting the memcmp result.
 void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
                                                unsigned OffsetBytes) {
-  BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
-  Builder.SetInsertPoint(BB);
+  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
   Type *LoadSizeType = Type::getInt8Ty(CI->getContext());
   Value *Source1 =
       getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType, OffsetBytes);
@@ -306,16 +298,13 @@
     // next LoadCmpBlock,
     Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
                                     ConstantInt::get(Diff->getType(), 0));
-    BasicBlock *const NextBB = LoadCmpBlocks[BlockIndex + 1];
-    BranchInst *CmpBr = BranchInst::Create(EndBlock, NextBB, Cmp);
+    BranchInst *CmpBr =
+        BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp);
     Builder.Insert(CmpBr);
-    DTU.applyUpdates({{DominatorTree::Insert, BB, EndBlock},
-                      {DominatorTree::Insert, BB, NextBB}});
   } else {
     // The last block has an unconditional branch to EndBlock.
     BranchInst *CmpBr = BranchInst::Create(EndBlock);
     Builder.Insert(CmpBr);
-    DTU.applyUpdates({{DominatorTree::Insert, BB, EndBlock}});
   }
 }
 
@@ -423,17 +412,14 @@
   // continue to next LoadCmpBlock or EndBlock.
   BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
   Builder.Insert(CmpBr);
-  BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
 
   // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
   // since early exit to ResultBlock was not taken (no difference was found in
   // any of the bytes).
   if (BlockIndex == LoadCmpBlocks.size() - 1) {
     Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
-    PhiRes->addIncoming(Zero, BB);
+    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
   }
-  DTU.applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
-                    {DominatorTree::Insert, BB, NextBB}});
 }
 
 // This function creates the IR intructions for loading and comparing using the
@@ -459,8 +445,7 @@
   Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
   assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
 
-  BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
-  Builder.SetInsertPoint(BB);
+  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
 
   Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
                                            CurLoadEntry.Offset);
@@ -504,10 +489,8 @@
   // any of the bytes).
   if (BlockIndex == LoadCmpBlocks.size() - 1) {
     Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
-    PhiRes->addIncoming(Zero, BB);
+    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
   }
-  DTU.applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
-                    {DominatorTree::Insert, BB, NextBB}});
 }
 
 // This function populates the ResultBlock with a sequence to calculate the
@@ -523,7 +506,6 @@
     PhiRes->addIncoming(Res, ResBlock.BB);
     BranchInst *NewBr = BranchInst::Create(EndBlock);
     Builder.Insert(NewBr);
-    DTU.applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
     return;
   }
   BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
@@ -539,7 +521,6 @@
   BranchInst *NewBr = BranchInst::Create(EndBlock);
   Builder.Insert(NewBr);
   PhiRes->addIncoming(Res, ResBlock.BB);
-  DTU.applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
 }
 
 void MemCmpExpansion::setupResultBlockPHINodes() {
@@ -631,7 +612,6 @@
   if (getNumBlocks() != 1) {
     BasicBlock *StartBlock = CI->getParent();
     EndBlock = StartBlock->splitBasicBlock(CI, "endblock");
-    DTU.applyUpdates({{DominatorTree::Insert, StartBlock, EndBlock}});
     setupEndBlockPHINodes();
     createResultBlock();
 
@@ -639,18 +619,14 @@
     // calculate which source was larger. The calculation requires the
     // two loaded source values of each load compare block.
     // These will be saved in the phi nodes created by setupResultBlockPHINodes.
-    if (!IsUsedForZeroCmp)
-      setupResultBlockPHINodes();
+    if (!IsUsedForZeroCmp) setupResultBlockPHINodes();
 
     // Create the number of required load compare basic blocks.
     createLoadCmpBlocks();
 
     // Update the terminator added by splitBasicBlock to branch to the first
     // LoadCmpBlock.
-    BasicBlock *const FirstLoadBB = LoadCmpBlocks[0];
-    StartBlock->getTerminator()->setSuccessor(0, FirstLoadBB);
-    DTU.applyUpdates({{DominatorTree::Delete, StartBlock, EndBlock},
-                      {DominatorTree::Insert, StartBlock, FirstLoadBB}});
+    StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
   }
 
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
@@ -744,7 +720,7 @@
 ///  %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
 ///  ret i32 %phi.res
 static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
-                         const DataLayout *DL, DominatorTree *DT) {
+                         const TargetLowering *TLI, const DataLayout *DL) {
   NumMemCmpCalls++;
 
   // Early exit from expansion if -Oz.
@@ -767,8 +743,7 @@
   const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
   auto Options = TTI->enableMemCmpExpansion(CI->getFunction()->hasOptSize(),
                                             IsUsedForZeroCmp);
-  if (!Options)
-    return false;
+  if (!Options) return false;
 
   if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
     Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;
@@ -780,7 +755,7 @@
   if (!CI->getFunction()->hasOptSize() && MaxLoadsPerMemcmp.getNumOccurrences())
     Options.MaxNumLoads = MaxLoadsPerMemcmp;
 
-  MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL, DT);
+  MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL);
 
   // Don't expand if this will require more loads than desired by the target.
   if (Expansion.getNumLoads() == 0) {
@@ -799,6 +774,8 @@
   return true;
 }
 
+
+
 class ExpandMemCmpPass : public FunctionPass {
 public:
   static char ID;
@@ -808,17 +785,20 @@
   }
 
   bool runOnFunction(Function &F) override {
-    if (skipFunction(F))
+    if (skipFunction(F)) return false;
+
+    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+    if (!TPC) {
       return false;
+    }
+    const TargetLowering* TL =
+        TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();
 
     const TargetLibraryInfo *TLI =
         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
     const TargetTransformInfo *TTI =
         &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
-    // ExpandMemCmp does not need the DominatorTree, but we update it if it's
-    // already available.
-    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
-    auto PA = runImpl(F, TLI, TTI, DTWP ? &DTWP->getDomTree() : nullptr);
+    auto PA = runImpl(F, TLI, TTI, TL);
     return !PA.areAllPreserved();
   }
 
@@ -826,24 +806,23 @@
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<TargetLibraryInfoWrapperPass>();
     AU.addRequired<TargetTransformInfoWrapperPass>();
-    AU.addUsedIfAvailable<DominatorTreeWrapperPass>();
-    AU.addPreserved<GlobalsAAWrapperPass>();
-    AU.addPreserved<DominatorTreeWrapperPass>();
     FunctionPass::getAnalysisUsage(AU);
   }
 
   PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
-                            const TargetTransformInfo *TTI, DominatorTree *DT);
+                            const TargetTransformInfo *TTI,
+                            const TargetLowering* TL);
   // Returns true if a change was made.
   bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
-                  const TargetTransformInfo *TTI, const DataLayout &DL,
-                  DominatorTree *DT);
+                  const TargetTransformInfo *TTI, const TargetLowering* TL,
+                  const DataLayout& DL);
 };
 
-bool ExpandMemCmpPass::runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
-                                  const TargetTransformInfo *TTI,
-                                  const DataLayout &DL, DominatorTree *DT) {
-  for (Instruction &I : BB) {
+bool ExpandMemCmpPass::runOnBlock(
+    BasicBlock &BB, const TargetLibraryInfo *TLI,
+    const TargetTransformInfo *TTI, const TargetLowering* TL,
+    const DataLayout& DL) {
+  for (Instruction& I : BB) {
     CallInst *CI = dyn_cast<CallInst>(&I);
     if (!CI) {
       continue;
@@ -851,21 +830,21 @@
     LibFunc Func;
     if (TLI->getLibFunc(ImmutableCallSite(CI), Func) &&
         (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
-        expandMemCmp(CI, TTI, &DL, DT)) {
+        expandMemCmp(CI, TTI, TL, &DL)) {
       return true;
     }
   }
   return false;
 }
 
-PreservedAnalyses ExpandMemCmpPass::runImpl(Function &F,
-                                            const TargetLibraryInfo *TLI,
-                                            const TargetTransformInfo *TTI,
-                                            DominatorTree *DT) {
-  const DataLayout &DL = F.getParent()->getDataLayout();
+
+PreservedAnalyses ExpandMemCmpPass::runImpl(
+    Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI,
+    const TargetLowering* TL) {
+  const DataLayout& DL = F.getParent()->getDataLayout();
   bool MadeChanges = false;
   for (auto BBIt = F.begin(); BBIt != F.end();) {
-    if (runOnBlock(*BBIt, TLI, TTI, DL, DT)) {
+    if (runOnBlock(*BBIt, TLI, TTI, TL, DL)) {
       MadeChanges = true;
       // If changes were made, restart the function from the beginning, since
       // the structure of the function was changed.
@@ -874,12 +853,7 @@
       ++BBIt;
     }
   }
-  if (!MadeChanges)
-    return PreservedAnalyses::all();
-  PreservedAnalyses PA;
-  PA.preserve<GlobalsAA>();
-  PA.preserve<DominatorTreeAnalysis>();
-  return PA;
+  return MadeChanges ? PreservedAnalyses::none() : PreservedAnalyses::all();
 }
 
 } // namespace
@@ -892,4 +866,6 @@
 INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
                     "Expand memcmp() to load/stores", false, false)
 
-Pass *llvm::createExpandMemCmpPass() { return new ExpandMemCmpPass(); }
+FunctionPass *llvm::createExpandMemCmpPass() {
+  return new ExpandMemCmpPass();
+}
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index e372b42..ba780e7 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -100,6 +100,9 @@
     "enable-implicit-null-checks",
     cl::desc("Fold null checks into faulting memory operations"),
     cl::init(false), cl::Hidden);
+static cl::opt<bool> DisableMergeICmps("disable-mergeicmps",
+    cl::desc("Disable MergeICmps Pass"),
+    cl::init(false), cl::Hidden);
 static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
     cl::desc("Print LLVM IR produced by the loop-reduce pass"));
 static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
@@ -640,6 +643,16 @@
       addPass(createPrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
   }
 
+  if (getOptLevel() != CodeGenOpt::None) {
+    // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
+    // loads and compares. ExpandMemCmpPass then tries to expand those calls
+    // into optimally-sized loads and compares. The transforms are enabled by a
+    // target lowering hook.
+    if (!DisableMergeICmps)
+      addPass(createMergeICmpsLegacyPass());
+    addPass(createExpandMemCmpPass());
+  }
+
   // Run GC lowering passes for builtin collectors
   // TODO: add a pass insertion point here
   addPass(createGCLoweringPass());
diff --git a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index d06dade..3ea77f0 100644
--- a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -246,18 +246,6 @@
   PM.add(createInstructionCombiningPass(ExpensiveCombines));
 }
 
-void PassManagerBuilder::addMemcmpPasses(legacy::PassManagerBase &PM) const {
-  if (OptLevel > 0) {
-    // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
-    // loads and compares. ExpandMemCmpPass then tries to expand those calls
-    // into optimally-sized loads and compares. The transforms are enabled by a
-    // target transform info hook.
-    PM.add(createMergeICmpsLegacyPass());
-    PM.add(createExpandMemCmpPass());
-    PM.add(createEarlyCSEPass());
-  }
-}
-
 void PassManagerBuilder::populateFunctionPassManager(
     legacy::FunctionPassManager &FPM) {
   addExtensionsToPM(EP_EarlyAsPossible, FPM);
@@ -421,7 +409,6 @@
 
   addExtensionsToPM(EP_ScalarOptimizerLate, MPM);
 
-  addMemcmpPasses(MPM);                       // Merge/Expand comparisons.
   if (RerollLoops)
     MPM.add(createLoopRerollPass());
 
@@ -923,7 +910,6 @@
   PM.add(NewGVN ? createNewGVNPass()
                 : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
   PM.add(createMemCpyOptPass());            // Remove dead memcpys.
-  addMemcmpPasses(PM);                      // Merge/Expand comparisons.
 
   // Nuke dead stores.
   PM.add(createDeadStoreEliminationPass());
diff --git a/llvm/lib/Transforms/Scalar/CMakeLists.txt b/llvm/lib/Transforms/Scalar/CMakeLists.txt
index 18bb9e8..e6f8901 100644
--- a/llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ b/llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -10,7 +10,6 @@
   DeadStoreElimination.cpp
   DivRemPairs.cpp
   EarlyCSE.cpp
-  ExpandMemCmp.cpp
   FlattenCFGPass.cpp
   Float2Int.cpp
   GuardWidening.cpp
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index 8ed24a7..98a45b3 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -866,7 +866,7 @@
 
   // We only try merging comparisons if the target wants to expand memcmp later.
   // The rationale is to avoid turning small chains into memcmp calls.
-  if (!TTI.enableMemCmpExpansion(F.hasOptSize(), /*IsZeroCmp*/ true))
+  if (!TTI.enableMemCmpExpansion(F.hasOptSize(), true))
     return false;
 
   // If we don't have memcmp avaiable we can't emit calls to it.
diff --git a/llvm/lib/Transforms/Scalar/Scalar.cpp b/llvm/lib/Transforms/Scalar/Scalar.cpp
index 3e92ade..869cf00 100644
--- a/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -84,7 +84,6 @@
   initializeLowerWidenableConditionLegacyPassPass(Registry);
   initializeMemCpyOptLegacyPassPass(Registry);
   initializeMergeICmpsLegacyPassPass(Registry);
-  initializeExpandMemCmpPassPass(Registry);
   initializeMergedLoadStoreMotionLegacyPassPass(Registry);
   initializeNaryReassociateLegacyPassPass(Registry);
   initializePartiallyInlineLibCallsLegacyPassPass(Registry);