Introduce and use a new MemDepResult class to hold the result of a memdep
query.  This makes it crystal clear which cases can escape from MemDep and
therefore have to be handled by the clients.  It also gives the clients a
nice, simplified interface to memdep that is easy to poke at.
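
For reference, the client-side dispatch the new interface enables looks
roughly like this (method names are taken from the hunks below; MD and Inst
are placeholders for a MemoryDependenceAnalysis reference and the queried
instruction):

  MemDepResult Dep = MD.getDependency(Inst);
  if (Dep.isNone()) {
    // No dependency was found for Inst.
  } else if (Dep.isNonLocal()) {
    // Inst's dependency lives outside its basic block; use the
    // non-local query if the client cares about it.
  } else if (Instruction *DepInst = Dep.getInst()) {
    // Normal case: DepInst is the in-block instruction Inst depends on.
    // getInst() returns null for the None/NonLocal cases, which is what
    // makes the dyn_cast_or_null<> patterns below work.
  }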

This patch also makes DepResultTy and MemoryDependenceAnalysis::DepType
private, yay.
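
Rough sketch of the new class, assuming it keeps the PointerIntPair encoding
that DepResultTy already used; the real definition is in
include/llvm/Analysis/MemoryDependenceAnalysis.h and may differ in details:

  // Hypothetical sketch only -- see MemoryDependenceAnalysis.h for the real
  // thing.  Needs llvm/ADT/PointerIntPair.h and a forward decl of Instruction.
  class MemDepResult {
    enum DepType { Invalid = 0, Normal, NonLocal, None };
    typedef PointerIntPair<Instruction*, 2, DepType> PairTy;
    PairTy Value;
    explicit MemDepResult(PairTy V) : Value(V) {}
  public:
    MemDepResult() : Value(0, Invalid) {}

    // Static constructors for the three kinds of results clients can see.
    static MemDepResult get(Instruction *Inst) {
      return MemDepResult(PairTy(Inst, Normal));
    }
    static MemDepResult getNonLocal() { return MemDepResult(PairTy(0, NonLocal)); }
    static MemDepResult getNone()     { return MemDepResult(PairTy(0, None)); }

    bool isNormal()   const { return Value.getInt() == Normal; }
    bool isNonLocal() const { return Value.getInt() == NonLocal; }
    bool isNone()     const { return Value.getInt() == None; }

    // Return the depended-on instruction for a normal result, null otherwise.
    Instruction *getInst() const {
      return isNormal() ? Value.getPointer() : 0;
    }
  };

With the tag and pointer hidden behind these accessors, the old DepResultTy
pair and the DepType enum no longer need to be visible to clients.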


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60231 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 8217a44..c06015a 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -47,10 +47,8 @@
       return Changed;
     }
     
-    typedef MemoryDependenceAnalysis::DepResultTy DepResultTy;
-
     bool runOnBasicBlock(BasicBlock &BB);
-    bool handleFreeWithNonTrivialDependency(FreeInst *F, DepResultTy Dep);
+    bool handleFreeWithNonTrivialDependency(FreeInst *F, MemDepResult Dep);
     bool handleEndBlock(BasicBlock &BB);
     bool RemoveUndeadPointers(Value* pointer, uint64_t killPointerSize,
                               BasicBlock::iterator& BBI,
@@ -110,16 +108,15 @@
  
     // ... to a pointer that has been stored to before...
     if (last) {
-      DepResultTy dep = MD.getDependency(Inst);
+      MemDepResult dep = MD.getDependency(Inst);
       bool deletedStore = false;
     
       // ... and no other memory dependencies are between them....
-      while (dep.getInt() == MemoryDependenceAnalysis::Normal &&
-             isa<StoreInst>(dep.getPointer())) {
-        if (dep.getPointer() != last ||
-             TD.getTypeStoreSize(last->getOperand(0)->getType()) >
-             TD.getTypeStoreSize(Inst->getOperand(0)->getType())) {
-          dep = MD.getDependency(Inst, dep.getPointer());
+      while (StoreInst *DepStore = dyn_cast_or_null<StoreInst>(dep.getInst())) {
+        if (DepStore != last ||
+            TD.getTypeStoreSize(last->getOperand(0)->getType()) >
+            TD.getTypeStoreSize(Inst->getOperand(0)->getType())) {
+          dep = MD.getDependency(Inst, DepStore);
           continue;
         }
         
@@ -152,14 +149,12 @@
       // loaded from, then the store can be removed;
       if (LoadInst* L = dyn_cast<LoadInst>(S->getOperand(0))) {
         // FIXME: Don't do dep query if Parents don't match and other stuff!
-        DepResultTy dep = MD.getDependency(S);
+        MemDepResult dep = MD.getDependency(S);
         DominatorTree& DT = getAnalysis<DominatorTree>();
         
         if (!S->isVolatile() && S->getParent() == L->getParent() &&
             S->getPointerOperand() == L->getPointerOperand() &&
-            (dep.getInt() == MemoryDependenceAnalysis::None ||
-             dep.getInt() == MemoryDependenceAnalysis::NonLocal ||
-             DT.dominates(dep.getPointer(), L))) {
+            (!dep.isNormal() || DT.dominates(dep.getInst(), L))) {
           
           DeleteDeadInstruction(S);
           if (!isa<TerminatorInst>(BB.begin()))
@@ -185,15 +180,11 @@
 
 /// handleFreeWithNonTrivialDependency - Handle frees of entire structures whose
 /// dependency is a store to a field of that structure.
-bool DSE::handleFreeWithNonTrivialDependency(FreeInst* F, DepResultTy dep) {
+bool DSE::handleFreeWithNonTrivialDependency(FreeInst* F, MemDepResult dep) {
   TargetData &TD = getAnalysis<TargetData>();
   AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
   
-  if (dep.getInt() == MemoryDependenceAnalysis::None ||
-      dep.getInt() == MemoryDependenceAnalysis::NonLocal)
-    return false;
-  
-  StoreInst* dependency = dyn_cast<StoreInst>(dep.getPointer());
+  StoreInst* dependency = dyn_cast_or_null<StoreInst>(dep.getInst());
   if (!dependency)
     return false;
   else if (dependency->isVolatile())
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 64cac8f..63fabc6 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -456,19 +456,19 @@
         return nextValueNumber++;
       }
       
-      MemoryDependenceAnalysis::DepResultTy local_dep = MD->getDependency(C);
+      MemDepResult local_dep = MD->getDependency(C);
       
-      if (local_dep.getInt() == MemoryDependenceAnalysis::None) {
+      if (local_dep.isNone()) {
         valueNumbering.insert(std::make_pair(V, nextValueNumber));
         return nextValueNumber++;
-      } else if (local_dep.getInt() != MemoryDependenceAnalysis::NonLocal) {
+      } else if (Instruction *LocalDepInst = local_dep.getInst()) {
         // FIXME: INDENT PROPERLY!
-        if (!isa<CallInst>(local_dep.getPointer())) {
+        if (!isa<CallInst>(LocalDepInst)) {
           valueNumbering.insert(std::make_pair(V, nextValueNumber));
           return nextValueNumber++;
         }
         
-        CallInst* local_cdep = cast<CallInst>(local_dep.getPointer());
+        CallInst* local_cdep = cast<CallInst>(LocalDepInst);
         
         // FIXME: INDENT PROPERLY.
         if (local_cdep->getCalledFunction() != C->getCalledFunction() ||
@@ -495,20 +495,21 @@
       }
       
       
-      DenseMap<BasicBlock*, MemoryDependenceAnalysis::DepResultTy> deps;
+      DenseMap<BasicBlock*, MemDepResult> deps;
       MD->getNonLocalDependency(C, deps);
       CallInst* cdep = 0;
       
-      for (DenseMap<BasicBlock*, MemoryDependenceAnalysis::DepResultTy>
+      for (DenseMap<BasicBlock*, MemDepResult>
              ::iterator I = deps.begin(), E = deps.end(); I != E; ++I) {
-        if (I->second.getInt() == MemoryDependenceAnalysis::None) {
+        if (I->second.isNone()) {
           valueNumbering.insert(std::make_pair(V, nextValueNumber));
 
           return nextValueNumber++;
-        } else if (I->second.getInt() != MemoryDependenceAnalysis::NonLocal) {
+        } else if (Instruction *NonLocalDepInst = I->second.getInst()) {
           // FIXME: INDENT PROPERLY
+          // FIXME: All duplicated with non-local case.
           if (DT->properlyDominates(I->first, C->getParent())) {
-            if (CallInst* CD = dyn_cast<CallInst>(I->second.getPointer()))
+            if (CallInst* CD = dyn_cast<CallInst>(NonLocalDepInst))
               cdep = CD;
             else {
               valueNumbering.insert(std::make_pair(V, nextValueNumber));
@@ -721,8 +722,6 @@
       AU.addPreserved<AliasAnalysis>();
     }
   
-    typedef MemoryDependenceAnalysis::DepResultTy DepResultTy;
-
     // Helper fuctions
     // FIXME: eliminate or document these better
     bool processLoad(LoadInst* L,
@@ -866,7 +865,7 @@
   MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
   
   // Find the non-local dependencies of the load
-  DenseMap<BasicBlock*, DepResultTy> deps;
+  DenseMap<BasicBlock*, MemDepResult> deps;
   MD.getNonLocalDependency(L, deps);
   
   // If we had to process more than one hundred blocks to find the
@@ -878,19 +877,19 @@
   DenseMap<BasicBlock*, Value*> repl;
   
   // Filter out useless results (non-locals, etc)
-  for (DenseMap<BasicBlock*, DepResultTy>::iterator I = deps.begin(),
+  for (DenseMap<BasicBlock*, MemDepResult>::iterator I = deps.begin(),
        E = deps.end(); I != E; ++I) {
-    if (I->second.getInt() == MemoryDependenceAnalysis::None)
+    if (I->second.isNone())
       return false;
   
-    if (I->second.getInt() == MemoryDependenceAnalysis::NonLocal)
+    if (I->second.isNonLocal())
       continue;
   
-    if (StoreInst* S = dyn_cast<StoreInst>(I->second.getPointer())) {
+    if (StoreInst* S = dyn_cast<StoreInst>(I->second.getInst())) {
       if (S->getPointerOperand() != L->getPointerOperand())
         return false;
       repl[I->first] = S->getOperand(0);
-    } else if (LoadInst* LD = dyn_cast<LoadInst>(I->second.getPointer())) {
+    } else if (LoadInst* LD = dyn_cast<LoadInst>(I->second.getInst())) {
       if (LD->getPointerOperand() != L->getPointerOperand())
         return false;
       repl[I->first] = LD;
@@ -941,8 +940,8 @@
   // ... to a pointer that has been loaded from before...
   MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
   bool removedNonLocal = false;
-  DepResultTy dep = MD.getDependency(L);
-  if (dep.getInt() == MemoryDependenceAnalysis::NonLocal &&
+  MemDepResult dep = MD.getDependency(L);
+  if (dep.isNonLocal() &&
       L->getParent() != &L->getParent()->getParent()->getEntryBlock()) {
     removedNonLocal = processNonLocalLoad(L, toErase);
     
@@ -957,10 +956,9 @@
   
   // Walk up the dependency chain until we either find
   // a dependency we can use, or we can't walk any further
-  while (dep.getInt() == MemoryDependenceAnalysis::Normal &&
-         (isa<LoadInst>(dep.getPointer()) || isa<StoreInst>(dep.getPointer()))){
+  while (Instruction *DepInst = dep.getInst()) {
     // ... that depends on a store ...
-    if (StoreInst* S = dyn_cast<StoreInst>(dep.getPointer())) {
+    if (StoreInst* S = dyn_cast<StoreInst>(DepInst)) {
       if (S->getPointerOperand() == pointer) {
         // Remove it!
         MD.removeInstruction(L);
@@ -974,11 +972,14 @@
       // Whether we removed it or not, we can't
       // go any further
       break;
+    } else if (!isa<LoadInst>(DepInst)) {
+      // Only want to handle loads below.
+      break;
     } else if (!last) {
       // If we don't depend on a store, and we haven't
       // been loaded before, bail.
       break;
-    } else if (dep.getPointer() == last) {
+    } else if (DepInst == last) {
       // Remove it!
       MD.removeInstruction(L);
       
@@ -989,15 +990,14 @@
         
       break;
     } else {
-      dep = MD.getDependency(L, dep.getPointer());
+      dep = MD.getDependency(L, DepInst);
     }
   }
 
-  if (dep.getInt() == MemoryDependenceAnalysis::Normal &&
-      isa<AllocationInst>(dep.getPointer())) {
+  if (AllocationInst *DepAI = dyn_cast_or_null<AllocationInst>(dep.getInst())) {
     // Check that this load is actually from the
     // allocation we found
-    if (L->getOperand(0)->getUnderlyingObject() == dep.getPointer()) {
+    if (L->getOperand(0)->getUnderlyingObject() == DepAI) {
       // If this load depends directly on an allocation, there isn't
       // anything stored there; therefore, we can optimize this load
       // to undef.
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index acc6630..40eaa1d 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -629,18 +629,17 @@
   // The are two possible optimizations we can do for memcpy:
   //   a) memcpy-memcpy xform which exposes redundance for DSE
   //   b) call-memcpy xform for return slot optimization
-  MemoryDependenceAnalysis::DepResultTy dep = MD.getDependency(M);
-  if (dep.getInt() == MemoryDependenceAnalysis::None ||
-      dep.getInt() == MemoryDependenceAnalysis::NonLocal)
+  MemDepResult dep = MD.getDependency(M);
+  if (!dep.isNormal())
     return false;
-  else if (!isa<MemCpyInst>(dep.getPointer())) {
-    if (CallInst* C = dyn_cast<CallInst>(dep.getPointer()))
+  else if (!isa<MemCpyInst>(dep.getInst())) {
+    if (CallInst* C = dyn_cast<CallInst>(dep.getInst()))
       return performCallSlotOptzn(M, C);
     else
       return false;
   }
   
-  MemCpyInst* MDep = cast<MemCpyInst>(dep.getPointer());
+  MemCpyInst* MDep = cast<MemCpyInst>(dep.getInst());
   
   // We can only transforms memcpy's where the dest of one is the source of the
   // other