Generalize target-independent folding rules for sizeof to handle more
cases, and implement target-independent folding rules for alignof and
offsetof. Also, reassociate reassociative operators when doing so
leads to more folding.
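
For reference, a minimal sketch of how these expressions are built with
the analogous ConstantExpr routines (Ty, STy, and the surrounding setup
are illustrative, not part of this patch):

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // sizeof(Ty), expressed as ptrtoint(gep(Ty* null, 1)); this now
    // folds target-independently in more cases.
    Constant *Size = ConstantExpr::getSizeOf(Ty);
    // alignof and offsetof use analogous gep-of-null idioms, which the
    // target-independent folder now reduces as well.
    Constant *Align = ConstantExpr::getAlignOf(Ty);
    Constant *Offset = ConstantExpr::getOffsetOf(STy, 1);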

Generalize ScalarEvolution's isOffsetOf to recognize offsetof on
arrays. Rename getAllocSizeExpr to getSizeOfExpr, and getFieldOffsetExpr
to getOffsetOfExpr, for consistency with the analogous ConstantExpr routines.
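
After the rename, clients read like this minimal sketch (SE is a
ScalarEvolution instance; Ty, STy, and FieldNo are illustrative):

    // Formerly getAllocSizeExpr and getFieldOffsetExpr, respectively.
    const SCEV *Size   = SE.getSizeOfExpr(Ty);
    const SCEV *Offset = SE.getOffsetOfExpr(STy, FieldNo);

As the diff below shows, the generalized recognizer now reports the
operand type as a plain Type rather than a StructType, which is what
lets offsetof expressions on arrays be recognized too.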

Make the target-dependent folder promote GEP array indices to
pointer-sized integers, making the implicit casts explicit and
exposing them to subsequent folding.
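
A hedged sketch of that promotion, assuming a TargetData *TD, an
LLVMContext Context, and an index constant Idx narrower than a pointer
(all illustrative):

    // Widen the array index to the target's pointer-sized integer type,
    // turning the formerly implicit sign extension into an explicit cast
    // that subsequent constant folding can see and simplify.
    if (Idx->getType()->getPrimitiveSizeInBits() < TD->getPointerSizeInBits())
      Idx = ConstantExpr::getSExt(Idx, TD->getIntPtrType(Context));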

Also add testcases for this new functionality, as well as for related
existing functionality.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@94987 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 07196fd..4310e3c 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -369,7 +369,7 @@
     // array indexing.
     SmallVector<const SCEV *, 8> ScaledOps;
     if (ElTy->isSized()) {
-      const SCEV *ElSize = SE.getAllocSizeExpr(ElTy);
+      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
       if (!ElSize->isZero()) {
         SmallVector<const SCEV *, 8> NewOps;
         for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
@@ -433,9 +433,9 @@
         // appropriate struct type.
         for (unsigned i = 0, e = Ops.size(); i != e; ++i)
           if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
-            const StructType *StructTy;
+            const Type *CTy;
             Constant *FieldNo;
-            if (U->isOffsetOf(StructTy, FieldNo) && StructTy == STy) {
+            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
               GepIndices.push_back(FieldNo);
               ElTy =
                 STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());