Enhance ComputeMaskedBits to know that aligned frame indexes have their
low bits equal to zero.  This allows us to optimize away redundant
explicit stack-alignment code, as in stack-align.ll:test4.
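
As a rough standalone illustration of the known-bits reasoning (plain
C++, not LLVM's ComputeMaskedBits; the buffer and names are made up):
a 16-byte-aligned object has the low four bits of its address known to
be zero, so masking the address with -16 changes nothing.

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t Align = 16;              // assumed 16-byte-aligned slot
    alignas(16) static char buffer[2048];   // stands in for the frame object
    uint32_t addr = (uint32_t)(uintptr_t)&buffer[0];
    uint32_t knownZeroMask = Align - 1;     // low 4 bits are known zero
    assert((addr & knownZeroMask) == 0);    // guaranteed by the alignment
    assert((addr & -Align) == addr);        // so the 'and' is redundant
    return 0;
  }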

Doing this causes the code generator to start turning FI+cst into
FI|cst all over the place, which is generally a good thing (that is
the canonical form), except that various pieces of the code generator
don't handle OR aggressively.  Fix this by introducing a new
SelectionDAG::isBaseWithConstantOffset predicate and using it in
places that look for ADD(X,CST).  The ARM backend in particular was
missing a lot of addressing-mode folding opportunities around OR.
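
A standalone sketch of the arithmetic behind such a predicate (the
real isBaseWithConstantOffset operates on SDValues inside
SelectionDAG; the helper name below is invented for illustration):
when the constant's bits all fall within the base's known-zero low
bits, no carry can occur, so (base + cst) == (base | cst), and code
that matches ADD(X,CST) for addressing modes must accept the OR form
as well.

  #include <cassert>
  #include <cstdint>

  // Hypothetical helper: is OR'ing Cst into Base equivalent to adding it?
  bool orActsLikeAdd(uint32_t Base, uint32_t Cst) {
    return (Base & Cst) == 0;   // disjoint bits => no carries possible
  }

  int main() {
    uint32_t fi  = 0x1000;      // e.g. a 16-byte-aligned frame address
    uint32_t cst = 0x8;         // offset smaller than the alignment
    assert(orActsLikeAdd(fi, cst));
    assert((fi + cst) == (fi | cst));
    return 0;
  }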

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@125470 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index 8ca0b12..793c026 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -7,7 +7,7 @@
 target triple = "i686-apple-darwin8"
 @G = external global double
 
-define void @test({ double, double }* byval  %z, double* %P) {
+define void @test({ double, double }* byval  %z, double* %P) nounwind {
 entry:
 	%tmp3 = load double* @G, align 16		; <double> [#uses=1]
 	%tmp4 = tail call double @fabs( double %tmp3 )		; <double> [#uses=1]
@@ -21,14 +21,14 @@
 	ret void
 }
 
-define void @test2() alignstack(16) {
+define void @test2() alignstack(16) nounwind {
 entry:
     ; CHECK: andl{{.*}}$-16, %esp
     ret void
 }
 
 ; Use a call to force a spill.
-define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) {
+define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) nounwind {
 entry:
     ; CHECK: andl{{.*}}$-32, %esp
     call void @test2()
@@ -38,3 +38,14 @@
 
 declare double @fabs(double)
 
+; The pointer is already known aligned, so and x,-16 is eliminable.
+define i32 @test4() nounwind {
+entry:
+  %buffer = alloca [2048 x i8], align 16
+  %0 = ptrtoint [2048 x i8]* %buffer to i32
+  %and = and i32 %0, -16
+  ret i32 %and
+; CHECK: test4:
+; CHECK-NOT: and
+; CHECK: ret
+}