Use the bit size of the operand instead of the hard-coded 32 when generating the
all-ones demanded-bits mask passed to ComputeMaskedBits, so 64-bit operands are
handled correctly.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@48750 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 5a3a51f..5cf08a6 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -822,8 +822,10 @@
       // provably disjoint.
       APInt LHSKnownZero, LHSKnownOne;
       DAG.ComputeMaskedBits(N.getOperand(0),
-                            APInt::getAllOnesValue(32),
+                            APInt::getAllOnesValue(N.getOperand(0)
+                                                   .getValueSizeInBits()),
                             LHSKnownZero, LHSKnownOne);
+
       if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
         // If all of the bits are known zero on the LHS or RHS, the add won't
         // carry.
@@ -932,7 +934,8 @@
       // provably disjoint.
       APInt LHSKnownZero, LHSKnownOne;
       DAG.ComputeMaskedBits(N.getOperand(0),
-                            APInt::getAllOnesValue(32),
+                            APInt::getAllOnesValue(N.getOperand(0)
+                                                   .getValueSizeInBits()),
                             LHSKnownZero, LHSKnownOne);
       if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
         // If all of the bits are known zero on the LHS or RHS, the add won't
diff --git a/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll b/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
new file mode 100644
index 0000000..395c986
--- /dev/null
+++ b/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
@@ -0,0 +1,25 @@
+; RUN: llvm-as < %s | llc -march=ppc64
+
+define fastcc i8* @page_rec_get_next(i8* %rec) nounwind  {
+entry:
+	%tmp2627 = ptrtoint i8* %rec to i64		; <i64> [#uses=2]
+	%tmp28 = and i64 %tmp2627, -16384		; <i64> [#uses=2]
+	%tmp2829 = inttoptr i64 %tmp28 to i8*		; <i8*> [#uses=1]
+	%tmp37 = getelementptr i8* %tmp2829, i64 42		; <i8*> [#uses=1]
+	%tmp40 = load i8* %tmp37, align 1		; <i8> [#uses=1]
+	%tmp4041 = zext i8 %tmp40 to i64		; <i64> [#uses=1]
+	%tmp42 = shl i64 %tmp4041, 8		; <i64> [#uses=1]
+	%tmp47 = add i64 %tmp42, 0		; <i64> [#uses=1]
+	%tmp52 = and i64 %tmp47, 32768		; <i64> [#uses=1]
+	%tmp72 = icmp eq i64 %tmp52, 0		; <i1> [#uses=1]
+	br i1 %tmp72, label %bb91, label %bb
+bb:		; preds = %entry
+	ret i8* null
+bb91:		; preds = %entry
+	br i1 false, label %bb100, label %bb185
+bb100:		; preds = %bb91
+	%tmp106 = sub i64 %tmp2627, %tmp28		; <i64> [#uses=0]
+	ret i8* null
+bb185:		; preds = %bb91
+	ret i8* null
+}