Teach CodeGen's version of computeMaskedBits to understand the range metadata.
This is the CodeGen equivalent of r153747. I tested that there is no
noticeable performance difference with any combination of -O0/-O2/-g when
compiling gcc as a single compilation unit.
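
For illustration, this is the kind of query the change improves. The snippet
is a sketch only: the variable names, the 8-bit width, and the [0, 2) range
are invented for the example, and it assumes the Mask-taking ComputeMaskedBits
entry point of this era, as in the diff below.

  // A load tagged with !range metadata restricting its value to [0, 2)
  // can only produce 0 or 1, so its top seven bits must be zero.
  APInt Mask = APInt::getAllOnesValue(8);   // query all eight bits
  APInt KnownZero(8, 0), KnownOne(8, 0);
  DAG.ComputeMaskedBits(LoadedVal, Mask, KnownZero, KnownOne);
  // With this patch, KnownZero comes back as 0xFE for such a load.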

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@153817 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 17bad94..1784f00 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -197,8 +197,8 @@
     KnownOne.setBit(BitWidth - 1);
 }
 
-static void computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
-                                  APInt &KnownZero) {
+void llvm::computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
+                                 APInt &KnownZero) {
   unsigned BitWidth = Mask.getBitWidth();
   unsigned NumRanges = Ranges.getNumOperands() / 2;
   assert(NumRanges >= 1);
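
The hunk ends at the assert, so for readers without the full file, here is a
sketch of how the rest of the helper derives the known-zero bits. It follows
the IR-level version from r153747 as I remember it and assumes the surrounding
file's headers (Constants.h, Metadata.h, ConstantRange.h), so treat it as
illustrative rather than an exact copy:

  // Each half-open range [Lower, Upper) caps the loaded value at Upper - 1,
  // so the leading zeros of Upper - 1 bound the known-zero high bits for
  // that range. Take the minimum over all ranges to keep only the bits that
  // are zero no matter which range the value falls in.
  unsigned MinLeadingZeros = BitWidth;
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
    ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.isWrappedSet())
      MinLeadingZeros = 0;  // a wrapped range may contain -1, which has no
                            // leading zeros
    unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
    MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
  }
  KnownZero = Mask & APInt::getHighBitsSet(BitWidth, MinLeadingZeros);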