Add example, tighten text, and minor clean-ups.
diff --git a/Doc/library/functools.rst b/Doc/library/functools.rst
index 5b65874..7c9b9ca 100644
--- a/Doc/library/functools.rst
+++ b/Doc/library/functools.rst
@@ -42,40 +42,52 @@
    .. versionadded:: 3.2
 
 
-.. decorator:: lru_cache(maxsize)
+.. decorator:: lru_cache(maxsize=100)
 
    Decorator to wrap a function with a memoizing callable that saves up to the
    *maxsize* most recent calls.  It can save time when an expensive or I/O bound
    function is periodically called with the same arguments.
 
-   The *maxsize* parameter defaults to 100.  Since a dictionary is used to cache
-   results, the positional and keyword arguments to the function must be
-   hashable.
+   Since a dictionary is used to cache results, the positional and keyword
+   arguments to the function must be hashable.
 
-   The wrapped function is instrumented with a :attr:`cache_info` attribute that
-   can be called to retrieve a named tuple with the following fields:
+   To help measure the effectiveness of the cache and tune the *maxsize*
+   parameter, the wrapped function is instrumented with a :func:`cache_info`
+   function that returns a :term:`named tuple` showing *hits*, *misses*,
+   *maxsize* and *currsize*.
 
-      - :attr:`maxsize`: maximum cache size (as set by the *maxsize* parameter)
-      - :attr:`size`: current number of entries in the cache
-      - :attr:`hits`: number of successful cache lookups
-      - :attr:`misses`: number of unsuccessful cache lookups.
-
-   These statistics are helpful for tuning the *maxsize* parameter and for measuring
-   the effectiveness of the cache.
-
-   The wrapped function also has a :attr:`cache_clear` attribute which can be
-   called (with no arguments) to clear the cache.
+   The decorator also provides a :func:`cache_clear` function for clearing or
+   invalidating the cache.
 
    The original underlying function is accessible through the
-   :attr:`__wrapped__` attribute.  This allows introspection, bypassing
-   the cache, or rewrapping the function with a different caching tool.
+   :attr:`__wrapped__` attribute.  This is useful for introspection, for
+   bypassing the cache, or for rewrapping the function with a different cache.
 
    A `LRU (least recently used) cache
-   <http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used>`_
-   works best when more recent calls are the best predictors of upcoming calls
-   (for example, the most popular articles on a news server tend to
-   change each day).  The cache's size limit assurs that caching does not
-   grow without bound on long-running processes such as web servers.
+   <http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used>`_ works
+   best when more recent calls are the best predictors of upcoming calls (for
+   example, the most popular articles on a news server tend to change daily).
+   The cache's size limit ensures that the cache does not grow without bound on
+   long-running processes such as web servers.
+
+   Example -- Caching static web content::
+
+        import functools
+        import urllib.error
+        import urllib.request
+
+        @functools.lru_cache(maxsize=20)
+        def get_pep(num):
+            'Retrieve text of a Python Enhancement Proposal'
+            resource = 'http://www.python.org/dev/peps/pep-%04d/' % num
+            try:
+                with urllib.request.urlopen(resource) as s:
+                    return s.read()
+            except urllib.error.HTTPError:
+                return 'Not Found'
+
+        >>> for n in 8, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991:
+        ...     pep = get_pep(n)
+        ...     print(n, len(pep))
+
+        >>> print(get_pep.cache_info())
+        CacheInfo(hits=3, misses=8, maxsize=20, currsize=8)
 
    .. versionadded:: 3.2
 
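A further usage sketch (not part of the patch) of the cache_clear() and
__wrapped__ hooks described above; ``slow_square`` is a hypothetical
placeholder for an expensive function::

    import functools

    @functools.lru_cache(maxsize=32)
    def slow_square(n):
        'Hypothetical expensive computation.'
        return n * n

    for n in (1, 2, 3, 1, 2, 1):
        slow_square(n)

    print(slow_square.cache_info())    # CacheInfo(hits=3, misses=3, maxsize=32, currsize=3)

    slow_square.cache_clear()          # discard cached results and reset the statistics
    print(slow_square.cache_info())    # CacheInfo(hits=0, misses=0, maxsize=32, currsize=0)

    print(slow_square.__wrapped__(4))  # call the original function, bypassing the cache
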
diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst
index 056e2fa..bc5e84e 100644
--- a/Doc/whatsnew/3.2.rst
+++ b/Doc/whatsnew/3.2.rst
@@ -332,13 +332,14 @@
          c.execute('SELECT phonenumber FROM phonelist WHERE name=?', (name,))
          return c.fetchone()[0]
 
-  To help with choosing an effective cache size, the wrapped function is
-  instrumented with info function:
-
   >>> for name in user_requests:
-  ...     get_phone_number(name)
+  ...     get_phone_number(name)        # cached lookup
+
+  To help with choosing an effective cache size, the wrapped function is
+  instrumented to track cache statistics:
+
   >>> get_phone_number.cache_info()
-  CacheInfo(maxsize=300, size=300, hits=4805, misses=980)
+  CacheInfo(hits=4805, misses=980, maxsize=300, currsize=300)
 
   If the phonelist table gets updated, the outdated contents of the cache can be
   cleared with:
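
Because the statistics come back as a named tuple, downstream code can also
read them by field name, which is unaffected by the reordering above.  A small
hedged illustration (the thresholds are made up)::

    info = get_phone_number.cache_info()
    if info.misses:                      # avoid division by zero before any lookups
        hit_rate = info.hits / (info.hits + info.misses)
        print('hit rate: %.1f%%' % (hit_rate * 100))
    if info.currsize >= info.maxsize:
        print('cache full; consider a maxsize larger than %d' % info.maxsize)

    get_phone_number.cache_clear()       # e.g. after the phonelist table is updated
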
diff --git a/Lib/functools.py b/Lib/functools.py
index e8e9960..c558a5e 100644
--- a/Lib/functools.py
+++ b/Lib/functools.py
@@ -114,7 +114,7 @@
             raise TypeError('hash not implemented')
     return K
 
-_CacheInfo = namedtuple("CacheInfo", "maxsize, size, hits, misses")
+_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
 
 def lru_cache(maxsize=100):
     """Least-recently-used cache decorator.
@@ -166,7 +166,7 @@
         def cache_info():
             """Report cache statistics"""
             with lock:
-                return _CacheInfo(maxsize, len(cache), hits, misses)
+                return _CacheInfo(hits, misses, maxsize, len(cache))
 
         def cache_clear():
             """Clear the cache and cache statistics"""
diff --git a/Lib/test/test_functools.py b/Lib/test/test_functools.py
index c877f88..8f48e9e 100644
--- a/Lib/test/test_functools.py
+++ b/Lib/test/test_functools.py
@@ -501,7 +501,7 @@
         def orig(x, y):
             return 3*x+y
         f = functools.lru_cache(maxsize=20)(orig)
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertEqual(maxsize, 20)
         self.assertEqual(currsize, 0)
         self.assertEqual(hits, 0)
@@ -513,18 +513,18 @@
             actual = f(x, y)
             expected = orig(x, y)
             self.assertEqual(actual, expected)
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertTrue(hits > misses)
         self.assertEqual(hits + misses, 1000)
         self.assertEqual(currsize, 20)
 
         f.cache_clear()   # test clearing
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertEqual(hits, 0)
         self.assertEqual(misses, 0)
         self.assertEqual(currsize, 0)
         f(x, y)
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertEqual(hits, 0)
         self.assertEqual(misses, 1)
         self.assertEqual(currsize, 1)
@@ -532,7 +532,7 @@
         # Test bypassing the cache
         self.assertIs(f.__wrapped__, orig)
         f.__wrapped__(x, y)
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertEqual(hits, 0)
         self.assertEqual(misses, 1)
         self.assertEqual(currsize, 1)
@@ -548,7 +548,7 @@
         for i in range(5):
             self.assertEqual(f(), 20)
         self.assertEqual(f_cnt, 5)
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertEqual(hits, 0)
         self.assertEqual(misses, 5)
         self.assertEqual(currsize, 0)
@@ -564,7 +564,7 @@
         for i in range(5):
             self.assertEqual(f(), 20)
         self.assertEqual(f_cnt, 1)
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertEqual(hits, 4)
         self.assertEqual(misses, 1)
         self.assertEqual(currsize, 1)
@@ -581,7 +581,7 @@
             #    *  *              *                          *
             self.assertEqual(f(x), x*10)
         self.assertEqual(f_cnt, 4)
-        maxsize, currsize, hits, misses = f.cache_info()
+        hits, misses, maxsize, currsize = f.cache_info()
         self.assertEqual(hits, 12)
         self.assertEqual(misses, 4)
         self.assertEqual(currsize, 2)
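
The updated tests unpack the tuple positionally, so they have to track the new
field order; an equivalent check written against the field names (a hedged
alternative, not part of this patch) would be order-independent::

    info = f.cache_info()
    self.assertEqual(info.hits, 12)
    self.assertEqual(info.misses, 4)
    self.assertEqual(info.currsize, 2)
    self.assertLessEqual(info.currsize, info.maxsize)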