Added docstrings excerpted from the Python Library Reference.
Closes patch 556161.
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 27ee1bc..42aafe4 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -121,6 +121,25 @@
         (srow, scol, erow, ecol, tok_name[type], repr(token))
 
 def tokenize(readline, tokeneater=printtoken):
+    """
+    The tokenize() function accepts two parameters: one representing the
+    input stream, and one providing an output mechanism for tokenize().
+
+    The first parameter, readline, must be a callable object which provides
+    the same interface as the readline() method of built-in file objects.
+    Each call to the function should return one line of input as a string.
+
+    The second parameter, tokeneater, must also be a callable object. It is
+    called once for each token, with five arguments, corresponding to the
+    tuples generated by generate_tokens().
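+
+    Example (an illustrative sketch; 'hello.py' is a hypothetical file,
+    and the default tokeneater simply prints each token):
+
+        f = open('hello.py')
+        tokenize(f.readline)
+        f.close()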
+    """
     try:
         tokenize_loop(readline, tokeneater)
     except StopTokenizing:
@@ -132,6 +151,26 @@
         apply(tokeneater, token_info)
 
 def generate_tokens(readline):
+    """
+    The generate_tokens() generator requires one argument, readline, which
+    must be a callable object that provides the same interface as the
+    readline() method of built-in file objects. Each call to the function
+    should return one line of input as a string.
+
+    The generator produces 5-tuples with these members: the token type; the
+    token string; a 2-tuple (srow, scol) of ints specifying the row and
+    column where the token begins in the source; a 2-tuple (erow, ecol) of
+    ints specifying the row and column where the token ends in the source;
+    and the line on which the token was found. The line passed is the
+    logical line; continuation lines are included.
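+
+    Example (an illustrative sketch using the standard StringIO module):
+
+        import StringIO
+        source = StringIO.StringIO('1 + 2\n')
+        for token_info in generate_tokens(source.readline):
+            print token_info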
+    """
     lnum = parenlev = continued = 0
     namechars, numchars = string.ascii_letters + '_', '0123456789'
     contstr, needcont = '', 0