bpo-12486: Document tokenize.generate_tokens() as public API (#6957)

* Document tokenize.generate_tokens()

* Add news file

* Add test for generate_tokens

* Document behaviour around ENCODING token (illustrated in the sketch below)

* Add generate_tokens to __all__
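
For background on the ENCODING bullet: tokenize() consumes bytes and detects the
source encoding, so its first token is an ENCODING token, while generate_tokens()
consumes str and never emits ENCODING (in _tokenize the token is only yielded when
an encoding was detected). A minimal sketch of that difference; the sample source
string is illustrative and not part of the patch:

    import io
    import tokenize

    source = "x = 1\n"

    # tokenize() reads bytes and yields an ENCODING token first.
    byte_tokens = list(tokenize.tokenize(io.BytesIO(source.encode("utf-8")).readline))
    assert byte_tokens[0].type == tokenize.ENCODING
    assert byte_tokens[0].string == "utf-8"

    # generate_tokens() reads str and starts with the first real token.
    str_tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
    assert str_tokens[0].type == tokenize.NAME
    assert str_tokens[0].string == "x"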
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 40e6a8b..c78d9f7 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -37,7 +37,7 @@
 blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
 
 import token
-__all__ = token.__all__ + ["tokenize", "detect_encoding",
+__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
                            "untokenize", "TokenInfo"]
 del token
 
@@ -653,9 +653,12 @@
     yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
 
 
-# An undocumented, backwards compatible, API for all the places in the standard
-# library that expect to be able to use tokenize with strings
 def generate_tokens(readline):
+    """Tokenize a source reading Python code as unicode strings.
+
+    This has the same API as tokenize(), except that it expects the *readline*
+    callable to return str objects instead of bytes.
+    """
     return _tokenize(readline, None)
 
 def main():
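
As a usage sketch of the now-public API: generate_tokens() takes the same kind of
readline callable as tokenize(), but one that returns str lines, for example
io.StringIO(source).readline. The dump_tokens helper and the sample source below
are illustrative only, not part of this change:

    import io
    import tokenize

    def dump_tokens(source):
        # Tokenize Python source held in a str, one line per readline() call.
        readline = io.StringIO(source).readline
        for tok in tokenize.generate_tokens(readline):
            print(tokenize.tok_name[tok.type], repr(tok.string))

    dump_tokens("total = price * qty\n")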