Reuse tokenize.detect_encoding() in linecache instead of a custom solution.

Patch by Victor Stinner (issue #4016).
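
The linecache half of the patch is not shown in the hunk below. Judging from the subject line, it relies on the standard use of detect_encoding(): read the PEP 263 coding cookie (or UTF-8 BOM) from a binary stream, then decode the file with the detected encoding. A minimal sketch of that pattern follows; the helper name read_source_lines is illustrative only, not part of the patch:

    import tokenize

    def read_source_lines(filename):
        # detect_encoding() reads at most two lines through the given
        # readline callable, looking for a coding cookie or BOM, and
        # returns the encoding plus the raw lines it consumed.
        with open(filename, 'rb') as fp:
            encoding, _consumed = tokenize.detect_encoding(fp.readline)
        # Reopen in text mode now that the encoding is known.
        with open(filename, 'r', encoding=encoding) as fp:
            return fp.readlines()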
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 16c4f3f..4ff859d 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -27,7 +27,6 @@
 import re, string, sys
 from token import *
 from codecs import lookup, BOM_UTF8
-from itertools import chain, repeat
 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
 
 import token
@@ -327,13 +326,15 @@
     which tells you which encoding was used to decode the bytes stream.
     """
     encoding, consumed = detect_encoding(readline)
-    def readline_generator():
+    def readline_generator(consumed):
+        for line in consumed:
+            yield line
         while True:
             try:
                 yield readline()
             except StopIteration:
                 return
-    chained = chain(consumed, readline_generator())
+    chained = readline_generator(consumed)
     return _tokenize(chained.__next__, encoding)
 
 
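
The tokenize.py change folds the lines already consumed by detect_encoding() and the live readline into a single generator, which lets the itertools.chain import go; the wrapper also keeps converting the StopIteration raised by an exhausted readline into a normal end of iteration. A quick behavioral check, assuming the patched module and using only standard library names:

    import io
    import tokenize

    source = b"# -*- coding: utf-8 -*-\nx = 1\n"
    readline = io.BytesIO(source).readline
    # detect_encoding() consumes the cookie line; readline_generator()
    # re-yields it first, so no source text is lost.  The first token
    # is always an ENCODING token naming the detected codec.
    first = next(tokenize.tokenize(readline))
    assert first[0] == tokenize.ENCODING
    assert first[1] == "utf-8"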