in tokenize.detect_encoding(), return utf-8-sig when a BOM is found
diff --git a/Doc/library/tokenize.rst b/Doc/library/tokenize.rst
index 7017045..ac6ae36 100644
--- a/Doc/library/tokenize.rst
+++ b/Doc/library/tokenize.rst
@@ -95,7 +95,8 @@
 
     It detects the encoding from the presence of a UTF-8 BOM or an encoding
     cookie as specified in :pep:`263`. If both a BOM and a cookie are present,
-    but disagree, a SyntaxError will be raised.
+    but disagree, a SyntaxError will be raised. Note that if a UTF-8 BOM is
+    found, ``'utf-8-sig'`` will be returned as the encoding.
 
     If no encoding is specified, then the default of ``'utf-8'`` will be returned.
 
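For illustration only, a minimal check of the documented behaviour, assuming nothing beyond the standard library (variable names are just for the example):

    from io import BytesIO
    from tokenize import detect_encoding

    # A source line preceded by a UTF-8 BOM and carrying no coding cookie.
    source = b'\xef\xbb\xbfprint("hello")\n'
    encoding, consumed = detect_encoding(BytesIO(source).readline)
    print(encoding)  # 'utf-8-sig' with this change; previously 'utf-8'
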
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 7b91ab2..1bfac40 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -726,7 +726,7 @@
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'# something\n', b'print(something)\n'])
 
@@ -747,7 +747,7 @@
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'# coding=utf-8\n'])
 
     def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
@@ -779,7 +779,7 @@
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'#! something\n', b'f# coding=utf-8\n'])
 
@@ -833,12 +833,12 @@
 
         readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'print(something)\n'])
 
         readline = self.get_readline((b'\xef\xbb\xbf',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [])
 
         readline = self.get_readline((b'# coding: bad\n',))
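Why ``'utf-8-sig'`` is the more useful return value: decoding the raw bytes with plain ``'utf-8'`` leaves the BOM behind as a leading U+FEFF character, whereas ``'utf-8-sig'`` strips it. A quick sketch, not part of the patch:

    bom_line = b'\xef\xbb\xbfprint(something)\n'

    # Plain 'utf-8' keeps the BOM as a leading U+FEFF character ...
    print(repr(bom_line.decode('utf-8')))      # '\ufeffprint(something)\n'
    # ... while 'utf-8-sig' strips it, which is what callers that reopen the
    # file with the detected encoding generally want.
    print(repr(bom_line.decode('utf-8-sig')))  # 'print(something)\n'
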
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index f82922b..8972137 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -301,14 +301,16 @@
     in.
 
     It detects the encoding from the presence of a utf-8 bom or an encoding
-    cookie as specified in pep-0263. If both a bom and a cookie are present,
-    but disagree, a SyntaxError will be raised. If the encoding cookie is an
-    invalid charset, raise a SyntaxError.
+    cookie as specified in pep-0263. If both a bom and a cookie are present, but
+    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
+    charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
+    'utf-8-sig' is returned.
 
     If no encoding is specified, then the default of 'utf-8' will be returned.
     """
     bom_found = False
     encoding = None
+    default = 'utf-8'
     def read_or_stop():
         try:
             return readline()
@@ -340,8 +342,9 @@
     if first.startswith(BOM_UTF8):
         bom_found = True
         first = first[3:]
+        default = 'utf-8-sig'
     if not first:
-        return 'utf-8', []
+        return default, []
 
     encoding = find_cookie(first)
     if encoding:
@@ -349,13 +352,13 @@
 
     second = read_or_stop()
     if not second:
-        return 'utf-8', [first]
+        return default, [first]
 
     encoding = find_cookie(second)
     if encoding:
         return encoding, [first, second]
 
-    return 'utf-8', [first, second]
+    return default, [first, second]
 
 
 def tokenize(readline):
@@ -394,6 +397,9 @@
     indents = [0]
 
     if encoding is not None:
+        if encoding == "utf-8-sig":
+            # BOM will already have been stripped.
+            encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
     while True:             # loop over lines in stream
         try:
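Note that ``tokenize()`` itself still reports plain ``'utf-8'`` in the ENCODING token it yields first, since ``detect_encoding()`` has already stripped the BOM from the lines it hands back. A quick check, not part of the patch:

    from io import BytesIO
    from tokenize import tokenize

    toks = list(tokenize(BytesIO(b'\xef\xbb\xbfx = 1\n').readline))
    # The first token is the ENCODING marker; with the mapping above it
    # carries 'utf-8' rather than 'utf-8-sig'.
    print(toks[0].string)  # 'utf-8'
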
diff --git a/Misc/NEWS b/Misc/NEWS
index 01c37ce..f1b068b 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -283,6 +283,9 @@
 Library
 -------
 
+- ``tokenize.detect_encoding`` now returns ``'utf-8-sig'`` when a UTF-8 BOM is
+  detected.
+
 - Issue #8024: Update the Unicode database to 5.2.
 
 - Issue #6716/2: Backslash-replace error output in compileall.