Minor modernization and readability improvement to the tokenizer example (GH-19558)

diff --git a/Doc/library/re.rst b/Doc/library/re.rst
index 7c950bf..9abbd8b 100644
--- a/Doc/library/re.rst
+++ b/Doc/library/re.rst
@@ -1617,10 +1617,14 @@
 to combine those into a single master regular expression and to loop over
 successive matches::
 
-    import collections
+    from typing import NamedTuple
     import re
 
-    Token = collections.namedtuple('Token', ['type', 'value', 'line', 'column'])
+    class Token(NamedTuple):
+        type: str
+        value: str
+        line: int
+        column: int
 
     def tokenize(code):
         keywords = {'IF', 'THEN', 'ENDIF', 'FOR', 'NEXT', 'GOSUB', 'RETURN'}
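
A minimal sketch (not part of the diff) illustrating why this change is
behavior-preserving: ``typing.NamedTuple`` produces the same kind of
immutable tuple subclass as ``collections.namedtuple``, just declared with
a class statement and type annotations. The ``Token`` class below mirrors
the one added above; ``OldToken`` is only an illustrative name for the
pre-change definition::

    from collections import namedtuple
    from typing import NamedTuple

    class Token(NamedTuple):
        type: str
        value: str
        line: int
        column: int

    # The namedtuple form this diff replaces, under a different name
    # so both can coexist here.
    OldToken = namedtuple('Token', ['type', 'value', 'line', 'column'])

    new = Token('NUMBER', '42', 1, 0)
    old = OldToken('NUMBER', '42', 1, 0)

    # Both are plain tuples underneath, so they compare equal
    # field-for-field, and attribute access works the same way.
    assert new == old == ('NUMBER', '42', 1, 0)
    assert new.type == old.type == 'NUMBER'

Beyond readability, the class form also lets the fields carry annotations
that type checkers can verify, which the string-based ``namedtuple`` call
does not.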