Revert "Update build-tools to ab/8357123" am: 17d9f2c99b am: 59e5185096

Original change: https://android-review.googlesource.com/c/platform/prebuilts/build-tools/+/2043009

Change-Id: I9a34b6c8f45fa38d6ee869c976082c7ff171034c
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/common/py3-stdlib/LICENSE b/common/py3-stdlib/LICENSE
index 02a5145..f42f8ad 100644
--- a/common/py3-stdlib/LICENSE
+++ b/common/py3-stdlib/LICENSE
@@ -84,7 +84,7 @@
 distribute, and otherwise use Python alone or in any derivative version,
 provided, however, that PSF's License Agreement and PSF's notice of copyright,
 i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
 All Rights Reserved" are retained in Python alone or in any derivative version
 prepared by Licensee.
 
@@ -191,9 +191,9 @@
 Agreement, Licensee may substitute the following text (omitting the
 quotes): "Python 1.6.1 is made available subject to the terms and
 conditions in CNRI's License Agreement.  This Agreement together with
-Python 1.6.1 may be located on the internet using the following
+Python 1.6.1 may be located on the Internet using the following
 unique, persistent identifier (known as a handle): 1895.22/1013.  This
-Agreement may also be obtained from a proxy server on the internet
+Agreement may also be obtained from a proxy server on the Internet
 using the following URL: http://hdl.handle.net/1895.22/1013".
 
 3. In the event Licensee prepares a derivative work that is based on
diff --git a/common/py3-stdlib/__future__.py b/common/py3-stdlib/__future__.py
index 97dc90c..0e7b555 100644
--- a/common/py3-stdlib/__future__.py
+++ b/common/py3-stdlib/__future__.py
@@ -42,7 +42,7 @@
 argument to the builtin function compile() to enable the feature in
 dynamically compiled code.  This flag is stored in the .compiler_flag
 attribute on _Future instances.  These values must match the appropriate
-#defines of CO_xxx flags in Include/cpython/compile.h.
+#defines of CO_xxx flags in Include/compile.h.
 
 No feature line is ever to be deleted from this file.
 """
@@ -143,5 +143,5 @@
                           CO_FUTURE_GENERATOR_STOP)
 
 annotations = _Feature((3, 7, 0, "beta", 1),
-                       (3, 11, 0, "alpha", 0),
+                       (3, 10, 0, "alpha", 0),
                        CO_FUTURE_ANNOTATIONS)
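
Aside: a minimal sketch of how these _Feature records are read at runtime. getOptionalRelease() and getMandatoryRelease() are the public accessors; after this revert, annotations reports (3, 10, 0) as its mandatory release again.

    import __future__

    feat = __future__.annotations
    # Each _Feature holds (major, minor, micro, releaselevel, serial) tuples.
    print(feat.getOptionalRelease())    # (3, 7, 0, 'beta', 1)
    print(feat.getMandatoryRelease())   # (3, 10, 0, 'alpha', 0) after revert
    print(hex(feat.compiler_flag))      # flag usable with compile()
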
diff --git a/common/py3-stdlib/_bootlocale.py b/common/py3-stdlib/_bootlocale.py
new file mode 100644
index 0000000..3273a3b
--- /dev/null
+++ b/common/py3-stdlib/_bootlocale.py
@@ -0,0 +1,46 @@
+"""A minimal subset of the locale module used at interpreter startup
+(imported by the _io module), in order to reduce startup time.
+
+Don't import directly from third-party code; use the `locale` module instead!
+"""
+
+import sys
+import _locale
+
+if sys.platform.startswith("win"):
+    def getpreferredencoding(do_setlocale=True):
+        if sys.flags.utf8_mode:
+            return 'UTF-8'
+        return _locale._getdefaultlocale()[1]
+else:
+    try:
+        _locale.CODESET
+    except AttributeError:
+        if hasattr(sys, 'getandroidapilevel'):
+            # On Android langinfo.h and CODESET are missing, and UTF-8 is
+            # always used in mbstowcs() and wcstombs().
+            def getpreferredencoding(do_setlocale=True):
+                return 'UTF-8'
+        else:
+            def getpreferredencoding(do_setlocale=True):
+                if sys.flags.utf8_mode:
+                    return 'UTF-8'
+                # This path for legacy systems needs the more complex
+                # getdefaultlocale() function, import the full locale module.
+                import locale
+                return locale.getpreferredencoding(do_setlocale)
+    else:
+        def getpreferredencoding(do_setlocale=True):
+            assert not do_setlocale
+            if sys.flags.utf8_mode:
+                return 'UTF-8'
+            result = _locale.nl_langinfo(_locale.CODESET)
+            if not result and sys.platform == 'darwin':
+                # nl_langinfo can return an empty string
+                # when the setting has an invalid value.
+                # Default to UTF-8 in that case because
+                # UTF-8 is the default charset on OSX and
+                # returning nothing will crash the
+                # interpreter.
+                result = 'UTF-8'
+            return result
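
The module above is interpreter-internal; a rough sketch of the equivalent lookup through the public locale API, which is what its docstring directs third-party code to use:

    import locale
    import sys

    def preferred_encoding():
        # In UTF-8 mode (python -X utf8), the answer is always UTF-8.
        if sys.flags.utf8_mode:
            return 'UTF-8'
        # locale.getpreferredencoding() wraps the platform-specific logic
        # (nl_langinfo(CODESET) on POSIX, the ANSI code page on Windows).
        return locale.getpreferredencoding(False)

    print(preferred_encoding())
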
diff --git a/common/py3-stdlib/_collections_abc.py b/common/py3-stdlib/_collections_abc.py
index 40417dc..36cd993 100644
--- a/common/py3-stdlib/_collections_abc.py
+++ b/common/py3-stdlib/_collections_abc.py
@@ -10,10 +10,6 @@
 import sys
 
 GenericAlias = type(list[int])
-EllipsisType = type(...)
-def _f(): pass
-FunctionType = type(_f)
-del _f
 
 __all__ = ["Awaitable", "Coroutine",
            "AsyncIterable", "AsyncIterator", "AsyncGenerator",
@@ -413,143 +409,6 @@
         return NotImplemented
 
 
-class _CallableGenericAlias(GenericAlias):
-    """ Represent `Callable[argtypes, resulttype]`.
-
-    This sets ``__args__`` to a tuple containing the flattened ``argtypes``
-    followed by ``resulttype``.
-
-    Example: ``Callable[[int, str], float]`` sets ``__args__`` to
-    ``(int, str, float)``.
-    """
-
-    __slots__ = ()
-
-    def __new__(cls, origin, args):
-        if not (isinstance(args, tuple) and len(args) == 2):
-            raise TypeError(
-                "Callable must be used as Callable[[arg, ...], result].")
-        t_args, t_result = args
-        if isinstance(t_args, list):
-            args = (*t_args, t_result)
-        elif not _is_param_expr(t_args):
-            raise TypeError(f"Expected a list of types, an ellipsis, "
-                            f"ParamSpec, or Concatenate. Got {t_args}")
-        return super().__new__(cls, origin, args)
-
-    @property
-    def __parameters__(self):
-        params = []
-        for arg in self.__args__:
-            # Looks like a genericalias
-            if hasattr(arg, "__parameters__") and isinstance(arg.__parameters__, tuple):
-                params.extend(arg.__parameters__)
-            else:
-                if _is_typevarlike(arg):
-                    params.append(arg)
-        return tuple(dict.fromkeys(params))
-
-    def __repr__(self):
-        if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]):
-            return super().__repr__()
-        return (f'collections.abc.Callable'
-                f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
-                f'{_type_repr(self.__args__[-1])}]')
-
-    def __reduce__(self):
-        args = self.__args__
-        if not (len(args) == 2 and _is_param_expr(args[0])):
-            args = list(args[:-1]), args[-1]
-        return _CallableGenericAlias, (Callable, args)
-
-    def __getitem__(self, item):
-        # Called during TypeVar substitution, returns the custom subclass
-        # rather than the default types.GenericAlias object.  Most of the
-        # code is copied from typing's _GenericAlias and the builtin
-        # types.GenericAlias.
-
-        # A special case in PEP 612 where if X = Callable[P, int],
-        # then X[int, str] == X[[int, str]].
-        param_len = len(self.__parameters__)
-        if param_len == 0:
-            raise TypeError(f'{self} is not a generic class')
-        if not isinstance(item, tuple):
-            item = (item,)
-        if (param_len == 1 and _is_param_expr(self.__parameters__[0])
-                and item and not _is_param_expr(item[0])):
-            item = (list(item),)
-        item_len = len(item)
-        if item_len != param_len:
-            raise TypeError(f'Too {"many" if item_len > param_len else "few"}'
-                            f' arguments for {self};'
-                            f' actual {item_len}, expected {param_len}')
-        subst = dict(zip(self.__parameters__, item))
-        new_args = []
-        for arg in self.__args__:
-            if _is_typevarlike(arg):
-                if _is_param_expr(arg):
-                    arg = subst[arg]
-                    if not _is_param_expr(arg):
-                        raise TypeError(f"Expected a list of types, an ellipsis, "
-                                        f"ParamSpec, or Concatenate. Got {arg}")
-                else:
-                    arg = subst[arg]
-            # Looks like a GenericAlias
-            elif hasattr(arg, '__parameters__') and isinstance(arg.__parameters__, tuple):
-                subparams = arg.__parameters__
-                if subparams:
-                    subargs = tuple(subst[x] for x in subparams)
-                    arg = arg[subargs]
-            if isinstance(arg, tuple):
-                new_args.extend(arg)
-            else:
-                new_args.append(arg)
-
-        # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612
-        if not isinstance(new_args[0], list):
-            t_result = new_args[-1]
-            t_args = new_args[:-1]
-            new_args = (t_args, t_result)
-        return _CallableGenericAlias(Callable, tuple(new_args))
-
-
-def _is_typevarlike(arg):
-    obj = type(arg)
-    # looks like a TypeVar/ParamSpec
-    return (obj.__module__ == 'typing'
-            and obj.__name__ in {'ParamSpec', 'TypeVar'})
-
-def _is_param_expr(obj):
-    """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or
-    ``_ConcatenateGenericAlias`` from typing.py
-    """
-    if obj is Ellipsis:
-        return True
-    if isinstance(obj, list):
-        return True
-    obj = type(obj)
-    names = ('ParamSpec', '_ConcatenateGenericAlias')
-    return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names)
-
-def _type_repr(obj):
-    """Return the repr() of an object, special-casing types (internal helper).
-
-    Copied from :mod:`typing` since collections.abc
-    shouldn't depend on that module.
-    """
-    if isinstance(obj, GenericAlias):
-        return repr(obj)
-    if isinstance(obj, type):
-        if obj.__module__ == 'builtins':
-            return obj.__qualname__
-        return f'{obj.__module__}.{obj.__qualname__}'
-    if obj is Ellipsis:
-        return '...'
-    if isinstance(obj, FunctionType):
-        return obj.__name__
-    return repr(obj)
-
-
 class Callable(metaclass=ABCMeta):
 
     __slots__ = ()
@@ -564,13 +423,14 @@
             return _check_methods(C, "__call__")
         return NotImplemented
 
-    __class_getitem__ = classmethod(_CallableGenericAlias)
+    __class_getitem__ = classmethod(GenericAlias)
 
 
 ### SETS ###
 
 
 class Set(Collection):
+
     """A set is a finite, iterable container.
 
     This class provides concrete generic implementations of all
@@ -698,7 +558,6 @@
             hx = hash(x)
             h ^= (hx ^ (hx << 16) ^ 89869747)  * 3644798167
             h &= MASK
-        h ^= (h >> 11) ^ (h >> 25)
         h = h * 69069 + 907133923
         h &= MASK
         if h > MAX:
@@ -796,19 +655,19 @@
 
 ### MAPPINGS ###
 
+
 class Mapping(Collection):
+
+    __slots__ = ()
+
     """A Mapping is a generic container for associating key/value
     pairs.
 
     This class provides concrete generic implementations of all
     methods except for __getitem__, __iter__, and __len__.
+
     """
 
-    __slots__ = ()
-
-    # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set.
-    __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING
-
     @abstractmethod
     def __getitem__(self, key):
         raise KeyError
@@ -847,6 +706,7 @@
 
     __reversed__ = None
 
+
 Mapping.register(mappingproxy)
 
 
@@ -871,7 +731,7 @@
     __slots__ = ()
 
     @classmethod
-    def _from_iterable(cls, it):
+    def _from_iterable(self, it):
         return set(it)
 
     def __contains__(self, key):
@@ -889,7 +749,7 @@
     __slots__ = ()
 
     @classmethod
-    def _from_iterable(cls, it):
+    def _from_iterable(self, it):
         return set(it)
 
     def __contains__(self, item):
@@ -929,15 +789,17 @@
 
 
 class MutableMapping(Mapping):
+
+    __slots__ = ()
+
     """A MutableMapping is a generic container for associating
     key/value pairs.
 
     This class provides concrete generic implementations of all
     methods except for __getitem__, __setitem__, __delitem__,
     __iter__, and __len__.
-    """
 
-    __slots__ = ()
+    """
 
     @abstractmethod
     def __setitem__(self, key, value):
@@ -1015,7 +877,9 @@
 
 ### SEQUENCES ###
 
+
 class Sequence(Reversible, Collection):
+
     """All the operations on a read-only sequence.
 
     Concrete subclasses must override __new__ or __init__,
@@ -1024,9 +888,6 @@
 
     __slots__ = ()
 
-    # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set.
-    __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE
-
     @abstractmethod
     def __getitem__(self, index):
         raise IndexError
@@ -1078,6 +939,7 @@
         'S.count(value) -> integer -- return number of occurrences of value'
         return sum(1 for v in self if v is value or v == value)
 
+
 Sequence.register(tuple)
 Sequence.register(str)
 Sequence.register(range)
@@ -1085,6 +947,7 @@
 
 
 class ByteString(Sequence):
+
     """This unifies bytes and bytearray.
 
     XXX Should add all their methods.
@@ -1097,13 +960,15 @@
 
 
 class MutableSequence(Sequence):
+
+    __slots__ = ()
+
     """All the operations on a read-write sequence.
 
     Concrete subclasses must provide __new__ or __init__,
     __getitem__, __setitem__, __delitem__, __len__, and insert().
-    """
 
-    __slots__ = ()
+    """
 
     @abstractmethod
     def __setitem__(self, index, value):
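
A small sketch of the user-visible effect of dropping _CallableGenericAlias; the printed value depends on the Python version, as noted in the comments:

    from collections.abc import Callable

    alias = Callable[[int, str], float]
    # With _CallableGenericAlias (3.10): (int, str, float) -- the argtypes
    # are flattened with the result type last, per the docstring above.
    # With plain GenericAlias (3.9 behavior restored here): ([int, str], float).
    print(alias.__args__)
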
diff --git a/common/py3-stdlib/_compression.py b/common/py3-stdlib/_compression.py
index e8b70aa..b00f31b 100644
--- a/common/py3-stdlib/_compression.py
+++ b/common/py3-stdlib/_compression.py
@@ -1,7 +1,7 @@
 """Internal classes used by the gzip, lzma and bz2 modules"""
 
 import io
-import sys
+
 
 BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE  # Compressed data read chunk size
 
@@ -110,16 +110,6 @@
         self._pos += len(data)
         return data
 
-    def readall(self):
-        chunks = []
-        # sys.maxsize means the max length of output buffer is unlimited,
-        # so that the whole input buffer can be decompressed within one
-        # .decompress() call.
-        while data := self.read(sys.maxsize):
-            chunks.append(data)
-
-        return b"".join(chunks)
-
     # Rewind the file to the beginning of the data stream.
     def _rewind(self):
         self._fp.seek(0)
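
The removed readall() leans on the walrus operator and an effectively unbounded read size; a standalone sketch of the same pattern (read_all and stream are illustrative names):

    import sys

    def read_all(stream):
        chunks = []
        # read(sys.maxsize) requests an effectively unlimited chunk; looping
        # still handles readers that return data in smaller pieces.
        while data := stream.read(sys.maxsize):
            chunks.append(data)
        return b"".join(chunks)
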
diff --git a/common/py3-stdlib/_markupbase.py b/common/py3-stdlib/_markupbase.py
index 3ad7e27..2af5f1c 100644
--- a/common/py3-stdlib/_markupbase.py
+++ b/common/py3-stdlib/_markupbase.py
@@ -29,6 +29,10 @@
             raise RuntimeError(
                 "_markupbase.ParserBase must be subclassed")
 
+    def error(self, message):
+        raise NotImplementedError(
+            "subclasses of ParserBase must override error()")
+
     def reset(self):
         self.lineno = 1
         self.offset = 0
@@ -127,11 +131,12 @@
                     # also in data attribute specifications of attlist declaration
                     # also link type declaration subsets in linktype declarations
                     # also link attribute specification lists in link declarations
-                    raise AssertionError("unsupported '[' char in %s declaration" % decltype)
+                    self.error("unsupported '[' char in %s declaration" % decltype)
                 else:
-                    raise AssertionError("unexpected '[' char in declaration")
+                    self.error("unexpected '[' char in declaration")
             else:
-                raise AssertionError("unexpected %r char in declaration" % rawdata[j])
+                self.error(
+                    "unexpected %r char in declaration" % rawdata[j])
             if j < 0:
                 return j
         return -1 # incomplete
@@ -151,9 +156,7 @@
             # look for MS Office ]> ending
             match= _msmarkedsectionclose.search(rawdata, i+3)
         else:
-            raise AssertionError(
-                'unknown status keyword %r in marked section' % rawdata[i+3:j]
-            )
+            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
         if not match:
             return -1
         if report:
@@ -165,7 +168,7 @@
     def parse_comment(self, i, report=1):
         rawdata = self.rawdata
         if rawdata[i:i+4] != '<!--':
-            raise AssertionError('unexpected call to parse_comment()')
+            self.error('unexpected call to parse_comment()')
         match = _commentclose.search(rawdata, i+4)
         if not match:
             return -1
@@ -189,9 +192,7 @@
                     return -1
                 if s != "<!":
                     self.updatepos(declstartpos, j + 1)
-                    raise AssertionError(
-                        "unexpected char in internal subset (in %r)" % s
-                    )
+                    self.error("unexpected char in internal subset (in %r)" % s)
                 if (j + 2) == n:
                     # end of buffer; incomplete
                     return -1
@@ -208,9 +209,8 @@
                     return -1
                 if name not in {"attlist", "element", "entity", "notation"}:
                     self.updatepos(declstartpos, j + 2)
-                    raise AssertionError(
-                        "unknown declaration %r in internal subset" % name
-                    )
+                    self.error(
+                        "unknown declaration %r in internal subset" % name)
                 # handle the individual names
                 meth = getattr(self, "_parse_doctype_" + name)
                 j = meth(j, declstartpos)
@@ -234,14 +234,14 @@
                     if rawdata[j] == ">":
                         return j
                     self.updatepos(declstartpos, j)
-                    raise AssertionError("unexpected char after internal subset")
+                    self.error("unexpected char after internal subset")
                 else:
                     return -1
             elif c.isspace():
                 j = j + 1
             else:
                 self.updatepos(declstartpos, j)
-                raise AssertionError("unexpected char %r in internal subset" % c)
+                self.error("unexpected char %r in internal subset" % c)
         # end of buffer reached
         return -1
 
@@ -387,9 +387,8 @@
             return name.lower(), m.end()
         else:
             self.updatepos(declstartpos, i)
-            raise AssertionError(
-                "expected name token at %r" % rawdata[declstartpos:declstartpos+20]
-            )
+            self.error("expected name token at %r"
+                       % rawdata[declstartpos:declstartpos+20])
 
     # To be overridden -- handlers for unknown objects
     def unknown_decl(self, data):
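
The restored error() hook routes malformed-markup conditions to a subclass-defined exception instead of AssertionError. A sketch of a subclass wiring it up (StrictParser is an illustrative name; _markupbase is an internal module):

    from _markupbase import ParserBase

    class StrictParser(ParserBase):
        def __init__(self):
            self.reset()                 # initialize lineno/offset

        def error(self, message):
            raise ValueError(message)    # pick the exception type here

    try:
        StrictParser().error("unexpected '[' char in declaration")
    except ValueError as exc:
        print(exc)
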
diff --git a/common/py3-stdlib/_osx_support.py b/common/py3-stdlib/_osx_support.py
index aa66c8b..37975fe 100644
--- a/common/py3-stdlib/_osx_support.py
+++ b/common/py3-stdlib/_osx_support.py
@@ -96,7 +96,7 @@
     if _SYSTEM_VERSION is None:
         _SYSTEM_VERSION = ''
         try:
-            f = open('/System/Library/CoreServices/SystemVersion.plist', encoding="utf-8")
+            f = open('/System/Library/CoreServices/SystemVersion.plist')
         except OSError:
             # We're on a plain darwin box, fall back to the default
             # behaviour.
@@ -156,9 +156,9 @@
 
     if _cache_default_sysroot is not None:
         return _cache_default_sysroot
-
+   
     contents = _read_output('%s -c -E -v - </dev/null' % (cc,), True)
-    in_incdirs = False
+    in_incdirs = False   
     for line in contents.splitlines():
         if line.startswith("#include <...>"):
             in_incdirs = True
@@ -428,9 +428,10 @@
             break
 
     if sysroot and not os.path.isdir(sysroot):
-        sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n")
-        sys.stderr.write("Please check your Xcode installation\n")
-        sys.stderr.flush()
+        from distutils import log
+        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
+                sysroot)
+        log.warn("Please check your Xcode installation")
 
     return compiler_so
 
@@ -481,7 +482,7 @@
 
     This customization is performed when the first
     extension module build is requested
-    in distutils.sysconfig.customize_compiler.
+    in distutils.sysconfig.customize_compiler.
     """
 
     # Find a compiler to use for extension module builds
@@ -524,10 +525,10 @@
             try:
                 macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
             except ValueError:
-                macrelease = (10, 3)
+                macrelease = (10, 0)
         else:
             # assume no universal support
-            macrelease = (10, 3)
+            macrelease = (10, 0)
 
         if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
             # The universal build will build fat binaries, but not on
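
A standalone sketch of the release parsing above, including the (10, 0) fallback this revert restores (parse_macrelease is an illustrative name; the real code reads the string from SystemVersion.plist):

    def parse_macrelease(macrelease, fallback=(10, 0)):
        try:
            # Keep only the major/minor components, e.g. "10.15.7" -> (10, 15).
            return tuple(int(i) for i in macrelease.split('.')[0:2])
        except ValueError:
            return fallback

    print(parse_macrelease("10.15.7"))   # (10, 15)
    print(parse_macrelease("garbage"))   # (10, 0)
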
diff --git a/common/py3-stdlib/_pydecimal.py b/common/py3-stdlib/_pydecimal.py
index 3d6cece..ab989e5 100644
--- a/common/py3-stdlib/_pydecimal.py
+++ b/common/py3-stdlib/_pydecimal.py
@@ -951,7 +951,7 @@
             if self.is_snan():
                 raise TypeError('Cannot hash a signaling NaN value.')
             elif self.is_nan():
-                return object.__hash__(self)
+                return _PyHASH_NAN
             else:
                 if self._sign:
                     return -_PyHASH_INF
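
The hunk above swaps identity-based NaN hashing (3.10) back to the fixed _PyHASH_NAN sentinel. A sketch of the observable difference; the C _decimal module, which decimal normally imports, mirrors this pure-Python logic:

    from decimal import Decimal

    a, b = Decimal('nan'), Decimal('nan')
    # After the revert (3.9 semantics): both hashes equal sys.hash_info.nan,
    # so NaN-heavy dicts and sets collapse into a single hash bucket.
    # With the removed 3.10 code: hashes come from object identity and
    # usually differ, avoiding that pile-up.
    print(hash(a), hash(b))
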
diff --git a/common/py3-stdlib/_pyio.py b/common/py3-stdlib/_pyio.py
index fb867fb..4804ed2 100644
--- a/common/py3-stdlib/_pyio.py
+++ b/common/py3-stdlib/_pyio.py
@@ -40,36 +40,6 @@
 _CHECK_ERRORS = _IOBASE_EMITS_UNRAISABLE
 
 
-def text_encoding(encoding, stacklevel=2):
-    """
-    A helper function to choose the text encoding.
-
-    When encoding is not None, just return it.
-    Otherwise, return the default text encoding (i.e. "locale").
-
-    This function emits an EncodingWarning if *encoding* is None and
-    sys.flags.warn_default_encoding is true.
-
-    This can be used in APIs with an encoding=None parameter
-    that pass it to TextIOWrapper or open.
-    However, please consider using encoding="utf-8" for new APIs.
-    """
-    if encoding is None:
-        encoding = "locale"
-        if sys.flags.warn_default_encoding:
-            import warnings
-            warnings.warn("'encoding' argument not specified.",
-                          EncodingWarning, stacklevel + 1)
-    return encoding
-
-
-# Wrapper for builtins.open
-#
-# Trick so that open() won't become a bound method when stored
-# as a class variable (as dbm.dumb does).
-#
-# See init_set_builtins_open() in Python/pylifecycle.c.
-@staticmethod
 def open(file, mode="r", buffering=-1, encoding=None, errors=None,
          newline=None, closefd=True, opener=None):
 
@@ -278,7 +248,6 @@
         result = buffer
         if binary:
             return result
-        encoding = text_encoding(encoding)
         text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
         result = text
         text.mode = mode
@@ -311,20 +280,27 @@
     open_code = _open_code_with_warning
 
 
-def __getattr__(name):
-    if name == "OpenWrapper":
-        # bpo-43680: Until Python 3.9, _pyio.open was not a static method and
-        # builtins.open was set to OpenWrapper to not become a bound method
-        # when set to a class variable. _io.open is a built-in function whereas
-        # _pyio.open is a Python function. In Python 3.10, _pyio.open() is now
-        # a static method, and builtins.open() is now io.open().
-        import warnings
-        warnings.warn('OpenWrapper is deprecated, use open instead',
-                      DeprecationWarning, stacklevel=2)
-        global OpenWrapper
-        OpenWrapper = open
-        return OpenWrapper
-    raise AttributeError(name)
+class DocDescriptor:
+    """Helper for builtins.open.__doc__
+    """
+    def __get__(self, obj, typ=None):
+        return (
+            "open(file, mode='r', buffering=-1, encoding=None, "
+                 "errors=None, newline=None, closefd=True)\n\n" +
+            open.__doc__)
+
+class OpenWrapper:
+    """Wrapper for builtins.open
+
+    Trick so that open won't become a bound method when stored
+    as a class variable (as dbm.dumb does).
+
+    See initstdio() in Python/pylifecycle.c.
+    """
+    __doc__ = DocDescriptor()
+
+    def __new__(cls, *args, **kwargs):
+        return open(*args, **kwargs)
 
 
 # In normal operation, both `UnsupportedOperation`s should be bound to the
@@ -338,7 +314,8 @@
 
 class IOBase(metaclass=abc.ABCMeta):
 
-    """The abstract base class for all I/O classes.
+    """The abstract base class for all I/O classes, acting on streams of
+    bytes. There is no public constructor.
 
     This class provides dummy implementations for many methods that
     derived classes can override selectively; the default implementations
@@ -1844,7 +1821,7 @@
     """Base class for text I/O.
 
     This class provides a character and line based interface to stream
-    I/O.
+    I/O. There is no public constructor.
     """
 
     def read(self, size=-1):
@@ -2027,22 +2004,19 @@
     def __init__(self, buffer, encoding=None, errors=None, newline=None,
                  line_buffering=False, write_through=False):
         self._check_newline(newline)
-        encoding = text_encoding(encoding)
-
-        if encoding == "locale":
+        if encoding is None:
             try:
-                encoding = os.device_encoding(buffer.fileno()) or "locale"
+                encoding = os.device_encoding(buffer.fileno())
             except (AttributeError, UnsupportedOperation):
                 pass
-
-        if encoding == "locale":
-            try:
-                import locale
-            except ImportError:
-                # Importing locale may fail if Python is being built
-                encoding = "utf-8"
-            else:
-                encoding = locale.getpreferredencoding(False)
+            if encoding is None:
+                try:
+                    import locale
+                except ImportError:
+                    # Importing locale may fail if Python is being built
+                    encoding = "ascii"
+                else:
+                    encoding = locale.getpreferredencoding(False)
 
         if not isinstance(encoding, str):
             raise ValueError("invalid encoding: %r" % encoding)
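
On 3.10+ the removed text_encoding() helper backs a pattern like the sketch below (read_config is an illustrative name); after this revert the helper, the "locale" encoding name, and EncodingWarning are gone again, and the fallback is locale.getpreferredencoding(False):

    import io

    def read_config(path, encoding=None):
        # 3.10+: returns encoding unchanged, or "locale" when it is None,
        # emitting EncodingWarning under -X warn_default_encoding.
        encoding = io.text_encoding(encoding)
        with open(path, encoding=encoding) as fp:
            return fp.read()
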
diff --git a/common/py3-stdlib/_sitebuiltins.py b/common/py3-stdlib/_sitebuiltins.py
index c66269a..c29cf4b 100644
--- a/common/py3-stdlib/_sitebuiltins.py
+++ b/common/py3-stdlib/_sitebuiltins.py
@@ -47,7 +47,7 @@
         data = None
         for filename in self.__filenames:
             try:
-                with open(filename, encoding='utf-8') as fp:
+                with open(filename, "r") as fp:
                     data = fp.read()
                 break
             except OSError:
diff --git a/common/py3-stdlib/_strptime.py b/common/py3-stdlib/_strptime.py
index b97dfcc..5df37f5 100644
--- a/common/py3-stdlib/_strptime.py
+++ b/common/py3-stdlib/_strptime.py
@@ -201,7 +201,7 @@
             #XXX: Does 'Y' need to worry about having less or more than
             #     4 digits?
             'Y': r"(?P<Y>\d\d\d\d)",
-            'z': r"(?P<z>[+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?|(?-i:Z))",
+            'z': r"(?P<z>[+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?|Z)",
             'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
             'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
             'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
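
Both variants of the %z pattern accept numeric offsets and a "Z" suffix; the (?-i:Z) form being removed matters only because _strptime compiles its patterns with IGNORECASE, so the reverted plain "Z" also matches a lowercase "z". A quick check:

    from datetime import datetime

    dt = datetime.strptime("2022-01-02T03:04:05Z", "%Y-%m-%dT%H:%M:%S%z")
    print(dt.tzinfo)   # UTC
    # After the revert a lowercase "z" suffix parses as well; with (?-i:Z)
    # it is rejected despite the IGNORECASE flag on the overall pattern.
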
diff --git a/common/py3-stdlib/_weakrefset.py b/common/py3-stdlib/_weakrefset.py
index 2a27684..b267780 100644
--- a/common/py3-stdlib/_weakrefset.py
+++ b/common/py3-stdlib/_weakrefset.py
@@ -51,14 +51,10 @@
             self.update(data)
 
     def _commit_removals(self):
-        pop = self._pending_removals.pop
+        l = self._pending_removals
         discard = self.data.discard
-        while True:
-            try:
-                item = pop()
-            except IndexError:
-                return
-            discard(item)
+        while l:
+            discard(l.pop())
 
     def __iter__(self):
         with _IterationGuard(self):
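
Both loops drain the shared _pending_removals list, but the removed version tolerates a concurrent pop between the emptiness check and pop() itself. A standalone sketch of that defensive form (drain is an illustrative name):

    def drain(pending, discard):
        pop = pending.pop
        while True:
            try:
                item = pop()
            except IndexError:   # emptied, possibly by another thread
                return
            discard(item)
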
diff --git a/common/py3-stdlib/abc.py b/common/py3-stdlib/abc.py
index 3c552ce..431b640 100644
--- a/common/py3-stdlib/abc.py
+++ b/common/py3-stdlib/abc.py
@@ -28,14 +28,7 @@
 class abstractclassmethod(classmethod):
     """A decorator indicating abstract classmethods.
 
-    Deprecated, use 'classmethod' with 'abstractmethod' instead:
-
-        class C(ABC):
-            @classmethod
-            @abstractmethod
-            def my_abstract_classmethod(cls, ...):
-                ...
-
+    Deprecated, use 'classmethod' with 'abstractmethod' instead.
     """
 
     __isabstractmethod__ = True
@@ -48,14 +41,7 @@
 class abstractstaticmethod(staticmethod):
     """A decorator indicating abstract staticmethods.
 
-    Deprecated, use 'staticmethod' with 'abstractmethod' instead:
-
-        class C(ABC):
-            @staticmethod
-            @abstractmethod
-            def my_abstract_staticmethod(...):
-                ...
-
+    Deprecated, use 'staticmethod' with 'abstractmethod' instead.
     """
 
     __isabstractmethod__ = True
@@ -68,14 +54,7 @@
 class abstractproperty(property):
     """A decorator indicating abstract properties.
 
-    Deprecated, use 'property' with 'abstractmethod' instead:
-
-        class C(ABC):
-            @property
-            @abstractmethod
-            def my_abstract_property(self):
-                ...
-
+    Deprecated, use 'property' with 'abstractmethod' instead.
     """
 
     __isabstractmethod__ = True
@@ -143,44 +122,6 @@
             _reset_caches(cls)
 
 
-def update_abstractmethods(cls):
-    """Recalculate the set of abstract methods of an abstract class.
-
-    If a class has had one of its abstract methods implemented after the
-    class was created, the method will not be considered implemented until
-    this function is called. Alternatively, if a new abstract method has been
-    added to the class, it will only be considered an abstract method of the
-    class after this function is called.
-
-    This function should be called before any use is made of the class,
-    usually in class decorators that add methods to the subject class.
-
-    Returns cls, to allow usage as a class decorator.
-
-    If cls is not an instance of ABCMeta, does nothing.
-    """
-    if not hasattr(cls, '__abstractmethods__'):
-        # We check for __abstractmethods__ here because cls might by a C
-        # implementation or a python implementation (especially during
-        # testing), and we want to handle both cases.
-        return cls
-
-    abstracts = set()
-    # Check the existing abstract methods of the parents, keep only the ones
-    # that are not implemented.
-    for scls in cls.__bases__:
-        for name in getattr(scls, '__abstractmethods__', ()):
-            value = getattr(cls, name, None)
-            if getattr(value, "__isabstractmethod__", False):
-                abstracts.add(name)
-    # Also add any other newly added abstract methods.
-    for name, value in cls.__dict__.items():
-        if getattr(value, "__isabstractmethod__", False):
-            abstracts.add(name)
-    cls.__abstractmethods__ = frozenset(abstracts)
-    return cls
-
-
 class ABC(metaclass=ABCMeta):
     """Helper class that provides a standard way to create an ABC using
     inheritance.
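
For context, a sketch of what the removed update_abstractmethods() enables on 3.10+ (it does not exist after this revert): a class decorator that injects a method can refresh ABCMeta's bookkeeping so the class becomes instantiable.

    import abc

    class Base(abc.ABC):
        @abc.abstractmethod
        def run(self): ...

    def add_run(cls):
        cls.run = lambda self: "ran"
        return abc.update_abstractmethods(cls)   # 3.10+ only

    @add_run
    class Impl(Base):
        pass

    print(Impl().run())   # 'run' is no longer counted as abstract
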
diff --git a/common/py3-stdlib/argparse.py b/common/py3-stdlib/argparse.py
index 2c0dd85..2fb1da5 100644
--- a/common/py3-stdlib/argparse.py
+++ b/common/py3-stdlib/argparse.py
@@ -392,9 +392,6 @@
         group_actions = set()
         inserts = {}
         for group in groups:
-            if not group._group_actions:
-                raise ValueError(f'empty group {group}')
-
             try:
                 start = actions.index(group._group_actions[0])
             except ValueError:
@@ -529,13 +526,12 @@
         parts = [action_header]
 
         # if there was help for the action, add lines of help text
-        if action.help and action.help.strip():
+        if action.help:
             help_text = self._expand_help(action)
-            if help_text:
-                help_lines = self._split_lines(help_text, help_width)
-                parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
-                for line in help_lines[1:]:
-                    parts.append('%*s%s\n' % (help_position, '', line))
+            help_lines = self._split_lines(help_text, help_width)
+            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
+            for line in help_lines[1:]:
+                parts.append('%*s%s\n' % (help_position, '', line))
 
         # or add a newline if the description doesn't end with one
         elif not action_header.endswith('\n'):
@@ -726,13 +722,11 @@
     if argument is None:
         return None
     elif argument.option_strings:
-        return '/'.join(argument.option_strings)
+        return '/'.join(argument.option_strings)
     elif argument.metavar not in (None, SUPPRESS):
         return argument.metavar
     elif argument.dest not in (None, SUPPRESS):
         return argument.dest
-    elif argument.choices:
-        return '{' + ','.join(argument.choices) + '}'
     else:
         return None
 
@@ -878,8 +872,8 @@
                 option_string = '--no-' + option_string[2:]
                 _option_strings.append(option_string)
 
-        if help is not None and default is not None and default is not SUPPRESS:
-            help += " (default: %(default)s)"
+        if help is not None and default is not None:
+            help += f" (default: {default})"
 
         super().__init__(
             option_strings=_option_strings,
@@ -1256,9 +1250,9 @@
         # the special argument "-" means sys.std{in,out}
         if string == '-':
             if 'r' in self._mode:
-                return _sys.stdin.buffer if 'b' in self._mode else _sys.stdin
-            elif any(c in self._mode for c in 'wax'):
-                return _sys.stdout.buffer if 'b' in self._mode else _sys.stdout
+                return _sys.stdin
+            elif 'w' in self._mode:
+                return _sys.stdout
             else:
                 msg = _('argument "-" with mode %r') % self._mode
                 raise ValueError(msg)
@@ -1672,8 +1666,7 @@
     """Object for parsing command line strings into Python objects.
 
     Keyword Arguments:
-        - prog -- The name of the program (default:
-            ``os.path.basename(sys.argv[0])``)
+        - prog -- The name of the program (default: sys.argv[0])
         - usage -- A usage message (default: auto-generated from arguments)
         - description -- A description of what the program does
         - epilog -- Text following the argument descriptions
@@ -1726,7 +1719,7 @@
 
         add_group = self.add_argument_group
         self._positionals = add_group(_('positional arguments'))
-        self._optionals = add_group(_('options'))
+        self._optionals = add_group(_('optional arguments'))
         self._subparsers = None
 
         # register types
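
A small sketch of the help-string change to BooleanOptionalAction above (available since 3.9): the reverted code interpolates the default eagerly with an f-string instead of deferring to the '%(default)s' specifier, so SUPPRESS defaults are no longer special-cased.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--cache', action=argparse.BooleanOptionalAction,
                        default=True, help='use the on-disk cache')
    parser.print_help()   # help line ends with "(default: True)"
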
diff --git a/common/py3-stdlib/ast.py b/common/py3-stdlib/ast.py
index f4d2f6e..ecd4895 100644
--- a/common/py3-stdlib/ast.py
+++ b/common/py3-stdlib/ast.py
@@ -59,14 +59,11 @@
     sets, booleans, and None.
     """
     if isinstance(node_or_string, str):
-        node_or_string = parse(node_or_string.lstrip(" \t"), mode='eval')
+        node_or_string = parse(node_or_string, mode='eval')
     if isinstance(node_or_string, Expression):
         node_or_string = node_or_string.body
     def _raise_malformed_node(node):
-        msg = "malformed node or string"
-        if lno := getattr(node, 'lineno', None):
-            msg += f' on line {lno}'
-        raise ValueError(msg + f': {node!r}')
+        raise ValueError(f'malformed node or string: {node!r}')
     def _convert_num(node):
         if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):
             _raise_malformed_node(node)
@@ -797,9 +794,6 @@
         else:
             super().visit(node)
 
-    # Note: as visit() resets the output text, do NOT rely on
-    # NodeVisitor.generic_visit to handle any nodes (as it calls back in to
-    # the subclass visit() method, which resets self._source to an empty list)
     def visit(self, node):
         """Outputs a source code string that, if converted back to an ast
         (using ast.parse) will generate an AST equivalent to *node*"""
@@ -1202,13 +1196,8 @@
 
     def _write_constant(self, value):
         if isinstance(value, (float, complex)):
-            # Substitute overflowing decimal literal for AST infinities,
-            # and inf - inf for NaNs.
-            self.write(
-                repr(value)
-                .replace("inf", _INFSTR)
-                .replace("nan", f"({_INFSTR}-{_INFSTR})")
-            )
+            # Substitute overflowing decimal literal for AST infinities.
+            self.write(repr(value).replace("inf", _INFSTR))
         elif self._avoid_backslashes and isinstance(value, str):
             self._write_str_avoiding_backslashes(value)
         else:
@@ -1281,13 +1270,10 @@
             self.traverse(node.orelse)
 
     def visit_Set(self, node):
-        if node.elts:
-            with self.delimit("{", "}"):
-                self.interleave(lambda: self.write(", "), self.traverse, node.elts)
-        else:
-            # `{}` would be interpreted as a dictionary literal, and
-            # `set` might be shadowed. Thus:
-            self.write('{*()}')
+        if not node.elts:
+            raise ValueError("Set node should have at least one item")
+        with self.delimit("{", "}"):
+            self.interleave(lambda: self.write(", "), self.traverse, node.elts)
 
     def visit_Dict(self, node):
         def write_key_value_pair(k, v):
@@ -1454,9 +1440,9 @@
 
     def visit_Subscript(self, node):
         def is_simple_tuple(slice_value):
-            # when unparsing a non-empty tuple, the parentheses can be safely
+            # when unparsing a non-empty tuple, the parentheses can be safely
             # omitted if there aren't any elements that explicitly requires
-            # parentheses (such as starred expressions).
+            # parentheses (such as starred expressions).
             return (
                 isinstance(slice_value, Tuple)
                 and slice_value.elts
@@ -1489,13 +1475,6 @@
             self.write(":")
             self.traverse(node.step)
 
-    def visit_Match(self, node):
-        self.fill("match ")
-        self.traverse(node.subject)
-        with self.block():
-            for case in node.cases:
-                self.traverse(case)
-
     def visit_arg(self, node):
         self.write(node.arg)
         if node.annotation:
@@ -1580,94 +1559,6 @@
             self.write(" as ")
             self.traverse(node.optional_vars)
 
-    def visit_match_case(self, node):
-        self.fill("case ")
-        self.traverse(node.pattern)
-        if node.guard:
-            self.write(" if ")
-            self.traverse(node.guard)
-        with self.block():
-            self.traverse(node.body)
-
-    def visit_MatchValue(self, node):
-        self.traverse(node.value)
-
-    def visit_MatchSingleton(self, node):
-        self._write_constant(node.value)
-
-    def visit_MatchSequence(self, node):
-        with self.delimit("[", "]"):
-            self.interleave(
-                lambda: self.write(", "), self.traverse, node.patterns
-            )
-
-    def visit_MatchStar(self, node):
-        name = node.name
-        if name is None:
-            name = "_"
-        self.write(f"*{name}")
-
-    def visit_MatchMapping(self, node):
-        def write_key_pattern_pair(pair):
-            k, p = pair
-            self.traverse(k)
-            self.write(": ")
-            self.traverse(p)
-
-        with self.delimit("{", "}"):
-            keys = node.keys
-            self.interleave(
-                lambda: self.write(", "),
-                write_key_pattern_pair,
-                zip(keys, node.patterns, strict=True),
-            )
-            rest = node.rest
-            if rest is not None:
-                if keys:
-                    self.write(", ")
-                self.write(f"**{rest}")
-
-    def visit_MatchClass(self, node):
-        self.set_precedence(_Precedence.ATOM, node.cls)
-        self.traverse(node.cls)
-        with self.delimit("(", ")"):
-            patterns = node.patterns
-            self.interleave(
-                lambda: self.write(", "), self.traverse, patterns
-            )
-            attrs = node.kwd_attrs
-            if attrs:
-                def write_attr_pattern(pair):
-                    attr, pattern = pair
-                    self.write(f"{attr}=")
-                    self.traverse(pattern)
-
-                if patterns:
-                    self.write(", ")
-                self.interleave(
-                    lambda: self.write(", "),
-                    write_attr_pattern,
-                    zip(attrs, node.kwd_patterns, strict=True),
-                )
-
-    def visit_MatchAs(self, node):
-        name = node.name
-        pattern = node.pattern
-        if name is None:
-            self.write("_")
-        elif pattern is None:
-            self.write(node.name)
-        else:
-            with self.require_parens(_Precedence.TEST, node):
-                self.set_precedence(_Precedence.BOR, node.pattern)
-                self.traverse(node.pattern)
-                self.write(f" as {node.name}")
-
-    def visit_MatchOr(self, node):
-        with self.require_parens(_Precedence.BOR, node):
-            self.set_precedence(_Precedence.BOR.next(), *node.patterns)
-            self.interleave(lambda: self.write(" | "), self.traverse, node.patterns)
-
 def unparse(ast_obj):
     unparser = _Unparser()
     return unparser.visit(ast_obj)
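
The unparse() entry point above round-trips an AST back to source (public since 3.9); a quick demonstration, together with the literal_eval() helper this diff also touches:

    import ast

    tree = ast.parse("x = {1: 'a', 2: 'b'}")
    print(ast.unparse(tree))            # x = {1: 'a', 2: 'b'}
    print(ast.literal_eval("{1, 2}"))   # {1, 2}
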
diff --git a/common/py3-stdlib/asynchat.py b/common/py3-stdlib/asynchat.py
index de26ffa..f4ba361 100644
--- a/common/py3-stdlib/asynchat.py
+++ b/common/py3-stdlib/asynchat.py
@@ -48,14 +48,6 @@
 import asyncore
 from collections import deque
 
-from warnings import warn
-warn(
-    'The asynchat module is deprecated. '
-    'The recommended replacement is asyncio',
-    DeprecationWarning,
-    stacklevel=2)
-
-
 
 class async_chat(asyncore.dispatcher):
     """This is an abstract class.  You must derive from this class, and add
diff --git a/common/py3-stdlib/asyncio/__init__.py b/common/py3-stdlib/asyncio/__init__.py
index 200b14c..eb84bfb 100644
--- a/common/py3-stdlib/asyncio/__init__.py
+++ b/common/py3-stdlib/asyncio/__init__.py
@@ -20,6 +20,10 @@
 from .threads import *
 from .transports import *
 
+# Exposed for _asynciomodule.c to implement now deprecated
+# Task.all_tasks() method.  This function will be removed in 3.9.
+from .tasks import _all_tasks_compat  # NoQA
+
 __all__ = (base_events.__all__ +
            coroutines.__all__ +
            events.__all__ +
diff --git a/common/py3-stdlib/asyncio/base_events.py b/common/py3-stdlib/asyncio/base_events.py
index 952da11..b2d446a 100644
--- a/common/py3-stdlib/asyncio/base_events.py
+++ b/common/py3-stdlib/asyncio/base_events.py
@@ -49,7 +49,7 @@
 from .log import logger
 
 
-__all__ = 'BaseEventLoop','Server',
+__all__ = 'BaseEventLoop',
 
 
 # Minimum number of _scheduled timer handles before cleanup of
@@ -202,11 +202,6 @@
         pass
 
 
-def _check_ssl_socket(sock):
-    if ssl is not None and isinstance(sock, ssl.SSLSocket):
-        raise TypeError("Socket cannot be of type SSLSocket")
-
-
 class _SendfileFallbackProtocol(protocols.Protocol):
     def __init__(self, transp):
         if not isinstance(transp, transports._FlowControlMixin):
@@ -355,7 +350,7 @@
         self._start_serving()
         # Skip one loop iteration so that all 'loop.add_reader'
         # go through.
-        await tasks.sleep(0)
+        await tasks.sleep(0, loop=self._loop)
 
     async def serve_forever(self):
         if self._serving_forever_fut is not None:
@@ -546,7 +541,8 @@
 
         results = await tasks.gather(
             *[ag.aclose() for ag in closing_agens],
-            return_exceptions=True)
+            return_exceptions=True,
+            loop=self)
 
         for result, agen in zip(results, closing_agens):
             if isinstance(result, Exception):
@@ -868,7 +864,6 @@
                             *, fallback=True):
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
-        _check_ssl_socket(sock)
         self._check_sendfile_params(sock, file, offset, count)
         try:
             return await self._sock_sendfile_native(sock, file,
@@ -978,7 +973,7 @@
             happy_eyeballs_delay=None, interleave=None):
         """Connect to a TCP server.
 
-        Create a streaming transport connection to a given internet host and
+        Create a streaming transport connection to a given Internet host and
         port: socket family AF_INET or socket.AF_INET6 depending on host (or
         family if specified), socket type SOCK_STREAM. protocol_factory must be
         a callable returning a protocol instance.
@@ -1010,9 +1005,6 @@
             raise ValueError(
                 'ssl_handshake_timeout is only meaningful with ssl')
 
-        if sock is not None:
-            _check_ssl_socket(sock)
-
         if happy_eyeballs_delay is not None and interleave is None:
             # If using happy eyeballs, default to interleave addresses by family
             interleave = 1
@@ -1446,9 +1438,6 @@
             raise ValueError(
                 'ssl_handshake_timeout is only meaningful with ssl')
 
-        if sock is not None:
-            _check_ssl_socket(sock)
-
         if host is not None or port is not None:
             if sock is not None:
                 raise ValueError(
@@ -1468,7 +1457,7 @@
             fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                   flags=flags)
                   for host in hosts]
-            infos = await tasks.gather(*fs)
+            infos = await tasks.gather(*fs, loop=self)
             infos = set(itertools.chain.from_iterable(infos))
 
             completed = False
@@ -1526,7 +1515,7 @@
             server._start_serving()
             # Skip one loop iteration so that all 'loop.add_reader'
             # go through.
-            await tasks.sleep(0)
+            await tasks.sleep(0, loop=self)
 
         if self._debug:
             logger.info("%r is serving", server)
@@ -1536,6 +1525,14 @@
             self, protocol_factory, sock,
             *, ssl=None,
             ssl_handshake_timeout=None):
+        """Handle an accepted connection.
+
+        This is used by servers that accept connections outside of
+        asyncio but that use asyncio to handle connections.
+
+        This method is a coroutine.  When completed, the coroutine
+        returns a (transport, protocol) pair.
+        """
         if sock.type != socket.SOCK_STREAM:
             raise ValueError(f'A Stream Socket was expected, got {sock!r}')
 
@@ -1543,9 +1540,6 @@
             raise ValueError(
                 'ssl_handshake_timeout is only meaningful with ssl')
 
-        if sock is not None:
-            _check_ssl_socket(sock)
-
         transport, protocol = await self._create_connection_transport(
             sock, protocol_factory, ssl, '', server_side=True,
             ssl_handshake_timeout=ssl_handshake_timeout)
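
A sketch of the intent behind the removed _check_ssl_socket() calls: asyncio layers TLS itself, so a socket that is already an ssl.SSLSocket is rejected up front (check_plain_socket is an illustrative name; the real helper also guards against ssl being unavailable):

    import ssl

    def check_plain_socket(sock):
        if isinstance(sock, ssl.SSLSocket):
            raise TypeError("Socket cannot be of type SSLSocket")
        return sock
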
diff --git a/common/py3-stdlib/asyncio/events.py b/common/py3-stdlib/asyncio/events.py
index 5ab1acc..0dce87b 100644
--- a/common/py3-stdlib/asyncio/events.py
+++ b/common/py3-stdlib/asyncio/events.py
@@ -258,13 +258,13 @@
         """Notification that a TimerHandle has been cancelled."""
         raise NotImplementedError
 
-    def call_soon(self, callback, *args, context=None):
-        return self.call_later(0, callback, *args, context=context)
+    def call_soon(self, callback, *args):
+        return self.call_later(0, callback, *args)
 
-    def call_later(self, delay, callback, *args, context=None):
+    def call_later(self, delay, callback, *args):
         raise NotImplementedError
 
-    def call_at(self, when, callback, *args, context=None):
+    def call_at(self, when, callback, *args):
         raise NotImplementedError
 
     def time(self):
@@ -280,7 +280,7 @@
 
     # Methods for interacting with threads.
 
-    def call_soon_threadsafe(self, callback, *args, context=None):
+    def call_soon_threadsafe(self, callback, *args):
         raise NotImplementedError
 
     def run_in_executor(self, executor, func, *args):
@@ -418,20 +418,6 @@
         """
         raise NotImplementedError
 
-    async def connect_accepted_socket(
-            self, protocol_factory, sock,
-            *, ssl=None,
-            ssl_handshake_timeout=None):
-        """Handle an accepted connection.
-
-        This is used by servers that accept connections outside of
-        asyncio, but use asyncio to handle connections.
-
-        This method is a coroutine.  When completed, the coroutine
-        returns a (transport, protocol) pair.
-        """
-        raise NotImplementedError
-
     async def create_datagram_endpoint(self, protocol_factory,
                                        local_addr=None, remote_addr=None, *,
                                        family=0, proto=0, flags=0,
@@ -479,7 +465,7 @@
         # The reason to accept file-like object instead of just file descriptor
         # is: we need to own pipe and close it at transport finishing
         # Can got complicated errors if pass f.fileno(),
-        # close fd in pipe transport then close f and vice versa.
+        # close fd in pipe transport then close f and vice versa.
         raise NotImplementedError
 
     async def connect_write_pipe(self, protocol_factory, pipe):
@@ -492,7 +478,7 @@
         # The reason to accept file-like object instead of just file descriptor
         # is: we need to own pipe and close it at transport finishing
         # Can got complicated errors if pass f.fileno(),
-        # close fd in pipe transport then close f and vice versa.
+        # close fd in pipe transport then close f and vice versa.
         raise NotImplementedError
 
     async def subprocess_shell(self, protocol_factory, cmd, *,
@@ -759,16 +745,9 @@
     the result of `get_event_loop_policy().get_event_loop()` call.
     """
     # NOTE: this function is implemented in C (see _asynciomodule.c)
-    return _py__get_event_loop()
-
-
-def _get_event_loop(stacklevel=3):
     current_loop = _get_running_loop()
     if current_loop is not None:
         return current_loop
-    import warnings
-    warnings.warn('There is no current event loop',
-                  DeprecationWarning, stacklevel=stacklevel)
     return get_event_loop_policy().get_event_loop()
 
 
@@ -798,7 +777,6 @@
 _py__set_running_loop = _set_running_loop
 _py_get_running_loop = get_running_loop
 _py_get_event_loop = get_event_loop
-_py__get_event_loop = _get_event_loop
 
 
 try:
@@ -806,7 +784,7 @@
     # functions in asyncio.  Pure Python implementation is
     # about 4 times slower than C-accelerated.
     from _asyncio import (_get_running_loop, _set_running_loop,
-                          get_running_loop, get_event_loop, _get_event_loop)
+                          get_running_loop, get_event_loop)
 except ImportError:
     pass
 else:
@@ -815,4 +793,3 @@
     _c__set_running_loop = _set_running_loop
     _c_get_running_loop = get_running_loop
     _c_get_event_loop = get_event_loop
-    _c__get_event_loop = _get_event_loop
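
The removed _get_event_loop() wrapper warned when no loop was running; the pattern it nudges callers toward works on any supported version -- start the loop with asyncio.run() and fetch it only from inside a coroutine:

    import asyncio

    async def main():
        loop = asyncio.get_running_loop()   # safe: a loop is guaranteed here
        print(loop.time())

    asyncio.run(main())
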
diff --git a/common/py3-stdlib/asyncio/futures.py b/common/py3-stdlib/asyncio/futures.py
index 8e8cd87..bed4da5 100644
--- a/common/py3-stdlib/asyncio/futures.py
+++ b/common/py3-stdlib/asyncio/futures.py
@@ -8,7 +8,6 @@
 import contextvars
 import logging
 import sys
-from types import GenericAlias
 
 from . import base_futures
 from . import events
@@ -77,7 +76,7 @@
         the default event loop.
         """
         if loop is None:
-            self._loop = events._get_event_loop()
+            self._loop = events.get_event_loop()
         else:
             self._loop = loop
         self._callbacks = []
@@ -107,7 +106,8 @@
             context['source_traceback'] = self._source_traceback
         self._loop.call_exception_handler(context)
 
-    __class_getitem__ = classmethod(GenericAlias)
+    def __class_getitem__(cls, type):
+        return cls
 
     @property
     def _log_traceback(self):
@@ -115,7 +115,7 @@
 
     @_log_traceback.setter
     def _log_traceback(self, val):
-        if val:
+        if bool(val):
             raise ValueError('_log_traceback can only be set to False')
         self.__log_traceback = False
 
@@ -408,7 +408,7 @@
     assert isinstance(future, concurrent.futures.Future), \
         f'concurrent.futures.Future is expected, got {future!r}'
     if loop is None:
-        loop = events._get_event_loop()
+        loop = events.get_event_loop()
     new_future = loop.create_future()
     _chain_future(future, new_future)
     return new_future
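
Both __class_getitem__ implementations above exist so Future can be subscripted in annotations; a sketch of the difference (the C-accelerated Future behaves the same way on each version):

    import asyncio

    fut_type = asyncio.Future[int]
    # 3.10 (GenericAlias): a parameterized alias like Future[int];
    # 3.9 (reverted stub): the bare Future class itself.
    print(fut_type)
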
diff --git a/common/py3-stdlib/asyncio/locks.py b/common/py3-stdlib/asyncio/locks.py
index 4fef64e..f1ce732 100644
--- a/common/py3-stdlib/asyncio/locks.py
+++ b/common/py3-stdlib/asyncio/locks.py
@@ -3,9 +3,10 @@
 __all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
 
 import collections
+import warnings
 
+from . import events
 from . import exceptions
-from . import mixins
 
 
 class _ContextManagerMixin:
@@ -19,7 +20,7 @@
         self.release()
 
 
-class Lock(_ContextManagerMixin, mixins._LoopBoundMixin):
+class Lock(_ContextManagerMixin):
     """Primitive lock objects.
 
     A primitive lock is a synchronization primitive that is not owned
@@ -73,10 +74,16 @@
 
     """
 
-    def __init__(self, *, loop=mixins._marker):
-        super().__init__(loop=loop)
+    def __init__(self, *, loop=None):
         self._waiters = None
         self._locked = False
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
 
     def __repr__(self):
         res = super().__repr__()
@@ -102,7 +109,7 @@
 
         if self._waiters is None:
             self._waiters = collections.deque()
-        fut = self._get_loop().create_future()
+        fut = self._loop.create_future()
         self._waiters.append(fut)
 
         # Finally block should be called before the CancelledError
@@ -154,7 +161,7 @@
             fut.set_result(True)
 
 
-class Event(mixins._LoopBoundMixin):
+class Event:
     """Asynchronous equivalent to threading.Event.
 
     Class implementing event objects. An event manages a flag that can be set
@@ -163,10 +170,16 @@
     false.
     """
 
-    def __init__(self, *, loop=mixins._marker):
-        super().__init__(loop=loop)
+    def __init__(self, *, loop=None):
         self._waiters = collections.deque()
         self._value = False
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
 
     def __repr__(self):
         res = super().__repr__()
@@ -207,7 +220,7 @@
         if self._value:
             return True
 
-        fut = self._get_loop().create_future()
+        fut = self._loop.create_future()
         self._waiters.append(fut)
         try:
             await fut
@@ -216,7 +229,7 @@
             self._waiters.remove(fut)
 
 
-class Condition(_ContextManagerMixin, mixins._LoopBoundMixin):
+class Condition(_ContextManagerMixin):
     """Asynchronous equivalent to threading.Condition.
 
     This class implements condition variable objects. A condition variable
@@ -226,10 +239,19 @@
     A new Lock object is created and used as the underlying lock.
     """
 
-    def __init__(self, lock=None, *, loop=mixins._marker):
-        super().__init__(loop=loop)
+    def __init__(self, lock=None, *, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
         if lock is None:
-            lock = Lock()
+            lock = Lock(loop=loop)
+        elif lock._loop is not self._loop:
+            raise ValueError("loop argument must agree with lock")
 
         self._lock = lock
         # Export the lock's locked(), acquire() and release() methods.
@@ -262,7 +284,7 @@
 
         self.release()
         try:
-            fut = self._get_loop().create_future()
+            fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
                 await fut
@@ -329,7 +351,7 @@
         self.notify(len(self._waiters))
 
 
-class Semaphore(_ContextManagerMixin, mixins._LoopBoundMixin):
+class Semaphore(_ContextManagerMixin):
     """A Semaphore implementation.
 
     A semaphore manages an internal counter which is decremented by each
@@ -344,12 +366,18 @@
     ValueError is raised.
     """
 
-    def __init__(self, value=1, *, loop=mixins._marker):
-        super().__init__(loop=loop)
+    def __init__(self, value=1, *, loop=None):
         if value < 0:
             raise ValueError("Semaphore initial value must be >= 0")
         self._value = value
         self._waiters = collections.deque()
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
 
     def __repr__(self):
         res = super().__repr__()
@@ -379,7 +407,7 @@
         True.
         """
         while self._value <= 0:
-            fut = self._get_loop().create_future()
+            fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
                 await fut
@@ -408,7 +436,12 @@
     above the initial value.
     """
 
-    def __init__(self, value=1, *, loop=mixins._marker):
+    def __init__(self, value=1, *, loop=None):
+        if loop:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
+
         self._bound_value = value
         super().__init__(value, loop=loop)
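
Every primitive in this file gets the same restored boilerplate: loop=None falls back to get_event_loop(), while an explicit loop still works but emits a DeprecationWarning. A stand-alone sketch of the pattern (the _Primitive name is illustrative, not part of asyncio):

    import asyncio
    import warnings

    class _Primitive:
        """Illustrative stand-in for Lock/Event/Condition/Semaphore."""

        def __init__(self, *, loop=None):
            if loop is None:
                self._loop = asyncio.get_event_loop()
            else:
                self._loop = loop
                warnings.warn(
                    "The loop argument is deprecated since Python 3.8, "
                    "and scheduled for removal in Python 3.10.",
                    DeprecationWarning, stacklevel=2)

    loop = asyncio.new_event_loop()
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _Primitive(loop=loop)            # explicit loop -> warns
    print(caught[0].category)            # <class 'DeprecationWarning'>
    loop.close()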
 
diff --git a/common/py3-stdlib/asyncio/mixins.py b/common/py3-stdlib/asyncio/mixins.py
deleted file mode 100644
index 650df05..0000000
--- a/common/py3-stdlib/asyncio/mixins.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Event loop mixins."""
-
-import threading
-from . import events
-
-_global_lock = threading.Lock()
-
-# Used as a sentinel for loop parameter
-_marker = object()
-
-
-class _LoopBoundMixin:
-    _loop = None
-
-    def __init__(self, *, loop=_marker):
-        if loop is not _marker:
-            raise TypeError(
-                f'As of 3.10, the *loop* parameter was removed from '
-                f'{type(self).__name__}() since it is no longer necessary'
-            )
-
-    def _get_loop(self):
-        loop = events._get_running_loop()
-
-        if self._loop is None:
-            with _global_lock:
-                if self._loop is None:
-                    self._loop = loop
-        if loop is not self._loop:
-            raise RuntimeError(f'{self!r} is bound to a different event loop')
-        return loop
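
For contrast, the deleted _LoopBoundMixin bound a primitive lazily to the first running loop that touched it, using double-checked locking so racing first callers settle on one loop. A stripped-down sketch of that binding logic (LoopBound is an illustrative name, outside asyncio):

    import threading

    _global_lock = threading.Lock()

    class LoopBound:
        _loop = None

        def _bind(self, current_loop):
            # First caller wins; the lock makes check-then-set atomic, so
            # racing first calls from different threads agree on one loop.
            if self._loop is None:
                with _global_lock:
                    if self._loop is None:
                        self._loop = current_loop
            if current_loop is not self._loop:
                raise RuntimeError(
                    f'{self!r} is bound to a different event loop')
            return current_loop

    obj = LoopBound()
    sentinel = object()                  # stands in for a running loop
    assert obj._bind(sentinel) is sentinel
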
diff --git a/common/py3-stdlib/asyncio/proactor_events.py b/common/py3-stdlib/asyncio/proactor_events.py
index 411685b..b4cd414 100644
--- a/common/py3-stdlib/asyncio/proactor_events.py
+++ b/common/py3-stdlib/asyncio/proactor_events.py
@@ -158,7 +158,7 @@
             # end then it may fail with ERROR_NETNAME_DELETED if we
             # just close our end.  First calling shutdown() seems to
             # cure it, but maybe using DisconnectEx() would be better.
-            if hasattr(self._sock, 'shutdown') and self._sock.fileno() != -1:
+            if hasattr(self._sock, 'shutdown'):
                 self._sock.shutdown(socket.SHUT_RDWR)
             self._sock.close()
             self._sock = None
@@ -179,12 +179,11 @@
     """Transport for read pipes."""
 
     def __init__(self, loop, sock, protocol, waiter=None,
-                 extra=None, server=None, buffer_size=65536):
-        self._pending_data_length = -1
+                 extra=None, server=None):
+        self._pending_data = None
         self._paused = True
         super().__init__(loop, sock, protocol, waiter, extra, server)
 
-        self._data = bytearray(buffer_size)
         self._loop.call_soon(self._loop_reading)
         self._paused = False
 
@@ -218,12 +217,12 @@
         if self._read_fut is None:
             self._loop.call_soon(self._loop_reading, None)
 
-        length = self._pending_data_length
-        self._pending_data_length = -1
-        if length > -1:
+        data = self._pending_data
+        self._pending_data = None
+        if data is not None:
             # Call the protocol method after calling _loop_reading(),
             # since the protocol can decide to pause reading again.
-            self._loop.call_soon(self._data_received, self._data[:length], length)
+            self._loop.call_soon(self._data_received, data)
 
         if self._loop.get_debug():
             logger.debug("%r resumes reading", self)
@@ -244,15 +243,15 @@
         if not keep_open:
             self.close()
 
-    def _data_received(self, data, length):
+    def _data_received(self, data):
         if self._paused:
             # Don't call any protocol method while reading is paused.
             # The protocol will be called on resume_reading().
-            assert self._pending_data_length == -1
-            self._pending_data_length = length
+            assert self._pending_data is None
+            self._pending_data = data
             return
 
-        if length == 0:
+        if not data:
             self._eof_received()
             return
 
@@ -270,7 +269,6 @@
             self._protocol.data_received(data)
 
     def _loop_reading(self, fut=None):
-        length = -1
         data = None
         try:
             if fut is not None:
@@ -279,18 +277,18 @@
                 self._read_fut = None
                 if fut.done():
                     # deliver data later in "finally" clause
-                    length = fut.result()
-                    if length == 0:
-                        # we got end-of-file so no need to reschedule a new read
-                        return
-
-                    data = self._data[:length]
+                    data = fut.result()
                 else:
                     # the future will be replaced by next proactor.recv call
                     fut.cancel()
 
             if self._closing:
                 # since close() has been called we ignore any read data
+                data = None
+                return
+
+            if data == b'':
+                # we got end-of-file so no need to reschedule a new read
                 return
 
             # bpo-33694: buffer_updated() has currently no fast path because of
@@ -298,7 +296,7 @@
 
             if not self._paused:
                 # reschedule a new read
-                self._read_fut = self._loop._proactor.recv_into(self._sock, self._data)
+                self._read_fut = self._loop._proactor.recv(self._sock, 32768)
         except ConnectionAbortedError as exc:
             if not self._closing:
                 self._fatal_error(exc, 'Fatal read error on pipe transport')
@@ -316,8 +314,8 @@
             if not self._paused:
                 self._read_fut.add_done_callback(self._loop_reading)
         finally:
-            if length > -1:
-                self._data_received(data, length)
+            if data is not None:
+                self._data_received(data)
 
 
 class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
@@ -452,8 +450,7 @@
             self.close()
 
 
-class _ProactorDatagramTransport(_ProactorBasePipeTransport,
-                                 transports.DatagramTransport):
+class _ProactorDatagramTransport(_ProactorBasePipeTransport):
     max_size = 256 * 1024
     def __init__(self, loop, sock, protocol, address=None,
                  waiter=None, extra=None):
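
The read-path hunks above trade the 3.10 zero-copy scheme (one preallocated bytearray refilled via the proactor's recv_into(), with only a length tracked) for per-read recv() allocations. The two socket idioms side by side, in a plain-socket sketch:

    import socket

    a, b = socket.socketpair()
    a.sendall(b"hello")

    # 3.8-era style: recv() allocates a fresh bytes object per read.
    assert b.recv(32768) == b"hello"

    a.sendall(b"world")

    # 3.10-era style: recv_into() reuses one buffer, returning a length.
    buf = bytearray(65536)
    n = b.recv_into(buf)
    assert buf[:n] == b"world"

    a.close()
    b.close()
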
diff --git a/common/py3-stdlib/asyncio/protocols.py b/common/py3-stdlib/asyncio/protocols.py
index 09987b1..69fa43e 100644
--- a/common/py3-stdlib/asyncio/protocols.py
+++ b/common/py3-stdlib/asyncio/protocols.py
@@ -109,6 +109,10 @@
 class BufferedProtocol(BaseProtocol):
     """Interface for stream protocol with manual buffer control.
 
+    Important: this has been added to asyncio in Python 3.7
+    *on a provisional basis*!  Consider it as an experimental API that
+    might be changed or removed in Python 3.8.
+
     Event methods, such as `create_server` and `create_connection`,
     accept factories that return protocols that implement this interface.
 
diff --git a/common/py3-stdlib/asyncio/queues.py b/common/py3-stdlib/asyncio/queues.py
index 10dd689..cd3f7c6 100644
--- a/common/py3-stdlib/asyncio/queues.py
+++ b/common/py3-stdlib/asyncio/queues.py
@@ -2,10 +2,10 @@
 
 import collections
 import heapq
-from types import GenericAlias
+import warnings
 
+from . import events
 from . import locks
-from . import mixins
 
 
 class QueueEmpty(Exception):
@@ -18,7 +18,7 @@
     pass
 
 
-class Queue(mixins._LoopBoundMixin):
+class Queue:
     """A queue, useful for coordinating producer and consumer coroutines.
 
     If maxsize is less than or equal to zero, the queue size is infinite. If it
@@ -30,8 +30,14 @@
     interrupted between calling qsize() and doing an operation on the Queue.
     """
 
-    def __init__(self, maxsize=0, *, loop=mixins._marker):
-        super().__init__(loop=loop)
+    def __init__(self, maxsize=0, *, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
         self._maxsize = maxsize
 
         # Futures.
@@ -39,7 +45,7 @@
         # Futures.
         self._putters = collections.deque()
         self._unfinished_tasks = 0
-        self._finished = locks.Event()
+        self._finished = locks.Event(loop=loop)
         self._finished.set()
         self._init(maxsize)
 
@@ -70,7 +76,8 @@
     def __str__(self):
         return f'<{type(self).__name__} {self._format()}>'
 
-    __class_getitem__ = classmethod(GenericAlias)
+    def __class_getitem__(cls, type):
+        return cls
 
     def _format(self):
         result = f'maxsize={self._maxsize!r}'
@@ -115,7 +122,7 @@
         slot is available before adding item.
         """
         while self.full():
-            putter = self._get_loop().create_future()
+            putter = self._loop.create_future()
             self._putters.append(putter)
             try:
                 await putter
@@ -153,7 +160,7 @@
         If queue is empty, wait until an item is available.
         """
         while self.empty():
-            getter = self._get_loop().create_future()
+            getter = self._loop.create_future()
             self._getters.append(getter)
             try:
                 await getter
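
The two __class_getitem__ spellings exist so that Queue[int] stays valid at runtime: the restored 3.8 fallback returns the bare class and discards the parameter, whereas classmethod(GenericAlias) produces a real parameterized alias. A side-by-side sketch (GenericAlias needs Python 3.9+; the class names are illustrative):

    from types import GenericAlias

    class OldStyle:
        def __class_getitem__(cls, item):
            return cls                   # 3.8 behavior: parameter dropped

    class NewStyle:
        __class_getitem__ = classmethod(GenericAlias)

    print(OldStyle[int])                 # <class '__main__.OldStyle'>
    print(NewStyle[int])                 # __main__.NewStyle[int]
    print(NewStyle[int].__args__)        # (<class 'int'>,)
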
diff --git a/common/py3-stdlib/asyncio/runners.py b/common/py3-stdlib/asyncio/runners.py
index 9a5e9a4..268635d 100644
--- a/common/py3-stdlib/asyncio/runners.py
+++ b/common/py3-stdlib/asyncio/runners.py
@@ -60,7 +60,8 @@
     for task in to_cancel:
         task.cancel()
 
-    loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
+    loop.run_until_complete(
+        tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
 
     for task in to_cancel:
         if task.cancelled():
diff --git a/common/py3-stdlib/asyncio/selector_events.py b/common/py3-stdlib/asyncio/selector_events.py
index 71080b8..59cb6b1 100644
--- a/common/py3-stdlib/asyncio/selector_events.py
+++ b/common/py3-stdlib/asyncio/selector_events.py
@@ -40,6 +40,11 @@
         return bool(key.events & event)
 
 
+def _check_ssl_socket(sock):
+    if ssl is not None and isinstance(sock, ssl.SSLSocket):
+        raise TypeError("Socket cannot be of type SSLSocket")
+
+
 class BaseSelectorEventLoop(base_events.BaseEventLoop):
     """Selector event loop.
 
@@ -352,7 +357,7 @@
         The maximum amount of data to be received at once is specified by
         nbytes.
         """
-        base_events._check_ssl_socket(sock)
+        _check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         try:
@@ -393,7 +398,7 @@
         The received data is written into *buf* (a writable buffer).
         The return value is the number of bytes written.
         """
-        base_events._check_ssl_socket(sock)
+        _check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         try:
@@ -434,7 +439,7 @@
         raised, and there is no way to determine how much data, if any, was
         successfully processed by the receiving end of the connection.
         """
-        base_events._check_ssl_socket(sock)
+        _check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         try:
@@ -483,15 +488,13 @@
 
         This method is a coroutine.
         """
-        base_events._check_ssl_socket(sock)
+        _check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
 
         if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
             resolved = await self._ensure_resolved(
-                address, family=sock.family, type=sock.type, proto=sock.proto,
-                loop=self,
-            )
+                address, family=sock.family, proto=sock.proto, loop=self)
             _, _, _, _, address = resolved[0]
 
         fut = self.create_future()
@@ -550,7 +553,7 @@
         object usable to send and receive data on the connection, and address
         is the address bound to the socket on the other end of the connection.
         """
-        base_events._check_ssl_socket(sock)
+        _check_ssl_socket(sock)
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
         fut = self.create_future()
diff --git a/common/py3-stdlib/asyncio/sslproto.py b/common/py3-stdlib/asyncio/sslproto.py
index 00fc16c..cad25b2 100644
--- a/common/py3-stdlib/asyncio/sslproto.py
+++ b/common/py3-stdlib/asyncio/sslproto.py
@@ -367,12 +367,6 @@
         """Return the current size of the write buffer."""
         return self._ssl_protocol._transport.get_write_buffer_size()
 
-    def get_write_buffer_limits(self):
-        """Get the high and low watermarks for write flow control. 
-        Return a tuple (low, high) where low and high are 
-        positive number of bytes."""
-        return self._ssl_protocol._transport.get_write_buffer_limits()
-
     @property
     def _protocol_paused(self):
         # Required for sendfile fallback pause_writing/resume_writing logic
diff --git a/common/py3-stdlib/asyncio/streams.py b/common/py3-stdlib/asyncio/streams.py
index 080d8a6..3c80bb8 100644
--- a/common/py3-stdlib/asyncio/streams.py
+++ b/common/py3-stdlib/asyncio/streams.py
@@ -23,7 +23,7 @@
 
 
 async def open_connection(host=None, port=None, *,
-                          limit=_DEFAULT_LIMIT, **kwds):
+                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """A wrapper for create_connection() returning a (reader, writer) pair.
 
     The reader returned is a StreamReader instance; the writer is a
@@ -41,7 +41,12 @@
     StreamReaderProtocol classes, just copy the code -- there's
     really nothing special here except some convenience.)
     """
-    loop = events.get_running_loop()
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
     reader = StreamReader(limit=limit, loop=loop)
     protocol = StreamReaderProtocol(reader, loop=loop)
     transport, _ = await loop.create_connection(
@@ -51,7 +56,7 @@
 
 
 async def start_server(client_connected_cb, host=None, port=None, *,
-                       limit=_DEFAULT_LIMIT, **kwds):
+                       loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """Start a socket server, call back for each client connected.
 
     The first parameter, `client_connected_cb`, takes two parameters:
@@ -73,7 +78,12 @@
     The return value is the same as loop.create_server(), i.e. a
     Server object which can be used to stop the service.
     """
-    loop = events.get_running_loop()
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
 
     def factory():
         reader = StreamReader(limit=limit, loop=loop)
@@ -88,10 +98,14 @@
     # UNIX Domain Sockets are supported on this platform
 
     async def open_unix_connection(path=None, *,
-                                   limit=_DEFAULT_LIMIT, **kwds):
+                                   loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `open_connection` but works with UNIX Domain Sockets."""
-        loop = events.get_running_loop()
-
+        if loop is None:
+            loop = events.get_event_loop()
+        else:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
         reader = StreamReader(limit=limit, loop=loop)
         protocol = StreamReaderProtocol(reader, loop=loop)
         transport, _ = await loop.create_unix_connection(
@@ -100,9 +114,14 @@
         return reader, writer
 
     async def start_unix_server(client_connected_cb, path=None, *,
-                                limit=_DEFAULT_LIMIT, **kwds):
+                                loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `start_server` but works with UNIX Domain Sockets."""
-        loop = events.get_running_loop()
+        if loop is None:
+            loop = events.get_event_loop()
+        else:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
 
         def factory():
             reader = StreamReader(limit=limit, loop=loop)
@@ -125,7 +144,7 @@
 
     def __init__(self, loop=None):
         if loop is None:
-            self._loop = events._get_event_loop(stacklevel=4)
+            self._loop = events.get_event_loop()
         else:
             self._loop = loop
         self._paused = False
@@ -283,13 +302,9 @@
     def __del__(self):
         # Prevent reports about unhandled exceptions.
         # Better than self._closed._log_traceback = False hack
-        try:
-            closed = self._closed
-        except AttributeError:
-            pass  # failed constructor
-        else:
-            if closed.done() and not closed.cancelled():
-                closed.exception()
+        closed = self._closed
+        if closed.done() and not closed.cancelled():
+            closed.exception()
 
 
 class StreamWriter:
@@ -385,7 +400,7 @@
 
         self._limit = limit
         if loop is None:
-            self._loop = events._get_event_loop()
+            self._loop = events.get_event_loop()
         else:
             self._loop = loop
         self._buffer = bytearray()
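
With the loop parameter restored, open_connection() and start_server() no longer require an already-running loop and fall back to get_event_loop(). Day-to-day usage is the same under either version; a minimal echo round trip:

    import asyncio

    async def handle(reader, writer):
        writer.write(await reader.read(100))    # echo one chunk back
        await writer.drain()
        writer.close()

    async def main():
        server = await asyncio.start_server(handle, "127.0.0.1", 0)
        host, port = server.sockets[0].getsockname()
        reader, writer = await asyncio.open_connection(host, port)
        writer.write(b"ping")
        await writer.drain()
        print(await reader.read(100))           # b'ping'
        writer.close()
        server.close()
        await server.wait_closed()

    asyncio.run(main())
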
diff --git a/common/py3-stdlib/asyncio/subprocess.py b/common/py3-stdlib/asyncio/subprocess.py
index cd10231..c9506b1 100644
--- a/common/py3-stdlib/asyncio/subprocess.py
+++ b/common/py3-stdlib/asyncio/subprocess.py
@@ -1,6 +1,7 @@
 __all__ = 'create_subprocess_exec', 'create_subprocess_shell'
 
 import subprocess
+import warnings
 
 from . import events
 from . import protocols
@@ -192,14 +193,24 @@
             stderr = self._read_stream(2)
         else:
             stderr = self._noop()
-        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr)
+        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
+                                                   loop=self._loop)
         await self.wait()
         return (stdout, stderr)
 
 
 async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
-                                  limit=streams._DEFAULT_LIMIT, **kwds):
-    loop = events.get_running_loop()
+                                  loop=None, limit=streams._DEFAULT_LIMIT,
+                                  **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8 "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning,
+                      stacklevel=2
+        )
+
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
     transport, protocol = await loop.subprocess_shell(
@@ -210,9 +221,16 @@
 
 
 async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
-                                 stderr=None, limit=streams._DEFAULT_LIMIT,
-                                 **kwds):
-    loop = events.get_running_loop()
+                                 stderr=None, loop=None,
+                                 limit=streams._DEFAULT_LIMIT, **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8 "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning,
+                      stacklevel=2
+        )
     protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                         loop=loop)
     transport, protocol = await loop.subprocess_exec(
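
The subprocess factories follow suit. A minimal create_subprocess_exec() round trip, independent of which loop-handling variant is in place:

    import asyncio
    import sys

    async def main():
        proc = await asyncio.create_subprocess_exec(
            sys.executable, "-c", "print('hi')",
            stdout=asyncio.subprocess.PIPE)
        stdout, _ = await proc.communicate()
        print(stdout)                    # b'hi\n'

    asyncio.run(main())
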
diff --git a/common/py3-stdlib/asyncio/tasks.py b/common/py3-stdlib/asyncio/tasks.py
index c4bedb5..f486b67 100644
--- a/common/py3-stdlib/asyncio/tasks.py
+++ b/common/py3-stdlib/asyncio/tasks.py
@@ -17,7 +17,6 @@
 import types
 import warnings
 import weakref
-from types import GenericAlias
 
 from . import base_tasks
 from . import coroutines
@@ -62,6 +61,30 @@
             if futures._get_loop(t) is loop and not t.done()}
 
 
+def _all_tasks_compat(loop=None):
+    # Different from "all_tasks()" by returning *all* Tasks, including
+    # the completed ones.  Used to implement the deprecated "Task.all_tasks()"
+    # method.
+    if loop is None:
+        loop = events.get_event_loop()
+    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
+    # thread while we do so. Therefore we cast it to list prior to filtering. The list
+    # cast itself requires iteration, so we repeat it several times ignoring
+    # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
+    # details.
+    i = 0
+    while True:
+        try:
+            tasks = list(_all_tasks)
+        except RuntimeError:
+            i += 1
+            if i >= 1000:
+                raise
+        else:
+            break
+    return {t for t in tasks if futures._get_loop(t) is loop}
+
+
 def _set_task_name(task, name):
     if name is not None:
         try:
@@ -124,7 +147,8 @@
             self._loop.call_exception_handler(context)
         super().__del__()
 
-    __class_getitem__ = classmethod(GenericAlias)
+    def __class_getitem__(cls, type):
+        return cls
 
     def _repr_info(self):
         return base_tasks._task_repr_info(self)
@@ -346,7 +370,7 @@
 ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
 
 
-async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED):
+async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
     """Wait for the Futures and coroutines given by fs to complete.
 
     The fs iterable must not be empty.
@@ -369,7 +393,12 @@
     if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
         raise ValueError(f'Invalid return_when value: {return_when}')
 
-    loop = events.get_running_loop()
+    if loop is None:
+        loop = events.get_running_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
 
     fs = set(fs)
 
@@ -389,7 +418,7 @@
         waiter.set_result(None)
 
 
-async def wait_for(fut, timeout):
+async def wait_for(fut, timeout, *, loop=None):
     """Wait for the single Future or coroutine to complete, with timeout.
 
     Coroutine will be wrapped in Task.
@@ -402,7 +431,12 @@
 
     This function is a coroutine.
     """
-    loop = events.get_running_loop()
+    if loop is None:
+        loop = events.get_running_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
 
     if timeout is None:
         return await fut
@@ -415,9 +449,11 @@
 
         await _cancel_and_wait(fut, loop=loop)
         try:
-            return fut.result()
+            fut.result()
         except exceptions.CancelledError as exc:
             raise exceptions.TimeoutError() from exc
+        else:
+            raise exceptions.TimeoutError()
 
     waiter = loop.create_future()
     timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
@@ -435,10 +471,7 @@
                 return fut.result()
             else:
                 fut.remove_done_callback(cb)
-                # We must ensure that the task is not running
-                # after wait_for() returns.
-                # See https://bugs.python.org/issue32751
-                await _cancel_and_wait(fut, loop=loop)
+                fut.cancel()
                 raise
 
         if fut.done():
@@ -453,9 +486,11 @@
             # exception, we should re-raise it
             # See https://bugs.python.org/issue40607
             try:
-                return fut.result()
+                fut.result()
             except exceptions.CancelledError as exc:
                 raise exceptions.TimeoutError() from exc
+            else:
+                raise exceptions.TimeoutError()
     finally:
         timeout_handle.cancel()
 
@@ -521,7 +556,7 @@
 
 
 # This is *not* a @coroutine!  It is just an iterator (yielding Futures).
-def as_completed(fs, *, timeout=None):
+def as_completed(fs, *, loop=None, timeout=None):
     """Return an iterator whose values are coroutines.
 
     When waiting for the yielded coroutines you'll get the results (or
@@ -543,9 +578,14 @@
         raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")
 
     from .queues import Queue  # Import here to avoid circular import problem.
-    done = Queue()
+    done = Queue(loop=loop)
 
-    loop = events._get_event_loop()
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
     todo = {ensure_future(f, loop=loop) for f in set(fs)}
     timeout_handle = None
 
@@ -590,13 +630,19 @@
     yield
 
 
-async def sleep(delay, result=None):
+async def sleep(delay, result=None, *, loop=None):
     """Coroutine that completes after a given time (in seconds)."""
     if delay <= 0:
         await __sleep0()
         return result
 
-    loop = events.get_running_loop()
+    if loop is None:
+        loop = events.get_running_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+
     future = loop.create_future()
     h = loop.call_later(delay,
                         futures._set_result_unless_cancelled,
@@ -612,32 +658,23 @@
 
     If the argument is a Future, it is returned directly.
     """
-    return _ensure_future(coro_or_future, loop=loop)
-
-
-def _ensure_future(coro_or_future, *, loop=None):
-    if futures.isfuture(coro_or_future):
+    if coroutines.iscoroutine(coro_or_future):
+        if loop is None:
+            loop = events.get_event_loop()
+        task = loop.create_task(coro_or_future)
+        if task._source_traceback:
+            del task._source_traceback[-1]
+        return task
+    elif futures.isfuture(coro_or_future):
         if loop is not None and loop is not futures._get_loop(coro_or_future):
             raise ValueError('The future belongs to a different loop than '
-                            'the one specified as the loop argument')
+                             'the one specified as the loop argument')
         return coro_or_future
-    called_wrap_awaitable = False
-    if not coroutines.iscoroutine(coro_or_future):
-        if inspect.isawaitable(coro_or_future):
-            coro_or_future = _wrap_awaitable(coro_or_future)
-            called_wrap_awaitable = True
-        else:
-            raise TypeError('An asyncio.Future, a coroutine or an awaitable '
-                            'is required')
-
-    if loop is None:
-        loop = events._get_event_loop(stacklevel=4)
-    try:
-        return loop.create_task(coro_or_future)
-    except RuntimeError: 
-        if not called_wrap_awaitable:
-            coro_or_future.close()
-        raise
+    elif inspect.isawaitable(coro_or_future):
+        return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
+    else:
+        raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
+                        'required')
 
 
 @types.coroutine
@@ -660,8 +697,7 @@
     cancelled.
     """
 
-    def __init__(self, children, *, loop):
-        assert loop is not None
+    def __init__(self, children, *, loop=None):
         super().__init__(loop=loop)
         self._children = children
         self._cancel_requested = False
@@ -681,7 +717,7 @@
         return ret
 
 
-def gather(*coros_or_futures, return_exceptions=False):
+def gather(*coros_or_futures, loop=None, return_exceptions=False):
     """Return a future aggregating results from the given coroutines/futures.
 
     Coroutines will be wrapped in a future and scheduled in the event
@@ -712,7 +748,12 @@
     gather won't cancel any other awaitables.
     """
     if not coros_or_futures:
-        loop = events._get_event_loop()
+        if loop is None:
+            loop = events.get_event_loop()
+        else:
+            warnings.warn("The loop argument is deprecated since Python 3.8, "
+                          "and scheduled for removal in Python 3.10.",
+                          DeprecationWarning, stacklevel=2)
         outer = loop.create_future()
         outer.set_result([])
         return outer
@@ -721,7 +762,7 @@
         nonlocal nfinished
         nfinished += 1
 
-        if outer is None or outer.done():
+        if outer.done():
             if not fut.cancelled():
                 # Mark exception retrieved.
                 fut.exception()
@@ -776,11 +817,9 @@
     children = []
     nfuts = 0
     nfinished = 0
-    loop = None
-    outer = None  # bpo-46672
     for arg in coros_or_futures:
         if arg not in arg_to_fut:
-            fut = _ensure_future(arg, loop=loop)
+            fut = ensure_future(arg, loop=loop)
             if loop is None:
                 loop = futures._get_loop(fut)
             if fut is not arg:
@@ -804,7 +843,7 @@
     return outer
 
 
-def shield(arg):
+def shield(arg, *, loop=None):
     """Wait for a future, shielding it from cancellation.
 
     The statement
@@ -830,7 +869,11 @@
         except CancelledError:
             res = None
     """
-    inner = _ensure_future(arg)
+    if loop is not None:
+        warnings.warn("The loop argument is deprecated since Python 3.8, "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning, stacklevel=2)
+    inner = ensure_future(arg, loop=loop)
     if inner.done():
         # Shortcut.
         return inner
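
The restored ensure_future() dispatches in the 3.8 order: a coroutine becomes a Task, a future passes through unchanged (after a loop check), and any other awaitable is wrapped into a coroutine and retried. A sketch exercising all three branches (CustomAwaitable is an illustrative name):

    import asyncio

    class CustomAwaitable:
        def __await__(self):
            return iter([])              # completes immediately with None

    async def coro():
        return 42

    async def main():
        t1 = asyncio.ensure_future(coro())             # coroutine -> Task
        t2 = asyncio.ensure_future(t1)                 # Future passes through
        t3 = asyncio.ensure_future(CustomAwaitable())  # awaitable -> wrapped
        assert t2 is t1
        print(await t1, await t3)                      # 42 None

    asyncio.run(main())
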
diff --git a/common/py3-stdlib/asyncio/threads.py b/common/py3-stdlib/asyncio/threads.py
index db048a8..34b7513 100644
--- a/common/py3-stdlib/asyncio/threads.py
+++ b/common/py3-stdlib/asyncio/threads.py
@@ -13,7 +13,7 @@
     """Asynchronously run function *func* in a separate thread.
 
     Any *args and **kwargs supplied for this function are directly passed
-    to *func*. Also, the current :class:`contextvars.Context` is propagated,
+    to *func*. Also, the current :class:`contextvars.Context` is propagated,
     allowing context variables from the main thread to be accessed in the
     separate thread.
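
A small usage sketch of to_thread() itself (Python 3.9+): the blocking call runs in a worker thread while the loop stays free, and the caller's context variables travel with it.

    import asyncio
    import time

    def blocking_io():
        time.sleep(0.1)                  # stands in for real blocking work
        return "done"

    async def main():
        # Runs in a worker thread; the event loop keeps servicing tasks.
        print(await asyncio.to_thread(blocking_io))   # done

    asyncio.run(main())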
 
diff --git a/common/py3-stdlib/asyncio/transports.py b/common/py3-stdlib/asyncio/transports.py
index 73b1fa2..45e155c 100644
--- a/common/py3-stdlib/asyncio/transports.py
+++ b/common/py3-stdlib/asyncio/transports.py
@@ -99,12 +99,6 @@
         """Return the current size of the write buffer."""
         raise NotImplementedError
 
-    def get_write_buffer_limits(self):
-        """Get the high and low watermarks for write flow control. 
-        Return a tuple (low, high) where low and high are 
-        positive number of bytes."""
-        raise NotImplementedError
-
     def write(self, data):
         """Write some data bytes to the transport.
 
diff --git a/common/py3-stdlib/asyncio/unix_events.py b/common/py3-stdlib/asyncio/unix_events.py
index c88b818..f34a5b4 100644
--- a/common/py3-stdlib/asyncio/unix_events.py
+++ b/common/py3-stdlib/asyncio/unix_events.py
@@ -44,16 +44,6 @@
     pass
 
 
-def waitstatus_to_exitcode(status):
-    try:
-        return os.waitstatus_to_exitcode(status)
-    except ValueError:
-        # The child exited, but we don't understand its status.
-        # This shouldn't happen, but if it does, let's just
-        # return that status; perhaps that helps debug it.
-        return status
-
-
 class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
     """Unix event loop.
 
@@ -333,7 +323,7 @@
             server._start_serving()
             # Skip one loop iteration so that all 'loop.add_reader'
             # go through.
-            await tasks.sleep(0)
+            await tasks.sleep(0, loop=self)
 
         return server
 
@@ -951,7 +941,7 @@
                 " will report returncode 255",
                 pid)
         else:
-            returncode = waitstatus_to_exitcode(status)
+            returncode = _compute_returncode(status)
 
         os.close(pidfd)
         callback(pid, returncode, *args)
@@ -966,6 +956,20 @@
         return True
 
 
+def _compute_returncode(status):
+    if os.WIFSIGNALED(status):
+        # The child process died because of a signal.
+        return -os.WTERMSIG(status)
+    elif os.WIFEXITED(status):
+        # The child process exited (e.g. sys.exit()).
+        return os.WEXITSTATUS(status)
+    else:
+        # The child exited, but we don't understand its status.
+        # This shouldn't happen, but if it does, let's just
+        # return that status; perhaps that helps debug it.
+        return status
+
+
 class BaseChildWatcher(AbstractChildWatcher):
 
     def __init__(self):
@@ -1076,7 +1080,7 @@
                 # The child process is still alive.
                 return
 
-            returncode = waitstatus_to_exitcode(status)
+            returncode = _compute_returncode(status)
             if self._loop.get_debug():
                 logger.debug('process %s exited with returncode %s',
                              expected_pid, returncode)
@@ -1169,7 +1173,7 @@
                     # A child process is still alive.
                     return
 
-                returncode = waitstatus_to_exitcode(status)
+                returncode = _compute_returncode(status)
 
             with self._lock:
                 try:
@@ -1226,15 +1230,13 @@
 
     def close(self):
         self._callbacks.clear()
-        if self._saved_sighandler is None:
-            return
-
-        handler = signal.getsignal(signal.SIGCHLD)
-        if handler != self._sig_chld:
-            logger.warning("SIGCHLD handler was changed by outside code")
-        else:
-            signal.signal(signal.SIGCHLD, self._saved_sighandler)
-        self._saved_sighandler = None
+        if self._saved_sighandler is not None:
+            handler = signal.getsignal(signal.SIGCHLD)
+            if handler != self._sig_chld:
+                logger.warning("SIGCHLD handler was changed by outside code")
+            else:
+                signal.signal(signal.SIGCHLD, self._saved_sighandler)
+            self._saved_sighandler = None
 
     def __enter__(self):
         return self
@@ -1261,17 +1263,15 @@
         # The reason to do it here is that attach_loop() is called from
         # unix policy only for the main thread.
         # Main thread is required for subscription on SIGCHLD signal
-        if self._saved_sighandler is not None:
-            return
-
-        self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
         if self._saved_sighandler is None:
-            logger.warning("Previous SIGCHLD handler was set by non-Python code, "
-                           "restore to default handler on watcher close.")
-            self._saved_sighandler = signal.SIG_DFL
+            self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
+            if self._saved_sighandler is None:
+                logger.warning("Previous SIGCHLD handler was set by non-Python code, "
+                               "restore to default handler on watcher close.")
+                self._saved_sighandler = signal.SIG_DFL
 
-        # Set SA_RESTART to limit EINTR occurrences.
-        signal.siginterrupt(signal.SIGCHLD, False)
+            # Set SA_RESTART to limit EINTR occurrences.
+            signal.siginterrupt(signal.SIGCHLD, False)
 
     def _do_waitpid_all(self):
         for pid in list(self._callbacks):
@@ -1296,7 +1296,7 @@
                 # The child process is still alive.
                 return
 
-            returncode = waitstatus_to_exitcode(status)
+            returncode = _compute_returncode(status)
             debug_log = True
         try:
             loop, callback, args = self._callbacks.pop(pid)
@@ -1379,7 +1379,7 @@
     def remove_child_handler(self, pid):
         # asyncio never calls remove_child_handler() !!!
         # The method is no-op but is implemented because
-        # abstract base classes require it.
+        # abstract base classes require it.
         return True
 
     def attach_loop(self, loop):
@@ -1399,7 +1399,7 @@
                 "Unknown child process pid %d, will report returncode 255",
                 pid)
         else:
-            returncode = waitstatus_to_exitcode(status)
+            returncode = _compute_returncode(status)
             if loop.get_debug():
                 logger.debug('process %s exited with returncode %s',
                              expected_pid, returncode)
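
The restored _compute_returncode() hand-decodes a waitpid() status word; os.waitstatus_to_exitcode() (3.9+) performs the same decoding for the exited/signaled cases. A POSIX-only sketch of the logic (decode is an illustrative name):

    import os

    def decode(status):
        # Mirrors _compute_returncode: signal death -> negative signal
        # number, normal exit -> exit code, anything else passed through.
        if os.WIFSIGNALED(status):
            return -os.WTERMSIG(status)
        elif os.WIFEXITED(status):
            return os.WEXITSTATUS(status)
        return status

    pid = os.fork()                      # POSIX only
    if pid == 0:
        os._exit(7)                      # child: exit with code 7
    _, status = os.waitpid(pid, 0)
    print(decode(status))                # 7
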
diff --git a/common/py3-stdlib/asyncio/windows_events.py b/common/py3-stdlib/asyncio/windows_events.py
index da81ab4..5e7cd79 100644
--- a/common/py3-stdlib/asyncio/windows_events.py
+++ b/common/py3-stdlib/asyncio/windows_events.py
@@ -1,10 +1,5 @@
 """Selector and proactor event loops for Windows."""
 
-import sys
-
-if sys.platform != 'win32':  # pragma: no cover
-    raise ImportError('win32 only')
-
 import _overlapped
 import _winapi
 import errno
diff --git a/common/py3-stdlib/asyncore.py b/common/py3-stdlib/asyncore.py
index b1eea4b..ce16f11 100644
--- a/common/py3-stdlib/asyncore.py
+++ b/common/py3-stdlib/asyncore.py
@@ -57,13 +57,6 @@
      ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
      errorcode
 
-warnings.warn(
-    'The asyncore module is deprecated. '
-    'The recommended replacement is asyncio',
-    DeprecationWarning,
-    stacklevel=2)
-
-
 _DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                            EBADF})
 
@@ -120,7 +113,7 @@
         if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
             obj.handle_close()
     except OSError as e:
-        if e.errno not in _DISCONNECTED:
+        if e.args[0] not in _DISCONNECTED:
             obj.handle_error()
         else:
             obj.handle_close()
@@ -243,7 +236,7 @@
             try:
                 self.addr = sock.getpeername()
             except OSError as err:
-                if err.errno in (ENOTCONN, EINVAL):
+                if err.args[0] in (ENOTCONN, EINVAL):
                     # To handle the case where we got an unconnected
                     # socket.
                     self.connected = False
@@ -353,7 +346,7 @@
         except TypeError:
             return None
         except OSError as why:
-            if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
+            if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                 return None
             else:
                 raise
@@ -365,9 +358,9 @@
             result = self.socket.send(data)
             return result
         except OSError as why:
-            if why.errno == EWOULDBLOCK:
+            if why.args[0] == EWOULDBLOCK:
                 return 0
-            elif why.errno in _DISCONNECTED:
+            elif why.args[0] in _DISCONNECTED:
                 self.handle_close()
                 return 0
             else:
@@ -385,7 +378,7 @@
                 return data
         except OSError as why:
             # winsock sometimes raises ENOTCONN
-            if why.errno in _DISCONNECTED:
+            if why.args[0] in _DISCONNECTED:
                 self.handle_close()
                 return b''
             else:
@@ -400,7 +393,7 @@
             try:
                 self.socket.close()
             except OSError as why:
-                if why.errno not in (ENOTCONN, EBADF):
+                if why.args[0] not in (ENOTCONN, EBADF):
                     raise
 
     # log and log_info may be overridden to provide more sophisticated
@@ -564,7 +557,7 @@
         try:
             x.close()
         except OSError as x:
-            if x.errno == EBADF:
+            if x.args[0] == EBADF:
                 pass
             elif not ignore_all:
                 raise
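
The asyncore hunks swap e.errno for the older e.args[0] spelling. For errors raised by the socket layer the two agree, because an OSError constructed with (errno, strerror) stores the errno as its first positional argument:

    import errno

    e = OSError(errno.EPIPE, "Broken pipe")
    assert e.errno == e.args[0] == errno.EPIPE
    # Caveat: a bare OSError("message") has errno None but args[0] set,
    # which is why later code prefers the explicit e.errno attribute.
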
diff --git a/common/py3-stdlib/base64.py b/common/py3-stdlib/base64.py
index e1256ad..a28109f 100755
--- a/common/py3-stdlib/base64.py
+++ b/common/py3-stdlib/base64.py
@@ -16,7 +16,7 @@
     'encode', 'decode', 'encodebytes', 'decodebytes',
     # Generalized interface for other encodings
     'b64encode', 'b64decode', 'b32encode', 'b32decode',
-    'b32hexencode', 'b32hexdecode', 'b16encode', 'b16decode',
+    'b16encode', 'b16decode',
     # Base85 and Ascii85 encodings
     'b85encode', 'b85decode', 'a85encode', 'a85decode',
     # Standard Base64 encoding
@@ -135,40 +135,19 @@
 
 
 # Base32 encoding/decoding must be done in Python
-_B32_ENCODE_DOCSTRING = '''
-Encode the bytes-like objects using {encoding} and return a bytes object.
-'''
-_B32_DECODE_DOCSTRING = '''
-Decode the {encoding} encoded bytes-like object or ASCII string s.
-
-Optional casefold is a flag specifying whether a lowercase alphabet is
-acceptable as input.  For security purposes, the default is False.
-{extra_args}
-The result is returned as a bytes object.  A binascii.Error is raised if
-the input is incorrectly padded or if there are non-alphabet
-characters present in the input.
-'''
-_B32_DECODE_MAP01_DOCSTRING = '''
-RFC 3548 allows for optional mapping of the digit 0 (zero) to the
-letter O (oh), and for optional mapping of the digit 1 (one) to
-either the letter I (eye) or letter L (el).  The optional argument
-map01 when not None, specifies which letter the digit 1 should be
-mapped to (when map01 is not None, the digit 0 is always mapped to
-the letter O).  For security purposes the default is None, so that
-0 and 1 are not allowed in the input.
-'''
 _b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
-_b32hexalphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV'
-_b32tab2 = {}
-_b32rev = {}
+_b32tab2 = None
+_b32rev = None
 
-def _b32encode(alphabet, s):
+def b32encode(s):
+    """Encode the bytes-like object s using Base32 and return a bytes object.
+    """
     global _b32tab2
     # Delay the initialization of the table to not waste memory
     # if the function is never called
-    if alphabet not in _b32tab2:
-        b32tab = [bytes((i,)) for i in alphabet]
-        _b32tab2[alphabet] = [a + b for a in b32tab for b in b32tab]
+    if _b32tab2 is None:
+        b32tab = [bytes((i,)) for i in _b32alphabet]
+        _b32tab2 = [a + b for a in b32tab for b in b32tab]
         b32tab = None
 
     if not isinstance(s, bytes_types):
@@ -179,7 +158,7 @@
         s = s + b'\0' * (5 - leftover)  # Don't use += !
     encoded = bytearray()
     from_bytes = int.from_bytes
-    b32tab2 = _b32tab2[alphabet]
+    b32tab2 = _b32tab2
     for i in range(0, len(s), 5):
         c = from_bytes(s[i: i + 5], 'big')
         encoded += (b32tab2[c >> 30] +           # bits 1 - 10
@@ -198,12 +177,29 @@
         encoded[-1:] = b'='
     return bytes(encoded)
 
-def _b32decode(alphabet, s, casefold=False, map01=None):
+def b32decode(s, casefold=False, map01=None):
+    """Decode the Base32 encoded bytes-like object or ASCII string s.
+
+    Optional casefold is a flag specifying whether a lowercase alphabet is
+    acceptable as input.  For security purposes, the default is False.
+
+    RFC 3548 allows for optional mapping of the digit 0 (zero) to the
+    letter O (oh), and for optional mapping of the digit 1 (one) to
+    either the letter I (eye) or letter L (el).  The optional argument
+    map01 when not None, specifies which letter the digit 1 should be
+    mapped to (when map01 is not None, the digit 0 is always mapped to
+    the letter O).  For security purposes the default is None, so that
+    0 and 1 are not allowed in the input.
+
+    The result is returned as a bytes object.  A binascii.Error is raised if
+    the input is incorrectly padded or if there are non-alphabet
+    characters present in the input.
+    """
     global _b32rev
     # Delay the initialization of the table to not waste memory
     # if the function is never called
-    if alphabet not in _b32rev:
-        _b32rev[alphabet] = {v: k for k, v in enumerate(alphabet)}
+    if _b32rev is None:
+        _b32rev = {v: k for k, v in enumerate(_b32alphabet)}
     s = _bytes_from_decode_data(s)
     if len(s) % 8:
         raise binascii.Error('Incorrect padding')
@@ -224,7 +220,7 @@
     padchars = l - len(s)
     # Now decode the full quanta
     decoded = bytearray()
-    b32rev = _b32rev[alphabet]
+    b32rev = _b32rev
     for i in range(0, len(s), 8):
         quanta = s[i: i + 8]
         acc = 0
@@ -245,26 +241,6 @@
     return bytes(decoded)
 
 
-def b32encode(s):
-    return _b32encode(_b32alphabet, s)
-b32encode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32')
-
-def b32decode(s, casefold=False, map01=None):
-    return _b32decode(_b32alphabet, s, casefold, map01)
-b32decode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32',
-                                        extra_args=_B32_DECODE_MAP01_DOCSTRING)
-
-def b32hexencode(s):
-    return _b32encode(_b32hexalphabet, s)
-b32hexencode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32hex')
-
-def b32hexdecode(s, casefold=False):
-    # base32hex does not have the 01 mapping
-    return _b32decode(_b32hexalphabet, s, casefold)
-b32hexdecode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32hex',
-                                                    extra_args='')
-
-
 # RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
 # lowercase.  The RFC also recommends against accepting input case
 # insensitively.
@@ -344,7 +320,7 @@
     global _a85chars, _a85chars2
     # Delay the initialization of tables to not waste memory
     # if the function is never called
-    if _a85chars2 is None:
+    if _a85chars is None:
         _a85chars = [bytes((i,)) for i in range(33, 118)]
         _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
 
@@ -452,7 +428,7 @@
     global _b85chars, _b85chars2
     # Delay the initialization of tables to not waste memory
     # if the function is never called
-    if _b85chars2 is None:
+    if _b85chars is None:
         _b85chars = [bytes((i,)) for i in _b85alphabet]
         _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
     return _85encode(b, _b85chars, _b85chars2, pad)
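
After the revert, base64 exposes only the RFC 3548 Base32 alphabet (b32hexencode()/b32hexdecode() are the 3.10 additions being removed). Round-trip usage with the options that survive:

    import base64

    encoded = base64.b32encode(b"asyncio")
    print(encoded)                       # b'MFZXS3TDNFXQ===='

    # casefold=True accepts a lowercase alphabet; map01 (not shown) maps
    # the digit 0 to O, and the digit 1 to I or L, before decoding.
    print(base64.b32decode(encoded.lower(), casefold=True))   # b'asyncio'
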
diff --git a/common/py3-stdlib/bdb.py b/common/py3-stdlib/bdb.py
index 75d6113..b18a061 100644
--- a/common/py3-stdlib/bdb.py
+++ b/common/py3-stdlib/bdb.py
@@ -34,8 +34,6 @@
         self.fncache = {}
         self.frame_returning = None
 
-        self._load_breaks()
-
     def canonic(self, filename):
         """Return canonical form of filename.
 
@@ -119,7 +117,7 @@
         """Invoke user function and return trace function for call event.
 
         If the debugger stops on this function call, invoke
-        self.user_call(). Raise BdbQuit if self.quitting is set.
+        self.user_call(). Raise BdbQuit if self.quitting is set.
         Return self.trace_dispatch to continue tracing in this scope.
         """
         # XXX 'arg' is no longer used
@@ -367,12 +365,6 @@
     # Call self.get_*break*() to see the breakpoints or better
     # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
 
-    def _add_to_breaks(self, filename, lineno):
-        """Add breakpoint to breaks, if not already there."""
-        bp_linenos = self.breaks.setdefault(filename, [])
-        if lineno not in bp_linenos:
-            bp_linenos.append(lineno)
-
     def set_break(self, filename, lineno, temporary=False, cond=None,
                   funcname=None):
         """Set a new breakpoint for filename:lineno.
@@ -385,21 +377,12 @@
         line = linecache.getline(filename, lineno)
         if not line:
             return 'Line %s:%d does not exist' % (filename, lineno)
-        self._add_to_breaks(filename, lineno)
+        list = self.breaks.setdefault(filename, [])
+        if lineno not in list:
+            list.append(lineno)
         bp = Breakpoint(filename, lineno, temporary, cond, funcname)
         return None
 
-    def _load_breaks(self):
-        """Apply all breakpoints (set in other instances) to this one.
-
-        Populates this instance's breaks list from the Breakpoint class's
-        list, which can have breakpoints set by another Bdb instance. This
-        is necessary for interactive sessions to keep the breakpoints
-        active across multiple calls to run().
-        """
-        for (filename, lineno) in Breakpoint.bplist.keys():
-            self._add_to_breaks(filename, lineno)
-
     def _prune_breaks(self, filename, lineno):
         """Prune breakpoints for filename:lineno.
 
@@ -698,12 +681,6 @@
         else:
             self.bplist[file, line] = [self]
 
-    @staticmethod
-    def clearBreakpoints():
-        Breakpoint.next = 1
-        Breakpoint.bplist = {}
-        Breakpoint.bpbynumber = [None]
-
     def deleteMe(self):
         """Delete the breakpoint from the list associated to a file:line.
 
diff --git a/common/py3-stdlib/bisect.py b/common/py3-stdlib/bisect.py
index d37da74..8f3f6a3 100644
--- a/common/py3-stdlib/bisect.py
+++ b/common/py3-stdlib/bisect.py
@@ -1,7 +1,6 @@
 """Bisection algorithms."""
 
-
-def insort_right(a, x, lo=0, hi=None, *, key=None):
+def insort_right(a, x, lo=0, hi=None):
     """Insert item x in list a, and keep it sorted assuming a is sorted.
 
     If x is already in a, insert it to the right of the rightmost x.
@@ -9,18 +8,15 @@
     Optional args lo (default 0) and hi (default len(a)) bound the
     slice of a to be searched.
     """
-    if key is None:
-        lo = bisect_right(a, x, lo, hi)
-    else:
-        lo = bisect_right(a, key(x), lo, hi, key=key)
+
+    lo = bisect_right(a, x, lo, hi)
     a.insert(lo, x)
 
-
-def bisect_right(a, x, lo=0, hi=None, *, key=None):
+def bisect_right(a, x, lo=0, hi=None):
     """Return the index where to insert item x in list a, assuming a is sorted.
 
     The return value i is such that all e in a[:i] have e <= x, and all e in
-    a[i:] have e > x.  So if x already appears in the list, a.insert(i, x) will
+    a[i:] have e > x.  So if x already appears in the list, a.insert(i, x) will
     insert just after the rightmost x already there.
 
     Optional args lo (default 0) and hi (default len(a)) bound the
@@ -31,26 +27,14 @@
         raise ValueError('lo must be non-negative')
     if hi is None:
         hi = len(a)
-    # Note, the comparison uses "<" to match the
-    # __lt__() logic in list.sort() and in heapq.
-    if key is None:
-        while lo < hi:
-            mid = (lo + hi) // 2
-            if x < a[mid]:
-                hi = mid
-            else:
-                lo = mid + 1
-    else:
-        while lo < hi:
-            mid = (lo + hi) // 2
-            if x < key(a[mid]):
-                hi = mid
-            else:
-                lo = mid + 1
+    while lo < hi:
+        mid = (lo+hi)//2
+        # Use __lt__ to match the logic in list.sort() and in heapq
+        if x < a[mid]: hi = mid
+        else: lo = mid+1
     return lo
 
-
-def insort_left(a, x, lo=0, hi=None, *, key=None):
+def insort_left(a, x, lo=0, hi=None):
     """Insert item x in list a, and keep it sorted assuming a is sorted.
 
     If x is already in a, insert it to the left of the leftmost x.
@@ -59,17 +43,15 @@
     slice of a to be searched.
     """
 
-    if key is None:
-        lo = bisect_left(a, x, lo, hi)
-    else:
-        lo = bisect_left(a, key(x), lo, hi, key=key)
+    lo = bisect_left(a, x, lo, hi)
     a.insert(lo, x)
 
-def bisect_left(a, x, lo=0, hi=None, *, key=None):
+
+def bisect_left(a, x, lo=0, hi=None):
     """Return the index where to insert item x in list a, assuming a is sorted.
 
     The return value i is such that all e in a[:i] have e < x, and all e in
-    a[i:] have e >= x.  So if x already appears in the list, a.insert(i, x) will
+    a[i:] have e >= x.  So if x already appears in the list, a.insert(i, x) will
     insert just before the leftmost x already there.
 
     Optional args lo (default 0) and hi (default len(a)) bound the
@@ -80,25 +62,13 @@
         raise ValueError('lo must be non-negative')
     if hi is None:
         hi = len(a)
-    # Note, the comparison uses "<" to match the
-    # __lt__() logic in list.sort() and in heapq.
-    if key is None:
-        while lo < hi:
-            mid = (lo + hi) // 2
-            if a[mid] < x:
-                lo = mid + 1
-            else:
-                hi = mid
-    else:
-        while lo < hi:
-            mid = (lo + hi) // 2
-            if key(a[mid]) < x:
-                lo = mid + 1
-            else:
-                hi = mid
+    while lo < hi:
+        mid = (lo+hi)//2
+        # Use __lt__ to match the logic in list.sort() and in heapq
+        if a[mid] < x: lo = mid+1
+        else: hi = mid
     return lo
 
-
 # Overwrite above definitions with a fast C implementation
 try:
     from _bisect import *
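
The reverted functions drop the key= parameter (a 3.10 addition) but keep the core invariant: bisect_left() returns the first index whose element is >= x, bisect_right() the first whose element is > x, so the matching insort variants place duplicates at either end of an equal run. For example:

    import bisect

    a = [1, 2, 4, 4, 7]
    print(bisect.bisect_left(a, 4))      # 2: before the run of 4s
    print(bisect.bisect_right(a, 4))     # 4: after the run of 4s

    bisect.insort_right(a, 4)            # the new 4 lands after its equals
    print(a)                             # [1, 2, 4, 4, 4, 7]
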
diff --git a/common/py3-stdlib/bz2.py b/common/py3-stdlib/bz2.py
index fabe4f7..ce07ebe 100644
--- a/common/py3-stdlib/bz2.py
+++ b/common/py3-stdlib/bz2.py
@@ -13,6 +13,7 @@
 import io
 import os
 import _compression
+from threading import RLock
 
 from _bz2 import BZ2Compressor, BZ2Decompressor
 
@@ -52,6 +53,9 @@
         If mode is 'r', the input file may be the concatenation of
         multiple compressed streams.
         """
+        # This lock must be recursive, so that BufferedIOBase's
+        # writelines() does not deadlock.
+        self._lock = RLock()
         self._fp = None
         self._closefp = False
         self._mode = _MODE_CLOSED
@@ -100,23 +104,24 @@
         May be called more than once without error. Once the file is
         closed, any other operation on it will raise a ValueError.
         """
-        if self._mode == _MODE_CLOSED:
-            return
-        try:
-            if self._mode == _MODE_READ:
-                self._buffer.close()
-            elif self._mode == _MODE_WRITE:
-                self._fp.write(self._compressor.flush())
-                self._compressor = None
-        finally:
+        with self._lock:
+            if self._mode == _MODE_CLOSED:
+                return
             try:
-                if self._closefp:
-                    self._fp.close()
+                if self._mode == _MODE_READ:
+                    self._buffer.close()
+                elif self._mode == _MODE_WRITE:
+                    self._fp.write(self._compressor.flush())
+                    self._compressor = None
             finally:
-                self._fp = None
-                self._closefp = False
-                self._mode = _MODE_CLOSED
-                self._buffer = None
+                try:
+                    if self._closefp:
+                        self._fp.close()
+                finally:
+                    self._fp = None
+                    self._closefp = False
+                    self._mode = _MODE_CLOSED
+                    self._buffer = None
 
     @property
     def closed(self):
@@ -148,11 +153,12 @@
         Always returns at least one byte of data, unless at EOF.
         The exact number of bytes returned is unspecified.
         """
-        self._check_can_read()
-        # Relies on the undocumented fact that BufferedReader.peek()
-        # always returns at least one byte (except at EOF), independent
-        # of the value of n
-        return self._buffer.peek(n)
+        with self._lock:
+            self._check_can_read()
+            # Relies on the undocumented fact that BufferedReader.peek()
+            # always returns at least one byte (except at EOF), independent
+            # of the value of n
+            return self._buffer.peek(n)
 
     def read(self, size=-1):
         """Read up to size uncompressed bytes from the file.
@@ -160,8 +166,9 @@
         If size is negative or omitted, read until EOF is reached.
         Returns b'' if the file is already at EOF.
         """
-        self._check_can_read()
-        return self._buffer.read(size)
+        with self._lock:
+            self._check_can_read()
+            return self._buffer.read(size)
 
     def read1(self, size=-1):
         """Read up to size uncompressed bytes, while trying to avoid
@@ -170,18 +177,20 @@
 
         Returns b'' if the file is at EOF.
         """
-        self._check_can_read()
-        if size < 0:
-            size = io.DEFAULT_BUFFER_SIZE
-        return self._buffer.read1(size)
+        with self._lock:
+            self._check_can_read()
+            if size < 0:
+                size = io.DEFAULT_BUFFER_SIZE
+            return self._buffer.read1(size)
 
     def readinto(self, b):
         """Read bytes into b.
 
         Returns the number of bytes read (0 for EOF).
         """
-        self._check_can_read()
-        return self._buffer.readinto(b)
+        with self._lock:
+            self._check_can_read()
+            return self._buffer.readinto(b)
 
     def readline(self, size=-1):
         """Read a line of uncompressed bytes from the file.
@@ -194,8 +203,9 @@
             if not hasattr(size, "__index__"):
                 raise TypeError("Integer argument expected")
             size = size.__index__()
-        self._check_can_read()
-        return self._buffer.readline(size)
+        with self._lock:
+            self._check_can_read()
+            return self._buffer.readline(size)
 
     def readlines(self, size=-1):
         """Read a list of lines of uncompressed bytes from the file.
@@ -208,29 +218,23 @@
             if not hasattr(size, "__index__"):
                 raise TypeError("Integer argument expected")
             size = size.__index__()
-        self._check_can_read()
-        return self._buffer.readlines(size)
+        with self._lock:
+            self._check_can_read()
+            return self._buffer.readlines(size)
 
     def write(self, data):
         """Write a byte string to the file.
 
         Returns the number of uncompressed bytes written, which is
-        always the length of data in bytes. Note that due to buffering,
-        the file on disk may not reflect the data written until close()
-        is called.
+        always len(data). Note that due to buffering, the file on disk
+        may not reflect the data written until close() is called.
         """
-        self._check_can_write()
-        if isinstance(data, (bytes, bytearray)):
-            length = len(data)
-        else:
-            # accept any data that supports the buffer protocol
-            data = memoryview(data)
-            length = data.nbytes
-
-        compressed = self._compressor.compress(data)
-        self._fp.write(compressed)
-        self._pos += length
-        return length
+        with self._lock:
+            self._check_can_write()
+            compressed = self._compressor.compress(data)
+            self._fp.write(compressed)
+            self._pos += len(data)
+            return len(data)
 
     def writelines(self, seq):
         """Write a sequence of byte strings to the file.
@@ -240,7 +244,8 @@
 
         Line separators are not added between the written byte strings.
         """
-        return _compression.BaseStream.writelines(self, seq)
+        with self._lock:
+            return _compression.BaseStream.writelines(self, seq)
 
     def seek(self, offset, whence=io.SEEK_SET):
         """Change the file position.
@@ -257,15 +262,17 @@
         Note that seeking is emulated, so depending on the parameters,
         this operation may be extremely slow.
         """
-        self._check_can_seek()
-        return self._buffer.seek(offset, whence)
+        with self._lock:
+            self._check_can_seek()
+            return self._buffer.seek(offset, whence)
 
     def tell(self):
         """Return the current file position."""
-        self._check_not_closed()
-        if self._mode == _MODE_READ:
-            return self._buffer.tell()
-        return self._pos
+        with self._lock:
+            self._check_not_closed()
+            if self._mode == _MODE_READ:
+                return self._buffer.tell()
+            return self._pos
 
 
 def open(filename, mode="rb", compresslevel=9,
@@ -304,7 +311,6 @@
     binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel)
 
     if "t" in mode:
-        encoding = io.text_encoding(encoding)
         return io.TextIOWrapper(binary_file, encoding, errors, newline)
     else:
         return binary_file
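
The re-added RLock above serializes every BZ2File method; it must be recursive because BufferedIOBase.writelines() calls write() while the lock is already held. The public API is identical in both versions; a small round-trip sketch (the /tmp path is illustrative):

import bz2

data = b"hello " * 100
with bz2.open("/tmp/demo.bz2", "wb", compresslevel=9) as f:
    f.write(data)  # buffered; the stream is flushed by close()
with bz2.open("/tmp/demo.bz2", "rb") as f:
    assert f.read() == data
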
diff --git a/common/py3-stdlib/cProfile.py b/common/py3-stdlib/cProfile.py
index 22a7d0a..59b4699 100755
--- a/common/py3-stdlib/cProfile.py
+++ b/common/py3-stdlib/cProfile.py
@@ -175,12 +175,7 @@
                 '__package__': None,
                 '__cached__': None,
             }
-        try:
-            runctx(code, globs, None, options.outfile, options.sort)
-        except BrokenPipeError as exc:
-            # Prevent "Exception ignored" during interpreter shutdown.
-            sys.stdout = None
-            sys.exit(exc.errno)
+        runctx(code, globs, None, options.outfile, options.sort)
     else:
         parser.print_usage()
     return parser
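
The hunk above only touches the `python -m cProfile` command-line path; programmatic profiling is unaffected by the revert. A minimal sketch (the output path is illustrative):

import cProfile
import pstats

cProfile.run("sum(i * i for i in range(10_000))", "/tmp/prof.out")
pstats.Stats("/tmp/prof.out").sort_stats("cumulative").print_stats(5)
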
diff --git a/common/py3-stdlib/calendar.py b/common/py3-stdlib/calendar.py
index cbea9ec..7550d52 100644
--- a/common/py3-stdlib/calendar.py
+++ b/common/py3-stdlib/calendar.py
@@ -15,9 +15,7 @@
            "monthcalendar", "prmonth", "month", "prcal", "calendar",
            "timegm", "month_name", "month_abbr", "day_name", "day_abbr",
            "Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar",
-           "LocaleHTMLCalendar", "weekheader",
-           "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY",
-           "SATURDAY", "SUNDAY"]
+           "LocaleHTMLCalendar", "weekheader"]
 
 # Exception raised for bad input (with string parameter for details)
 error = ValueError
@@ -573,11 +571,19 @@
 
     def formatweekday(self, day, width):
         with different_locale(self.locale):
-            return super().formatweekday(day, width)
+            if width >= 9:
+                names = day_name
+            else:
+                names = day_abbr
+            name = names[day]
+            return name[:width].center(width)
 
     def formatmonthname(self, theyear, themonth, width, withyear=True):
         with different_locale(self.locale):
-            return super().formatmonthname(theyear, themonth, width, withyear)
+            s = month_name[themonth]
+            if withyear:
+                s = "%s %r" % (s, theyear)
+            return s.center(width)
 
 
 class LocaleHTMLCalendar(HTMLCalendar):
@@ -595,11 +601,16 @@
 
     def formatweekday(self, day):
         with different_locale(self.locale):
-            return super().formatweekday(day)
+            s = day_abbr[day]
+            return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
 
     def formatmonthname(self, theyear, themonth, withyear=True):
         with different_locale(self.locale):
-            return super().formatmonthname(theyear, themonth, withyear)
+            s = month_name[themonth]
+            if withyear:
+                s = '%s %s' % (s, theyear)
+            return '<tr><th colspan="7" class="month">%s</th></tr>' % s
+
 
 # Support for old module level interface
 c = TextCalendar()
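
The reverted formatweekday()/formatmonthname() inline the locale-aware logic instead of delegating to super(); the output format is the same either way. A usage sketch, assuming the always-available "C" locale:

import calendar

cal = calendar.LocaleTextCalendar(firstweekday=0, locale="C")
# Full weekday name for width >= 9, abbreviation otherwise, centered.
print(cal.formatweekday(0, 9))
print(cal.formatmonthname(2022, 1, 20))
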
diff --git a/common/py3-stdlib/cgi.py b/common/py3-stdlib/cgi.py
index 6cb8cf2..77ab703 100755
--- a/common/py3-stdlib/cgi.py
+++ b/common/py3-stdlib/cgi.py
@@ -41,7 +41,6 @@
 import html
 import locale
 import tempfile
-import warnings
 
 __all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart",
            "parse_header", "test", "print_exception", "print_environ",
@@ -78,11 +77,9 @@
 
     """
     global log, logfile, logfp
-    warnings.warn("cgi.log() is deprecated as of 3.10. Use logging instead",
-                  DeprecationWarning, stacklevel=2)
     if logfile and not logfp:
         try:
-            logfp = open(logfile, "a", encoding="locale")
+            logfp = open(logfile, "a")
         except OSError:
             pass
     if not logfp:
@@ -118,8 +115,7 @@
 # 0 ==> unlimited input
 maxlen = 0
 
-def parse(fp=None, environ=os.environ, keep_blank_values=0,
-          strict_parsing=0, separator='&'):
+def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
     """Parse a query in the environment or from a file (default stdin)
 
         Arguments, all optional:
@@ -138,9 +134,6 @@
         strict_parsing: flag indicating what to do with parsing errors.
             If false (the default), errors are silently ignored.
             If true, errors raise a ValueError exception.
-
-        separator: str. The symbol to use for separating the query arguments.
-            Defaults to &.
     """
     if fp is None:
         fp = sys.stdin
@@ -161,7 +154,7 @@
     if environ['REQUEST_METHOD'] == 'POST':
         ctype, pdict = parse_header(environ['CONTENT_TYPE'])
         if ctype == 'multipart/form-data':
-            return parse_multipart(fp, pdict, separator=separator)
+            return parse_multipart(fp, pdict)
         elif ctype == 'application/x-www-form-urlencoded':
             clength = int(environ['CONTENT_LENGTH'])
             if maxlen and clength > maxlen:
@@ -185,10 +178,10 @@
             qs = ""
         environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
     return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
-                                 encoding=encoding, separator=separator)
+                                 encoding=encoding)
 
 
-def parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator='&'):
+def parse_multipart(fp, pdict, encoding="utf-8", errors="replace"):
     """Parse multipart input.
 
     Arguments:
@@ -201,7 +194,7 @@
     value is a list of values for that field. For non-file fields, the value
     is a list of strings.
     """
-    # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always
+    # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always
     # represented as 7bit US-ASCII.
     boundary = pdict['boundary'].decode('ascii')
     ctype = "multipart/form-data; boundary={}".format(boundary)
@@ -212,7 +205,7 @@
     except KeyError:
         pass
     fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors,
-        environ={'REQUEST_METHOD': 'POST'}, separator=separator)
+        environ={'REQUEST_METHOD': 'POST'})
     return {k: fs.getlist(k) for k in fs}
 
 def _parseparam(s):
@@ -322,7 +315,7 @@
     def __init__(self, fp=None, headers=None, outerboundary=b'',
                  environ=os.environ, keep_blank_values=0, strict_parsing=0,
                  limit=None, encoding='utf-8', errors='replace',
-                 max_num_fields=None, separator='&'):
+                 max_num_fields=None):
         """Constructor.  Read multipart/* until last part.
 
         Arguments, all optional:
@@ -370,7 +363,6 @@
         self.keep_blank_values = keep_blank_values
         self.strict_parsing = strict_parsing
         self.max_num_fields = max_num_fields
-        self.separator = separator
         if 'REQUEST_METHOD' in environ:
             method = environ['REQUEST_METHOD'].upper()
         self.qs_on_post = None
@@ -597,7 +589,7 @@
         query = urllib.parse.parse_qsl(
             qs, self.keep_blank_values, self.strict_parsing,
             encoding=self.encoding, errors=self.errors,
-            max_num_fields=self.max_num_fields, separator=self.separator)
+            max_num_fields=self.max_num_fields)
         self.list = [MiniFieldStorage(key, value) for key, value in query]
         self.skip_lines()
 
@@ -613,7 +605,7 @@
             query = urllib.parse.parse_qsl(
                 self.qs_on_post, self.keep_blank_values, self.strict_parsing,
                 encoding=self.encoding, errors=self.errors,
-                max_num_fields=self.max_num_fields, separator=self.separator)
+                max_num_fields=self.max_num_fields)
             self.list.extend(MiniFieldStorage(key, value) for key, value in query)
 
         klass = self.FieldStorageClass or self.__class__
@@ -657,7 +649,7 @@
                 else self.limit - self.bytes_read
             part = klass(self.fp, headers, ib, environ, keep_blank_values,
                          strict_parsing, limit,
-                         self.encoding, self.errors, max_num_fields, self.separator)
+                         self.encoding, self.errors, max_num_fields)
 
             if max_num_fields is not None:
                 max_num_fields -= 1
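
The hunks above remove the `separator` knob again, so query parsing falls back to urllib's defaults. Orthogonal to that change, parse_header() is the small helper these code paths use to split a Content-Type line; a sketch:

import cgi

ctype, pdict = cgi.parse_header('multipart/form-data; boundary="XyZ"')
assert ctype == "multipart/form-data"
assert pdict["boundary"] == "XyZ"  # surrounding quotes are stripped
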
diff --git a/common/py3-stdlib/cgitb.py b/common/py3-stdlib/cgitb.py
index 17ddda3..4f81271 100644
--- a/common/py3-stdlib/cgitb.py
+++ b/common/py3-stdlib/cgitb.py
@@ -181,8 +181,8 @@
 
 
 <!-- The above is a description of an error in a Python program, formatted
-     for a web browser because the 'cgitb' module was enabled.  In case you
-     are not reading this in a web browser, here is the original traceback:
+     for a Web browser because the 'cgitb' module was enabled.  In case you
+     are not reading this in a Web browser, here is the original traceback:
 
 %s
 -->
diff --git a/common/py3-stdlib/codecs.py b/common/py3-stdlib/codecs.py
index e6ad6e3..7f23e97 100644
--- a/common/py3-stdlib/codecs.py
+++ b/common/py3-stdlib/codecs.py
@@ -83,7 +83,7 @@
 class CodecInfo(tuple):
     """Codec details when looking up the codec registry"""
 
-    # Private API to allow Python 3.4 to denylist the known non-Unicode
+    # Private API to allow Python 3.4 to blacklist the known non-Unicode
     # codecs in the standard library. A more general mechanism to
     # reliably distinguish test encodings from other codecs will hopefully
     # be defined for Python 3.5
@@ -386,7 +386,7 @@
 
     def reset(self):
 
-        """ Resets the codec buffers used for keeping internal state.
+        """ Flushes and resets the codec buffers used for keeping state.
 
             Calling this method should ensure that the data on the
             output is put into a clean state, that allows appending
@@ -620,7 +620,7 @@
 
     def reset(self):
 
-        """ Resets the codec buffers used for keeping internal state.
+        """ Resets the codec buffers used for keeping state.
 
             Note that no stream repositioning should take place.
             This method is primarily intended to be able to recover
diff --git a/common/py3-stdlib/codeop.py b/common/py3-stdlib/codeop.py
index 568e9bb..4c10470 100644
--- a/common/py3-stdlib/codeop.py
+++ b/common/py3-stdlib/codeop.py
@@ -10,6 +10,30 @@
   syntax error (OverflowError and ValueError can be produced by
   malformed literals).
 
+Approach:
+
+First, check if the source consists entirely of blank lines and
+comments; if so, replace it with 'pass', because the built-in
+parser doesn't always do the right thing for these.
+
+Compile three times: as is, with \n, and with \n\n appended.  If it
+compiles as is, it's complete.  If it compiles with one \n appended,
+we expect more.  If it doesn't compile either way, we compare the
+error we get when compiling with \n or \n\n appended.  If the errors
+are the same, the code is broken.  But if the errors are different, we
+expect more.  Not intuitive; not even guaranteed to hold in future
+releases; but this matches the compiler's behavior from Python 1.4
+through 2.2, at least.
+
+Caveat:
+
+It is possible (but not likely) that the parser stops parsing with a
+successful outcome before reaching the end of the source; in this
+case, trailing symbols may be ignored instead of causing an error.
+For example, a backslash followed by two newlines may be followed by
+arbitrary garbage.  This will be fixed once the API for the parser is
+better.
+
 The two interfaces are:
 
 compile_command(source, filename, symbol):
@@ -40,25 +64,24 @@
 
 __all__ = ["compile_command", "Compile", "CommandCompiler"]
 
-# The following flags match the values from Include/cpython/compile.h
-# Caveat emptor: These flags are undocumented on purpose and depending
-# on their effect outside the standard library is **unsupported**.
-PyCF_DONT_IMPLY_DEDENT = 0x200          
-PyCF_ALLOW_INCOMPLETE_INPUT = 0x4000
+PyCF_DONT_IMPLY_DEDENT = 0x200          # Matches pythonrun.h
 
 def _maybe_compile(compiler, source, filename, symbol):
-    # Check for source consisting of only blank lines and comments.
+    # Check for source consisting of only blank lines and comments
     for line in source.split("\n"):
         line = line.strip()
         if line and line[0] != '#':
-            break               # Leave it alone.
+            break               # Leave it alone
     else:
         if symbol != "eval":
             source = "pass"     # Replace it with a 'pass' statement
 
+    err = err1 = err2 = None
+    code = code1 = code2 = None
+
     try:
-        return compiler(source, filename, symbol)
-    except SyntaxError:  # Let other compile() errors propagate.
+        code = compiler(source, filename, symbol)
+    except SyntaxError:
         pass
 
     # Catch syntax warnings after the first compile
@@ -67,23 +90,25 @@
         warnings.simplefilter("error")
 
         try:
-            compiler(source + "\n", filename, symbol)
+            code1 = compiler(source + "\n", filename, symbol)
         except SyntaxError as e:
-            if "incomplete input" in str(e):
-                return None
-            raise
+            err1 = e
 
-def _is_syntax_error(err1, err2):
-    rep1 = repr(err1)
-    rep2 = repr(err2)
-    if "was never closed" in rep1 and "was never closed" in rep2:
-        return False
-    if rep1 == rep2:
-        return True
-    return False
+        try:
+            code2 = compiler(source + "\n\n", filename, symbol)
+        except SyntaxError as e:
+            err2 = e
+
+    try:
+        if code:
+            return code
+        if not code1 and repr(err1) == repr(err2):
+            raise err1
+    finally:
+        err1 = err2 = None
 
 def _compile(source, filename, symbol):
-    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT)
+    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
 
 def compile_command(source, filename="<input>", symbol="single"):
     r"""Compile a command and determine whether it is incomplete.
@@ -112,7 +137,7 @@
     statement, it "remembers" and compiles all subsequent program texts
     with the statement in force."""
     def __init__(self):
-        self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT
+        self.flags = PyCF_DONT_IMPLY_DEDENT
 
     def __call__(self, source, filename, symbol):
         codeob = compile(source, filename, symbol, self.flags, True)
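
The restored module docstring spells out the compile-three-times heuristic; from the caller's side it surfaces as compile_command() returning a code object (complete), None (expect more input), or raising (definitely broken). A sketch:

from codeop import compile_command

assert compile_command("x = 1") is not None  # complete statement
assert compile_command("if x:") is None      # incomplete: a REPL keeps reading
try:
    compile_command("x +")                   # broken with or without newlines
except SyntaxError:
    pass
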
diff --git a/common/py3-stdlib/collections/__init__.py b/common/py3-stdlib/collections/__init__.py
index 818588f..bc69a67 100644
--- a/common/py3-stdlib/collections/__init__.py
+++ b/common/py3-stdlib/collections/__init__.py
@@ -27,6 +27,7 @@
 ]
 
 import _collections_abc
+import heapq as _heapq
 import sys as _sys
 
 from itertools import chain as _chain
@@ -51,6 +52,22 @@
     pass
 
 
+def __getattr__(name):
+    # For backwards compatibility, continue to make the collections ABCs
+    # through Python 3.6 available through the collections module.
+    # Note, no new collections ABCs were added in Python 3.7
+    if name in _collections_abc.__all__:
+        obj = getattr(_collections_abc, name)
+        import warnings
+        warnings.warn("Using or importing the ABCs from 'collections' instead "
+                      "of from 'collections.abc' is deprecated since Python 3.3, "
+                      "and in 3.10 it will stop working",
+                      DeprecationWarning, stacklevel=2)
+        globals()[name] = obj
+        return obj
+    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
+
+
 ################################################################################
 ### OrderedDict
 ################################################################################
@@ -407,7 +424,7 @@
 
     namespace = {
         '_tuple_new': tuple_new,
-        '__builtins__': {},
+        '__builtins__': None,
         '__name__': f'namedtuple_{typename}',
     }
     code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
@@ -472,7 +489,6 @@
         '__repr__': __repr__,
         '_asdict': _asdict,
         '__getnewargs__': __getnewargs__,
-        '__match_args__': field_names,
     }
     for index, name in enumerate(field_names):
         doc = _sys.intern(f'Alias for field number {index}')
@@ -581,10 +597,6 @@
         # Needed so that self[missing_item] does not raise KeyError
         return 0
 
-    def total(self):
-        'Sum of the counts'
-        return sum(self.values())
-
     def most_common(self, n=None):
         '''List the n most common elements and their counts from the most
         common to the least.  If n is None, then list all element counts.
@@ -596,10 +608,7 @@
         # Emulate Bag.sortedByCount from Smalltalk
         if n is None:
             return sorted(self.items(), key=_itemgetter(1), reverse=True)
-
-        # Lazy import to speedup Python startup time
-        import heapq
-        return heapq.nlargest(n, self.items(), key=_itemgetter(1))
+        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
 
     def elements(self):
         '''Iterator over elements repeating each as many times as its count.
@@ -710,42 +719,6 @@
         if elem in self:
             super().__delitem__(elem)
 
-    def __eq__(self, other):
-        'True if all counts agree. Missing counts are treated as zero.'
-        if not isinstance(other, Counter):
-            return NotImplemented
-        return all(self[e] == other[e] for c in (self, other) for e in c)
-
-    def __ne__(self, other):
-        'True if any counts disagree. Missing counts are treated as zero.'
-        if not isinstance(other, Counter):
-            return NotImplemented
-        return not self == other
-
-    def __le__(self, other):
-        'True if all counts in self are a subset of those in other.'
-        if not isinstance(other, Counter):
-            return NotImplemented
-        return all(self[e] <= other[e] for c in (self, other) for e in c)
-
-    def __lt__(self, other):
-        'True if all counts in self are a proper subset of those in other.'
-        if not isinstance(other, Counter):
-            return NotImplemented
-        return self <= other and self != other
-
-    def __ge__(self, other):
-        'True if all counts in self are a superset of those in other.'
-        if not isinstance(other, Counter):
-            return NotImplemented
-        return all(self[e] >= other[e] for c in (self, other) for e in c)
-
-    def __gt__(self, other):
-        'True if all counts in self are a proper superset of those in other.'
-        if not isinstance(other, Counter):
-            return NotImplemented
-        return self >= other and self != other
-
     def __repr__(self):
         if not self:
             return f'{self.__class__.__name__}()'
@@ -766,30 +739,12 @@
     # To strip negative and zero counts, add-in an empty counter:
     #       c += Counter()
     #
-    # Results are ordered according to when an element is first
-    # encountered in the left operand and then by the order
-    # encountered in the right operand.
-    #
-    # When the multiplicities are all zero or one, multiset operations
-    # are guaranteed to be equivalent to the corresponding operations
-    # for regular sets.
-    #     Given counter multisets such as:
-    #         cp = Counter(a=1, b=0, c=1)
-    #         cq = Counter(c=1, d=0, e=1)
-    #     The corresponding regular sets would be:
-    #         sp = {'a', 'c'}
-    #         sq = {'c', 'e'}
-    #     All of the following relations would hold:
-    #         set(cp + cq) == sp | sq
-    #         set(cp - cq) == sp - sq
-    #         set(cp | cq) == sp | sq
-    #         set(cp & cq) == sp & sq
-    #         (cp == cq) == (sp == sq)
-    #         (cp != cq) == (sp != sq)
-    #         (cp <= cq) == (sp <= sq)
-    #         (cp < cq) == (sp < sq)
-    #         (cp >= cq) == (sp >= sq)
-    #         (cp > cq) == (sp > sq)
+    # Rich comparison operators for multiset subset and superset tests
+    # are deliberately omitted due to semantic conflicts with the
+    # existing inherited dict equality method.  Subset and superset
+    # semantics ignore zero counts and require that p≤q ∧ p≥q → p=q;
+    # however, that would not be the case for p=Counter(a=1, b=0)
+    # and q=Counter(a=1) where the dictionaries are not equal.
 
     def __add__(self, other):
         '''Add counts from two counters.
@@ -1018,15 +973,12 @@
 
     __copy__ = copy
 
-    def new_child(self, m=None, **kwargs):      # like Django's Context.push()
+    def new_child(self, m=None):                # like Django's Context.push()
         '''New ChainMap with a new map followed by all previous maps.
         If no map is provided, an empty dict is used.
-        Keyword arguments update the map or new empty dict.
         '''
         if m is None:
-            m = kwargs
-        elif kwargs:
-            m.update(kwargs)
+            m = {}
         return self.__class__(m, *self.maps)
 
     @property
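
The restored comment block explains why rich comparisons were dropped from this version of Counter: with zero counts retained, p <= q and p >= q would not imply dict equality. The arithmetic operators survive and strip non-positive counts; a sketch:

from collections import Counter

p = Counter(a=1, b=0)
q = Counter(a=1)
assert p != q                  # dict equality still sees the b=0 entry
assert p + Counter() == q      # adding an empty Counter strips zero counts
assert Counter(a=3) - Counter(a=1) == Counter(a=2)
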
diff --git a/common/py3-stdlib/collections/abc.py b/common/py3-stdlib/collections/abc.py
index 86ca8b8..891600d 100644
--- a/common/py3-stdlib/collections/abc.py
+++ b/common/py3-stdlib/collections/abc.py
@@ -1,3 +1,2 @@
 from _collections_abc import *
 from _collections_abc import __all__
-from _collections_abc import _CallableGenericAlias
diff --git a/common/py3-stdlib/colorsys.py b/common/py3-stdlib/colorsys.py
index 0f52512..b93e384 100644
--- a/common/py3-stdlib/colorsys.py
+++ b/common/py3-stdlib/colorsys.py
@@ -75,18 +75,17 @@
 def rgb_to_hls(r, g, b):
     maxc = max(r, g, b)
     minc = min(r, g, b)
-    sumc = (maxc+minc)
-    rangec = (maxc-minc)
-    l = sumc/2.0
+    # XXX Can optimize (maxc+minc) and (maxc-minc)
+    l = (minc+maxc)/2.0
     if minc == maxc:
         return 0.0, l, 0.0
     if l <= 0.5:
-        s = rangec / sumc
+        s = (maxc-minc) / (maxc+minc)
     else:
-        s = rangec / (2.0-sumc)
-    rc = (maxc-r) / rangec
-    gc = (maxc-g) / rangec
-    bc = (maxc-b) / rangec
+        s = (maxc-minc) / (2.0-maxc-minc)
+    rc = (maxc-r) / (maxc-minc)
+    gc = (maxc-g) / (maxc-minc)
+    bc = (maxc-b) / (maxc-minc)
     if r == maxc:
         h = bc-gc
     elif g == maxc:
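
The colorsys change is a pure refactor of shared subexpressions; the HLS math is identical in both versions, so a round trip through the public API is a quick sanity check:

import colorsys

h, l, s = colorsys.rgb_to_hls(0.2, 0.4, 0.4)
r, g, b = colorsys.hls_to_rgb(h, l, s)
assert all(abs(x - y) < 1e-7 for x, y in zip((r, g, b), (0.2, 0.4, 0.4)))
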
diff --git a/common/py3-stdlib/compileall.py b/common/py3-stdlib/compileall.py
index 3755e76..fe7f450 100644
--- a/common/py3-stdlib/compileall.py
+++ b/common/py3-stdlib/compileall.py
@@ -84,14 +84,12 @@
     if workers < 0:
         raise ValueError('workers must be greater or equal to 0')
     if workers != 1:
-        # Check if this is a system where ProcessPoolExecutor can function.
-        from concurrent.futures.process import _check_system_limits
         try:
-            _check_system_limits()
-        except NotImplementedError:
-            workers = 1
-        else:
+            # Only import when needed, as low resource platforms may
+            # fail to import it
             from concurrent.futures import ProcessPoolExecutor
+        except ImportError:
+            workers = 1
     if maxlevels is None:
         maxlevels = sys.getrecursionlimit()
     files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels)
@@ -221,8 +219,8 @@
             if not force:
                 try:
                     mtime = int(os.stat(fullname).st_mtime)
-                    expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER,
-                                         0, mtime & 0xFFFF_FFFF)
+                    expect = struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
+                                         0, mtime)
                     for cfile in opt_cfiles.values():
                         with open(cfile, 'rb') as chandle:
                             actual = chandle.read(12)
@@ -254,8 +252,9 @@
                 else:
                     print('*** ', end='')
                 # escape non-printable characters in msg
-                encoding = sys.stdout.encoding or sys.getdefaultencoding()
-                msg = err.msg.encode(encoding, errors='backslashreplace').decode(encoding)
+                msg = err.msg.encode(sys.stdout.encoding,
+                                     errors='backslashreplace')
+                msg = msg.decode(sys.stdout.encoding)
                 print(msg)
             except (SyntaxError, UnicodeError, OSError) as e:
                 success = False
@@ -367,9 +366,9 @@
                               'environment variable is set, and '
                               '"timestamp" otherwise.'))
     parser.add_argument('-o', action='append', type=int, dest='opt_levels',
-                        help=('Optimization levels to run compilation with. '
-                              'Default is -1 which uses the optimization level '
-                              'of the Python interpreter itself (see -O).'))
+                        help=('Optimization levels to run compilation with. '
+                              'Default is -1 which uses the optimization level '
+                              'of the Python interpreter itself (specified by -O).'))
     parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest',
                         help='Ignore symlinks pointing outside of the DIR')
     parser.add_argument('--hardlink-dupes', action='store_true',
@@ -406,8 +405,7 @@
     # if flist is provided then load it
     if args.flist:
         try:
-            with (sys.stdin if args.flist=='-' else
-                    open(args.flist, encoding="utf-8")) as f:
+            with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
                 for line in f:
                     compile_dests.append(line.strip())
         except OSError:
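
Both versions of compile_dir() above degrade to a single worker when process pools cannot be used; the public entry point is unchanged. A sketch, with an illustrative target directory:

import compileall

# workers=0 lets the module pick a worker count (os.cpu_count()).
ok = compileall.compile_dir("/tmp/pkg", quiet=1, workers=0)
print("all files compiled" if ok else "some files failed")
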
diff --git a/common/py3-stdlib/concurrent/futures/_base.py b/common/py3-stdlib/concurrent/futures/_base.py
index 5c00f2e..00eb548 100644
--- a/common/py3-stdlib/concurrent/futures/_base.py
+++ b/common/py3-stdlib/concurrent/futures/_base.py
@@ -284,14 +284,13 @@
         A named 2-tuple of sets. The first set, named 'done', contains the
         futures that completed (is finished or cancelled) before the wait
         completed. The second set, named 'not_done', contains uncompleted
-        futures. Duplicate futures given to *fs* are removed and will be 
-        returned only once.
+        futures.
     """
-    fs = set(fs)
     with _AcquireFutures(fs):
-        done = {f for f in fs
-                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]}
-        not_done = fs - done
+        done = set(f for f in fs
+                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
+        not_done = set(fs) - done
+
         if (return_when == FIRST_COMPLETED) and done:
             return DoneAndNotDoneFutures(done, not_done)
         elif (return_when == FIRST_EXCEPTION) and done:
@@ -310,7 +309,7 @@
             f._waiters.remove(waiter)
 
     done.update(waiter.finished_futures)
-    return DoneAndNotDoneFutures(done, fs - done)
+    return DoneAndNotDoneFutures(done, set(fs) - done)
 
 class Future(object):
     """Represents the result of an asynchronous computation."""
@@ -387,11 +386,7 @@
 
     def __get_result(self):
         if self._exception:
-            try:
-                raise self._exception
-            finally:
-                # Break a reference cycle with the exception in self._exception
-                self = None
+            raise self._exception
         else:
             return self._result
 
@@ -431,24 +426,20 @@
                 timeout.
             Exception: If the call raised then that exception will be raised.
         """
-        try:
-            with self._condition:
-                if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
-                    raise CancelledError()
-                elif self._state == FINISHED:
-                    return self.__get_result()
+        with self._condition:
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self.__get_result()
 
-                self._condition.wait(timeout)
+            self._condition.wait(timeout)
 
-                if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
-                    raise CancelledError()
-                elif self._state == FINISHED:
-                    return self.__get_result()
-                else:
-                    raise TimeoutError()
-        finally:
-            # Break a reference cycle with the exception in self._exception
-            self = None
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self.__get_result()
+            else:
+                raise TimeoutError()
 
     def exception(self, timeout=None):
         """Return the exception raised by the call that the future represents.
diff --git a/common/py3-stdlib/concurrent/futures/process.py b/common/py3-stdlib/concurrent/futures/process.py
index 6ee2ce6..90bc98b 100644
--- a/common/py3-stdlib/concurrent/futures/process.py
+++ b/common/py3-stdlib/concurrent/futures/process.py
@@ -373,7 +373,7 @@
         assert not self.thread_wakeup._closed
         wakeup_reader = self.thread_wakeup._reader
         readers = [result_reader, wakeup_reader]
-        worker_sentinels = [p.sentinel for p in list(self.processes.values())]
+        worker_sentinels = [p.sentinel for p in self.processes.values()]
         ready = mp.connection.wait(readers + worker_sentinels)
 
         cause = None
@@ -533,14 +533,6 @@
             raise NotImplementedError(_system_limited)
     _system_limits_checked = True
     try:
-        import multiprocessing.synchronize
-    except ImportError:
-        _system_limited = (
-            "This Python build lacks multiprocessing.synchronize, usually due "
-            "to named semaphores being unavailable on this platform."
-        )
-        raise NotImplementedError(_system_limited)
-    try:
         nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
     except (AttributeError, ValueError):
         # sysconf not available or setting not available
diff --git a/common/py3-stdlib/concurrent/futures/thread.py b/common/py3-stdlib/concurrent/futures/thread.py
index 51c942f..b7a2cac 100644
--- a/common/py3-stdlib/concurrent/futures/thread.py
+++ b/common/py3-stdlib/concurrent/futures/thread.py
@@ -36,12 +36,6 @@
 # See bpo-39812 for context.
 threading._register_atexit(_python_exit)
 
-# At fork, reinitialize the `_global_shutdown_lock` lock in the child process
-if hasattr(os, 'register_at_fork'):
-    os.register_at_fork(before=_global_shutdown_lock.acquire,
-                        after_in_child=_global_shutdown_lock._at_fork_reinit,
-                        after_in_parent=_global_shutdown_lock.release)
-
 
 class _WorkItem(object):
     def __init__(self, future, fn, args, kwargs):
diff --git a/common/py3-stdlib/configparser.py b/common/py3-stdlib/configparser.py
index 3470624..924cc56 100644
--- a/common/py3-stdlib/configparser.py
+++ b/common/py3-stdlib/configparser.py
@@ -316,7 +316,7 @@
     def filename(self):
         """Deprecated, use `source'."""
         warnings.warn(
-            "The 'filename' attribute will be removed in Python 3.12. "
+            "The 'filename' attribute will be removed in future versions.  "
             "Use 'source' instead.",
             DeprecationWarning, stacklevel=2
         )
@@ -326,7 +326,7 @@
     def filename(self, value):
         """Deprecated, user `source'."""
         warnings.warn(
-            "The 'filename' attribute will be removed in Python 3.12. "
+            "The 'filename' attribute will be removed in future versions.  "
             "Use 'source' instead.",
             DeprecationWarning, stacklevel=2
         )
@@ -563,7 +563,7 @@
     # Regular expressions for parsing section headers and options
     _SECT_TMPL = r"""
         \[                                 # [
-        (?P<header>.+)                     # very permissive!
+        (?P<header>[^]]+)                  # very permissive!
         \]                                 # ]
         """
     _OPT_TMPL = r"""
@@ -690,7 +690,6 @@
         """
         if isinstance(filenames, (str, bytes, os.PathLike)):
             filenames = [filenames]
-        encoding = io.text_encoding(encoding)
         read_ok = []
         for filename in filenames:
             try:
@@ -757,7 +756,7 @@
     def readfp(self, fp, filename=None):
         """Deprecated, use read_file instead."""
         warnings.warn(
-            "This method will be removed in Python 3.12. "
+            "This method will be removed in future versions.  "
             "Use 'parser.read_file()' instead.",
             DeprecationWarning, stacklevel=2
         )
@@ -908,9 +907,6 @@
 
         If `space_around_delimiters' is True (the default), delimiters
         between keys and values are surrounded by spaces.
-
-        Please note that comments in the original configuration file are not
-        preserved when writing the configuration back.
         """
         if space_around_delimiters:
             d = " {} ".format(self._delimiters[0])
@@ -1009,7 +1005,7 @@
         Configuration files may include comments, prefixed by specific
         characters (`#' and `;' by default). Comments may appear on their own
         in an otherwise empty line or may be entered in lines holding values or
-        section names. Please note that comments get stripped off when reading configuration files.
+        section names.
         """
         elements_added = set()
         cursect = None                        # None, or a dictionary
@@ -1232,7 +1228,7 @@
         super().__init__(*args, **kwargs)
         warnings.warn(
             "The SafeConfigParser class has been renamed to ConfigParser "
-            "in Python 3.2. This alias will be removed in Python 3.12."
+            "in Python 3.2. This alias will be removed in future versions."
             " Use ConfigParser directly instead.",
             DeprecationWarning, stacklevel=2
         )
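
Worth noting in the hunk above: the reverted section-header regex `[^]]+` stops at the first `]`, whereas the newer `.+` also accepts `]` inside section names. Plain usage is identical under both; a sketch:

import configparser

cp = configparser.ConfigParser()
cp.read_string("[server]\nport = 8080\n")
assert cp["server"]["port"] == "8080"
assert cp.getint("server", "port") == 8080
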
diff --git a/common/py3-stdlib/contextlib.py b/common/py3-stdlib/contextlib.py
index c63a849..ff92d9f 100644
--- a/common/py3-stdlib/contextlib.py
+++ b/common/py3-stdlib/contextlib.py
@@ -9,7 +9,7 @@
 __all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext",
            "AbstractContextManager", "AbstractAsyncContextManager",
            "AsyncExitStack", "ContextDecorator", "ExitStack",
-           "redirect_stdout", "redirect_stderr", "suppress", "aclosing"]
+           "redirect_stdout", "redirect_stderr", "suppress"]
 
 
 class AbstractContextManager(abc.ABC):
@@ -80,22 +80,6 @@
         return inner
 
 
-class AsyncContextDecorator(object):
-    "A base class or mixin that enables async context managers to work as decorators."
-
-    def _recreate_cm(self):
-        """Return a recreated instance of self.
-        """
-        return self
-
-    def __call__(self, func):
-        @wraps(func)
-        async def inner(*args, **kwds):
-            async with self._recreate_cm():
-                return await func(*args, **kwds)
-        return inner
-
-
 class _GeneratorContextManagerBase:
     """Shared functionality for @contextmanager and @asynccontextmanager."""
 
@@ -113,20 +97,18 @@
         # for the class instead.
         # See http://bugs.python.org/issue19404 for more details.
 
+
+class _GeneratorContextManager(_GeneratorContextManagerBase,
+                               AbstractContextManager,
+                               ContextDecorator):
+    """Helper for @contextmanager decorator."""
+
     def _recreate_cm(self):
-        # _GCMB instances are one-shot context managers, so the
+        # _GCM instances are one-shot context managers, so the
         # CM must be recreated each time a decorated function is
         # called
         return self.__class__(self.func, self.args, self.kwds)
 
-
-class _GeneratorContextManager(
-    _GeneratorContextManagerBase,
-    AbstractContextManager,
-    ContextDecorator,
-):
-    """Helper for @contextmanager decorator."""
-
     def __enter__(self):
         # do not keep args and kwds alive unnecessarily
         # they are only needed for recreation, which is not possible anymore
@@ -136,8 +118,8 @@
         except StopIteration:
             raise RuntimeError("generator didn't yield") from None
 
-    def __exit__(self, typ, value, traceback):
-        if typ is None:
+    def __exit__(self, type, value, traceback):
+        if type is None:
             try:
                 next(self.gen)
             except StopIteration:
@@ -148,9 +130,9 @@
             if value is None:
                 # Need to force instantiation so we can reliably
                 # tell if we get the same exception back
-                value = typ()
+                value = type()
             try:
-                self.gen.throw(typ, value, traceback)
+                self.gen.throw(type, value, traceback)
             except StopIteration as exc:
                 # Suppress StopIteration *unless* it's the same exception that
                 # was passed to throw().  This prevents a StopIteration
@@ -160,93 +142,74 @@
                 # Don't re-raise the passed in exception. (issue27122)
                 if exc is value:
                     return False
-                # Avoid suppressing if a StopIteration exception
+                # Likewise, avoid suppressing if a StopIteration exception
                 # was passed to throw() and later wrapped into a RuntimeError
-                # (see PEP 479 for sync generators; async generators also
-                # have this behavior). But do this only if the exception wrapped
-                # by the RuntimeError is actually Stop(Async)Iteration (see
-                # issue29692).
-                if (
-                    isinstance(value, StopIteration)
-                    and exc.__cause__ is value
-                ):
+                # (see PEP 479).
+                if type is StopIteration and exc.__cause__ is value:
                     return False
                 raise
-            except BaseException as exc:
+            except:
                 # only re-raise if it's *not* the exception that was
                 # passed to throw(), because __exit__() must not raise
                 # an exception unless __exit__() itself failed.  But throw()
                 # has to raise the exception to signal propagation, so this
                 # fixes the impedance mismatch between the throw() protocol
                 # and the __exit__() protocol.
-                if exc is not value:
-                    raise
-                return False
+                #
+                # This cannot use 'except BaseException as exc' (as in the
+                # async implementation) to maintain compatibility with
+                # Python 2, where old-style class exceptions are not caught
+                # by 'except BaseException'.
+                if sys.exc_info()[1] is value:
+                    return False
+                raise
             raise RuntimeError("generator didn't stop after throw()")
 
-class _AsyncGeneratorContextManager(
-    _GeneratorContextManagerBase,
-    AbstractAsyncContextManager,
-    AsyncContextDecorator,
-):
-    """Helper for @asynccontextmanager decorator."""
+
+class _AsyncGeneratorContextManager(_GeneratorContextManagerBase,
+                                    AbstractAsyncContextManager):
+    """Helper for @asynccontextmanager."""
 
     async def __aenter__(self):
-        # do not keep args and kwds alive unnecessarily
-        # they are only needed for recreation, which is not possible anymore
-        del self.args, self.kwds, self.func
         try:
-            return await anext(self.gen)
+            return await self.gen.__anext__()
         except StopAsyncIteration:
             raise RuntimeError("generator didn't yield") from None
 
     async def __aexit__(self, typ, value, traceback):
         if typ is None:
             try:
-                await anext(self.gen)
+                await self.gen.__anext__()
             except StopAsyncIteration:
-                return False
+                return
             else:
                 raise RuntimeError("generator didn't stop")
         else:
             if value is None:
-                # Need to force instantiation so we can reliably
-                # tell if we get the same exception back
                 value = typ()
+            # See _GeneratorContextManager.__exit__ for comments on subtleties
+            # in this implementation
             try:
                 await self.gen.athrow(typ, value, traceback)
+                raise RuntimeError("generator didn't stop after athrow()")
             except StopAsyncIteration as exc:
-                # Suppress StopIteration *unless* it's the same exception that
-                # was passed to throw().  This prevents a StopIteration
-                # raised inside the "with" statement from being suppressed.
                 return exc is not value
             except RuntimeError as exc:
-                # Don't re-raise the passed in exception. (issue27122)
                 if exc is value:
                     return False
-                # Avoid suppressing if a Stop(Async)Iteration exception
-                # was passed to athrow() and later wrapped into a RuntimeError
+                # Avoid suppressing if a StopIteration exception
+                # was passed to throw() and later wrapped into a RuntimeError
                 # (see PEP 479 for sync generators; async generators also
                 # have this behavior). But do this only if the exception wrapped
+                # by the RuntimeError is actually Stop(Async)Iteration (see
                 # issue29692).
-                if (
-                    isinstance(value, (StopIteration, StopAsyncIteration))
-                    and exc.__cause__ is value
-                ):
-                    return False
+                if isinstance(value, (StopIteration, StopAsyncIteration)):
+                    if exc.__cause__ is value:
+                        return False
                 raise
             except BaseException as exc:
-                # only re-raise if it's *not* the exception that was
-                # passed to throw(), because __exit__() must not raise
-                # an exception unless __exit__() itself failed.  But throw()
-                # has to raise the exception to signal propagation, so this
-                # fixes the impedance mismatch between the throw() protocol
-                # and the __exit__() protocol.
                 if exc is not value:
                     raise
-                return False
-            raise RuntimeError("generator didn't stop after athrow()")
 
 
 def contextmanager(func):
@@ -340,32 +303,6 @@
         self.thing.close()
 
 
-class aclosing(AbstractAsyncContextManager):
-    """Async context manager for safely finalizing an asynchronously cleaned-up
-    resource such as an async generator, calling its ``aclose()`` method.
-
-    Code like this:
-
-        async with aclosing(<module>.fetch(<arguments>)) as agen:
-            <block>
-
-    is equivalent to this:
-
-        agen = <module>.fetch(<arguments>)
-        try:
-            <block>
-        finally:
-            await agen.aclose()
-
-    """
-    def __init__(self, thing):
-        self.thing = thing
-    async def __aenter__(self):
-        return self.thing
-    async def __aexit__(self, *exc_info):
-        await self.thing.aclose()
-
-
 class _RedirectStream(AbstractContextManager):
 
     _stream = None
@@ -540,10 +477,10 @@
             # Context may not be correct, so find the end of the chain
             while 1:
                 exc_context = new_exc.__context__
-                if exc_context is None or exc_context is old_exc:
+                if exc_context is old_exc:
                     # Context is already set correctly (see issue 20317)
                     return
-                if exc_context is frame_exc:
+                if exc_context is None or exc_context is frame_exc:
                     break
                 new_exc = exc_context
             # Change the end of the chain to point to the exception
@@ -674,10 +611,10 @@
             # Context may not be correct, so find the end of the chain
             while 1:
                 exc_context = new_exc.__context__
-                if exc_context is None or exc_context is old_exc:
+                if exc_context is old_exc:
                     # Context is already set correctly (see issue 20317)
                     return
-                if exc_context is frame_exc:
+                if exc_context is None or exc_context is frame_exc:
                     break
                 new_exc = exc_context
             # Change the end of the chain to point to the exception
@@ -718,7 +655,7 @@
         return received_exc and suppressed_exc
 
 
-class nullcontext(AbstractContextManager, AbstractAsyncContextManager):
+class nullcontext(AbstractContextManager):
     """Context manager that does no additional processing.
 
     Used as a stand-in for a normal context manager, when a particular
@@ -737,9 +674,3 @@
 
     def __exit__(self, *excinfo):
         pass
-
-    async def __aenter__(self):
-        return self.enter_result
-
-    async def __aexit__(self, *excinfo):
-        pass
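
Most of the churn above is internal to the @contextmanager helper; the decorator contract is the same in both versions: the generator must yield exactly once, and an exception raised in the with-block re-enters the generator at the yield. A sketch:

from contextlib import contextmanager

@contextmanager
def tag(name):
    print(f"<{name}>")
    try:
        yield name       # the with-block body runs here
    finally:
        print(f"</{name}>")

with tag("b") as t:
    print("body", t)
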
diff --git a/common/py3-stdlib/copy.py b/common/py3-stdlib/copy.py
index 69bac98..41873f2 100644
--- a/common/py3-stdlib/copy.py
+++ b/common/py3-stdlib/copy.py
@@ -39,8 +39,8 @@
     set of components copied
 
 This version does not copy types like module, class, function, method,
-nor stack trace, stack frame, nor file, socket, window, nor any
-similar types.
+nor stack trace, stack frame, nor file, socket, window, nor array, nor
+any similar types.
 
 Classes can use the same interfaces to control copying that they use
 to control pickling: they can define methods called __getinitargs__(),
@@ -192,7 +192,6 @@
 d[str] = _deepcopy_atomic
 d[types.CodeType] = _deepcopy_atomic
 d[type] = _deepcopy_atomic
-d[range] = _deepcopy_atomic
 d[types.BuiltinFunctionType] = _deepcopy_atomic
 d[types.FunctionType] = _deepcopy_atomic
 d[weakref.ref] = _deepcopy_atomic
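
The dispatch table above marks immutable types as "atomic", meaning deepcopy returns them unchanged; containers are still copied recursively. A sketch:

import copy

nested = {"xs": [1, [2, 3]]}
dup = copy.deepcopy(nested)
dup["xs"][1].append(4)
assert nested["xs"][1] == [2, 3]  # the original nested list is untouched
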
diff --git a/common/py3-stdlib/copyreg.py b/common/py3-stdlib/copyreg.py
index 356db6f..7ab8c12 100644
--- a/common/py3-stdlib/copyreg.py
+++ b/common/py3-stdlib/copyreg.py
@@ -36,12 +36,6 @@
 
     pickle(complex, pickle_complex, complex)
 
-def pickle_union(obj):
-    import functools, operator
-    return functools.reduce, (operator.or_, obj.__args__)
-
-pickle(type(int | str), pickle_union)
-
 # Support for pickling new-style objects
 
 def _reconstructor(cls, base, state):
diff --git a/common/py3-stdlib/csv.py b/common/py3-stdlib/csv.py
index bb3ee26..dc85077 100644
--- a/common/py3-stdlib/csv.py
+++ b/common/py3-stdlib/csv.py
@@ -409,10 +409,14 @@
                 continue # skip rows that have irregular number of columns
 
             for col in list(columnTypes.keys()):
-                thisType = complex
-                try:
-                    thisType(row[col])
-                except (ValueError, OverflowError):
+
+                for thisType in [int, float, complex]:
+                    try:
+                        thisType(row[col])
+                        break
+                    except (ValueError, OverflowError):
+                        pass
+                else:
                     # fallback to length of string
                     thisType = len(row[col])
 
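
The restored Sniffer loop tries int, then float, then complex, and falls back to the field's string length, using a for/else to detect that no type matched. In user terms this is what powers has_header(); a sketch:

import csv

sample = "name,age\nalice,30\nbob,25\n"
assert csv.Sniffer().has_header(sample)      # 'age' is not numeric, 30/25 are
assert csv.Sniffer().sniff(sample).delimiter == ","
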
diff --git a/common/py3-stdlib/ctypes/_aix.py b/common/py3-stdlib/ctypes/_aix.py
index fc3e95c..190cac6 100644
--- a/common/py3-stdlib/ctypes/_aix.py
+++ b/common/py3-stdlib/ctypes/_aix.py
@@ -163,7 +163,7 @@
             return member
     else:
         # 32-bit legacy names - both shr.o and shr4.o exist.
-        # shr.o is the preferred name so we look for shr.o first
+        # shr.o is the preferred name so we look for shr.o first
         #  i.e., shr4.o is returned only when shr.o does not exist
         for name in ['shr.o', 'shr4.o']:
             member = get_one_match(re.escape(name), members)
@@ -282,7 +282,7 @@
         if path.exists(archive):
             members = get_shared(get_ld_headers(archive))
             member = get_member(re.escape(name), members)
-            if member is not None:
+            if member is not None:
                 return (base, member)
             else:
                 return (None, None)
@@ -307,7 +307,7 @@
 
     libpaths = get_libpaths()
     (base, member) = find_shared(libpaths, name)
-    if base is not None:
+    if base is not None:
         return f"{base}({member})"
 
     # To get here, a member in an archive has not been found
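
The module above is the AIX backend for ctypes.util.find_library(); on every platform that function returns a best-effort name or None, so callers must handle both. A sketch:

from ctypes.util import find_library

# Typically 'libm.so.6' on Linux, 'libm.dylib' on macOS,
# or an archive(member) string on AIX; None when nothing matches.
print(find_library("m"))
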
diff --git a/common/py3-stdlib/ctypes/test/__init__.py b/common/py3-stdlib/ctypes/test/__init__.py
index 6e496fa..26a70b7 100644
--- a/common/py3-stdlib/ctypes/test/__init__.py
+++ b/common/py3-stdlib/ctypes/test/__init__.py
@@ -1,11 +1,9 @@
 import os
 import unittest
 from test import support
-from test.support import import_helper
-
 
 # skip tests if _ctypes was not built
-ctypes = import_helper.import_module('ctypes')
+ctypes = support.import_module('ctypes')
 ctypes_symbols = dir(ctypes)
 
 def need_symbol(name):
diff --git a/common/py3-stdlib/ctypes/test/test_bitfields.py b/common/py3-stdlib/ctypes/test/test_bitfields.py
index 66acd62..992b8c4 100644
--- a/common/py3-stdlib/ctypes/test/test_bitfields.py
+++ b/common/py3-stdlib/ctypes/test/test_bitfields.py
@@ -1,6 +1,5 @@
 from ctypes import *
 from ctypes.test import need_symbol
-from test import support
 import unittest
 import os
 
@@ -40,8 +39,6 @@
                 setattr(b, name, i)
                 self.assertEqual(getattr(b, name), func(byref(b), name.encode('ascii')))
 
-    # bpo-46913: _ctypes/cfield.c h_get() has an undefined behavior
-    @support.skip_if_sanitizer(ub=True)
     def test_shorts(self):
         b = BITS()
         name = "M"
diff --git a/common/py3-stdlib/ctypes/test/test_find.py b/common/py3-stdlib/ctypes/test/test_find.py
index 1ff9d01..92ac184 100644
--- a/common/py3-stdlib/ctypes/test/test_find.py
+++ b/common/py3-stdlib/ctypes/test/test_find.py
@@ -3,7 +3,6 @@
 import os.path
 import sys
 import test.support
-from test.support import os_helper
 from ctypes import *
 from ctypes.util import find_library
 
@@ -67,8 +66,8 @@
         self.gle.gleGetJoinStyle
 
     def test_shell_injection(self):
-        result = find_library('; echo Hello shell > ' + os_helper.TESTFN)
-        self.assertFalse(os.path.lexists(os_helper.TESTFN))
+        result = find_library('; echo Hello shell > ' + test.support.TESTFN)
+        self.assertFalse(os.path.lexists(test.support.TESTFN))
         self.assertIsNone(result)
 
 
@@ -90,7 +89,7 @@
             srcname = os.path.join(d, 'dummy.c')
             libname = 'py_ctypes_test_dummy'
             dstname = os.path.join(d, 'lib%s.so' % libname)
-            with open(srcname, 'wb') as f:
+            with open(srcname, 'w') as f:
                 pass
             self.assertTrue(os.path.exists(srcname))
             # compile the file to a shared library
@@ -102,7 +101,7 @@
             # LD_LIBRARY_PATH)
             self.assertIsNone(find_library(libname))
             # now add the location to LD_LIBRARY_PATH
-            with os_helper.EnvironmentVarGuard() as env:
+            with test.support.EnvironmentVarGuard() as env:
                 KEY = 'LD_LIBRARY_PATH'
                 if KEY not in env:
                     v = d
diff --git a/common/py3-stdlib/ctypes/test/test_functions.py b/common/py3-stdlib/ctypes/test/test_functions.py
index bdb044e..7562892 100644
--- a/common/py3-stdlib/ctypes/test/test_functions.py
+++ b/common/py3-stdlib/ctypes/test/test_functions.py
@@ -35,24 +35,34 @@
         # wasn't checked, and it even crashed Python.
         # Found by Greg Chapman.
 
-        with self.assertRaises(TypeError):
+        try:
             class X(object, Array):
                 _length_ = 5
                 _type_ = "i"
+        except TypeError:
+            pass
+
 
         from _ctypes import _Pointer
-        with self.assertRaises(TypeError):
+        try:
             class X(object, _Pointer):
                 pass
+        except TypeError:
+            pass
 
         from _ctypes import _SimpleCData
-        with self.assertRaises(TypeError):
+        try:
             class X(object, _SimpleCData):
                 _type_ = "i"
+        except TypeError:
+            pass
 
-        with self.assertRaises(TypeError):
+        try:
             class X(object, Structure):
                 _fields_ = []
+        except TypeError:
+            pass
+
 
     @need_symbol('c_wchar')
     def test_wchar_parm(self):
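
One caveat about the try/except pattern restored above: it passes silently when no TypeError is raised at all, whereas the assertRaises form fails the test in that case. A minimal standalone sketch of the stricter form:

    import unittest

    class Demo(unittest.TestCase):
        def test_mixing_types(self):
            # Fails loudly if the body raises nothing, unlike try/except/pass.
            with self.assertRaises(TypeError):
                "a" + 1

    if __name__ == "__main__":
        unittest.main()
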
@@ -379,7 +389,7 @@
                 (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
 
     def test_sf1651235(self):
-        # see https://www.python.org/sf/1651235
+        # see http://www.python.org/sf/1651235
 
         proto = CFUNCTYPE(c_int, RECT, POINT)
         def callback(*args):
diff --git a/common/py3-stdlib/ctypes/test/test_loading.py b/common/py3-stdlib/ctypes/test/test_loading.py
index ea89227..ba655bc 100644
--- a/common/py3-stdlib/ctypes/test/test_loading.py
+++ b/common/py3-stdlib/ctypes/test/test_loading.py
@@ -5,8 +5,6 @@
 import sys
 import unittest
 import test.support
-from test.support import import_helper
-from test.support import os_helper
 from ctypes.util import find_library
 
 libc_name = None
@@ -93,7 +91,7 @@
         # NOT fit into a 32-bit integer.  FreeLibrary must be able
         # to accept this address.
 
-        # These are tests for https://www.python.org/sf/1703286
+        # These are tests for http://www.python.org/sf/1703286
         handle = LoadLibrary("advapi32")
         FreeLibrary(handle)
 
@@ -119,14 +117,14 @@
     @unittest.skipUnless(os.name == "nt",
                          'test specific to Windows')
     def test_load_dll_with_flags(self):
-        _sqlite3 = import_helper.import_module("_sqlite3")
+        _sqlite3 = test.support.import_module("_sqlite3")
         src = _sqlite3.__file__
         if src.lower().endswith("_d.pyd"):
             ext = "_d.dll"
         else:
             ext = ".dll"
 
-        with os_helper.temp_dir() as tmp:
+        with test.support.temp_dir() as tmp:
             # We copy two files and load _sqlite3.dll (formerly .pyd),
             # which has a dependency on sqlite3.dll. Then we test
             # loading it in subprocesses to avoid it starting in memory
diff --git a/common/py3-stdlib/ctypes/test/test_numbers.py b/common/py3-stdlib/ctypes/test/test_numbers.py
index db500e8..c6d843b 100644
--- a/common/py3-stdlib/ctypes/test/test_numbers.py
+++ b/common/py3-stdlib/ctypes/test/test_numbers.py
@@ -134,7 +134,8 @@
         for t in signed_types + unsigned_types:
             self.assertRaises(TypeError, t, 3.14)
             self.assertRaises(TypeError, t, f)
-            self.assertRaises(TypeError, t, d)
+            with self.assertWarns(DeprecationWarning):
+                self.assertEqual(t(d).value, 2)
             self.assertEqual(t(i).value, 2)
 
     def test_sizes(self):
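
The assertWarns block restored above exercises the older conversion path: an object exposing only __int__ is converted with a DeprecationWarning rather than rejected. A sketch of that behavior (assumes the pre-3.10 ctypes semantics this revert restores):

    import warnings
    from ctypes import c_int

    class IntLike:
        def __int__(self):
            return 2

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        print(c_int(IntLike()).value)   # 2, with a DeprecationWarning in `caught`
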
diff --git a/common/py3-stdlib/ctypes/test/test_parameters.py b/common/py3-stdlib/ctypes/test/test_parameters.py
index 38af7ac..e4c25fd 100644
--- a/common/py3-stdlib/ctypes/test/test_parameters.py
+++ b/common/py3-stdlib/ctypes/test/test_parameters.py
@@ -201,49 +201,6 @@
         with self.assertRaises(ZeroDivisionError):
             WorseStruct().__setstate__({}, b'foo')
 
-    def test_parameter_repr(self):
-        from ctypes import (
-            c_bool,
-            c_char,
-            c_wchar,
-            c_byte,
-            c_ubyte,
-            c_short,
-            c_ushort,
-            c_int,
-            c_uint,
-            c_long,
-            c_ulong,
-            c_longlong,
-            c_ulonglong,
-            c_float,
-            c_double,
-            c_longdouble,
-            c_char_p,
-            c_wchar_p,
-            c_void_p,
-        )
-        self.assertRegex(repr(c_bool.from_param(True)), r"^<cparam '\?' at 0x[A-Fa-f0-9]+>$")
-        self.assertEqual(repr(c_char.from_param(97)), "<cparam 'c' ('a')>")
-        self.assertRegex(repr(c_wchar.from_param('a')), r"^<cparam 'u' at 0x[A-Fa-f0-9]+>$")
-        self.assertEqual(repr(c_byte.from_param(98)), "<cparam 'b' (98)>")
-        self.assertEqual(repr(c_ubyte.from_param(98)), "<cparam 'B' (98)>")
-        self.assertEqual(repr(c_short.from_param(511)), "<cparam 'h' (511)>")
-        self.assertEqual(repr(c_ushort.from_param(511)), "<cparam 'H' (511)>")
-        self.assertRegex(repr(c_int.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
-        self.assertRegex(repr(c_uint.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
-        self.assertRegex(repr(c_long.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
-        self.assertRegex(repr(c_ulong.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
-        self.assertRegex(repr(c_longlong.from_param(20000)), r"^<cparam '[liq]' \(20000\)>$")
-        self.assertRegex(repr(c_ulonglong.from_param(20000)), r"^<cparam '[LIQ]' \(20000\)>$")
-        self.assertEqual(repr(c_float.from_param(1.5)), "<cparam 'f' (1.5)>")
-        self.assertEqual(repr(c_double.from_param(1.5)), "<cparam 'd' (1.5)>")
-        self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
-        self.assertRegex(repr(c_longdouble.from_param(1.5)), r"^<cparam ('d' \(1.5\)|'g' at 0x[A-Fa-f0-9]+)>$")
-        self.assertRegex(repr(c_char_p.from_param(b'hihi')), r"^<cparam 'z' \(0x[A-Fa-f0-9]+\)>$")
-        self.assertRegex(repr(c_wchar_p.from_param('hihi')), r"^<cparam 'Z' \(0x[A-Fa-f0-9]+\)>$")
-        self.assertRegex(repr(c_void_p.from_param(0x12)), r"^<cparam 'P' \(0x0*12\)>$")
-
 ################################################################
 
 if __name__ == '__main__':
diff --git a/common/py3-stdlib/ctypes/test/test_python_api.py b/common/py3-stdlib/ctypes/test/test_python_api.py
index 49571f9..9c13746 100644
--- a/common/py3-stdlib/ctypes/test/test_python_api.py
+++ b/common/py3-stdlib/ctypes/test/test_python_api.py
@@ -1,5 +1,5 @@
 from ctypes import *
-import unittest
+import unittest, sys
 from test import support
 
 ################################################################
@@ -10,6 +10,10 @@
 ################################################################
 
 from sys import getrefcount as grc
+if sys.version_info > (2, 4):
+    c_py_ssize_t = c_size_t
+else:
+    c_py_ssize_t = c_int
 
 class PythonAPITestCase(unittest.TestCase):
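
The alias above works because Py_ssize_t and size_t share a width on supported platforms, so c_size_t is a serviceable stand-in; a quick check (common ABIs):

    import ctypes
    print(ctypes.sizeof(ctypes.c_size_t) == ctypes.sizeof(ctypes.c_void_p))  # True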
 
@@ -17,7 +21,7 @@
         PyBytes_FromStringAndSize = pythonapi.PyBytes_FromStringAndSize
 
         PyBytes_FromStringAndSize.restype = py_object
-        PyBytes_FromStringAndSize.argtypes = c_char_p, c_size_t
+        PyBytes_FromStringAndSize.argtypes = c_char_p, c_py_ssize_t
 
         self.assertEqual(PyBytes_FromStringAndSize(b"abcdefghi", 3), b"abc")
 
diff --git a/common/py3-stdlib/ctypes/test/test_struct_fields.py b/common/py3-stdlib/ctypes/test/test_struct_fields.py
index ee8415f..8045cc8 100644
--- a/common/py3-stdlib/ctypes/test/test_struct_fields.py
+++ b/common/py3-stdlib/ctypes/test/test_struct_fields.py
@@ -46,14 +46,6 @@
         Y._fields_ = []
         self.assertRaises(AttributeError, setattr, X, "_fields_", [])
 
-    def test_5(self):
-        class X(Structure):
-            _fields_ = (("char", c_char * 5),)
-
-        x = X(b'#' * 5)
-        x.char = b'a\0b\0'
-        self.assertEqual(bytes(x), b'a\x00###')
-
     # __set__ and __get__ should raise a TypeError in case their self
     # argument is not a ctype instance.
     def test___set__(self):
diff --git a/common/py3-stdlib/ctypes/test/test_structures.py b/common/py3-stdlib/ctypes/test/test_structures.py
index 97ad2b8..245cd94 100644
--- a/common/py3-stdlib/ctypes/test/test_structures.py
+++ b/common/py3-stdlib/ctypes/test/test_structures.py
@@ -443,7 +443,7 @@
 
         s = Test(1, 2, 3)
         # Test the StructUnionType_paramfunc() code path which copies the
-        # structure: if the structure is larger than sizeof(void*).
+        # structure: if the structure is larger than sizeof(void*).
         self.assertGreater(sizeof(s), sizeof(c_void_p))
 
         dll = CDLL(_ctypes_test.__file__)
@@ -451,7 +451,7 @@
         func.argtypes = (Test,)
         func.restype = None
         func(s)
-        # bpo-37140: Passing the structure by reference must not call
+        # bpo-37140: Passing the structure by reference must not call
         # its finalizer!
         self.assertEqual(finalizer_calls, [])
         self.assertEqual(s.first, 1)
diff --git a/common/py3-stdlib/ctypes/test/test_unicode.py b/common/py3-stdlib/ctypes/test/test_unicode.py
index 60c7542..c200af7 100644
--- a/common/py3-stdlib/ctypes/test/test_unicode.py
+++ b/common/py3-stdlib/ctypes/test/test_unicode.py
@@ -26,14 +26,6 @@
         self.assertEqual(buf[::2], 'a\xe4\xfc')
         self.assertEqual(buf[6:5:-1], "")
 
-    def test_embedded_null(self):
-        class TestStruct(ctypes.Structure):
-            _fields_ = [("unicode", ctypes.c_wchar_p)]
-        t = TestStruct()
-        # This would raise a ValueError:
-        t.unicode = "foo\0bar\0\0"
-
-
 func = ctypes.CDLL(_ctypes_test.__file__)._testfunc_p_p
 
 class StringTestCase(UnicodeTestCase):
diff --git a/common/py3-stdlib/ctypes/test/test_values.py b/common/py3-stdlib/ctypes/test/test_values.py
index 7514fe8..87eb919 100644
--- a/common/py3-stdlib/ctypes/test/test_values.py
+++ b/common/py3-stdlib/ctypes/test/test_values.py
@@ -80,9 +80,9 @@
                 continue
             items.append((entry.name.decode("ascii"), entry.size))
 
-        expected = [("__hello__", 137),
-                    ("__phello__", -137),
-                    ("__phello__.spam", 137),
+        expected = [("__hello__", 141),
+                    ("__phello__", -141),
+                    ("__phello__.spam", 141),
                     ]
         self.assertEqual(items, expected, "PyImport_FrozenModules example "
             "in Doc/library/ctypes.rst may be out of date")
diff --git a/common/py3-stdlib/dataclasses.py b/common/py3-stdlib/dataclasses.py
index 105a95b..530d3e9 100644
--- a/common/py3-stdlib/dataclasses.py
+++ b/common/py3-stdlib/dataclasses.py
@@ -6,9 +6,8 @@
 import keyword
 import builtins
 import functools
-import abc
 import _thread
-from types import FunctionType, GenericAlias
+from types import GenericAlias
 
 
 __all__ = ['dataclass',
@@ -16,7 +15,6 @@
            'Field',
            'FrozenInstanceError',
            'InitVar',
-           'KW_ONLY',
            'MISSING',
 
            # Helper functions.
@@ -153,20 +151,6 @@
 #
 # See _hash_action (below) for a coded version of this table.
 
-# __match_args__
-#
-#    +--- match_args= parameter
-#    |
-#    v    |       |       |
-#         |  no   |  yes  |  <--- class has __match_args__ in __dict__?
-# +=======+=======+=======+
-# | False |       |       |
-# +-------+-------+-------+
-# | True  | add   |       |  <- the default
-# +=======+=======+=======+
-# __match_args__ is always added unless the class already defines it. It is a
-# tuple of __init__ parameter names; non-init fields must be matched by keyword.
-
 
 # Raised when an attempt is made to modify a frozen class.
 class FrozenInstanceError(AttributeError): pass
@@ -185,12 +169,6 @@
     pass
 MISSING = _MISSING_TYPE()
 
-# A sentinel object to indicate that following fields are keyword-only by
-# default.  Use a class to give it a better repr.
-class _KW_ONLY_TYPE:
-    pass
-KW_ONLY = _KW_ONLY_TYPE()
-
 # Since most per-field metadata will be unused, create an empty
 # read-only proxy that can be shared among all fields.
 _EMPTY_METADATA = types.MappingProxyType({})
@@ -229,7 +207,7 @@
         self.type = type
 
     def __repr__(self):
-        if isinstance(self.type, type) and not isinstance(self.type, GenericAlias):
+        if isinstance(self.type, type):
             type_name = self.type.__name__
         else:
             # typing objects, e.g. List[int]
@@ -239,6 +217,7 @@
     def __class_getitem__(cls, type):
         return InitVar(type)
 
+
 # Instances of Field are only ever created from within this module,
 # and only from the field() function, although Field instances are
 # exposed externally as (conceptually) read-only objects.
@@ -259,12 +238,11 @@
                  'init',
                  'compare',
                  'metadata',
-                 'kw_only',
                  '_field_type',  # Private: not to be used by user code.
                  )
 
     def __init__(self, default, default_factory, init, repr, hash, compare,
-                 metadata, kw_only):
+                 metadata):
         self.name = None
         self.type = None
         self.default = default
@@ -276,7 +254,6 @@
         self.metadata = (_EMPTY_METADATA
                          if metadata is None else
                          types.MappingProxyType(metadata))
-        self.kw_only = kw_only
         self._field_type = None
 
     def __repr__(self):
@@ -290,7 +267,6 @@
                 f'hash={self.hash!r},'
                 f'compare={self.compare!r},'
                 f'metadata={self.metadata!r},'
-                f'kw_only={self.kw_only!r},'
                 f'_field_type={self._field_type}'
                 ')')
 
@@ -344,19 +320,17 @@
 # so that a type checker can be told (via overloads) that this is a
 # function whose type depends on its parameters.
 def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
-          hash=None, compare=True, metadata=None, kw_only=MISSING):
+          hash=None, compare=True, metadata=None):
     """Return an object to identify dataclass fields.
 
     default is the default value of the field.  default_factory is a
     0-argument function called to initialize a field's value.  If init
-    is true, the field will be a parameter to the class's __init__()
-    function.  If repr is true, the field will be included in the
-    object's repr().  If hash is true, the field will be included in the
-    object's hash().  If compare is true, the field will be used in
-    comparison functions.  metadata, if specified, must be a mapping
-    which is stored but not otherwise examined by dataclass.  If kw_only
-    is true, the field will become a keyword-only parameter to
-    __init__().
+    is True, the field will be a parameter to the class's __init__()
+    function.  If repr is True, the field will be included in the
+    object's repr().  If hash is True, the field will be included in
+    the object's hash().  If compare is True, the field will be used
+    in comparison functions.  metadata, if specified, must be a
+    mapping which is stored but not otherwise examined by dataclass.
 
     It is an error to specify both default and default_factory.
     """
@@ -364,16 +338,7 @@
     if default is not MISSING and default_factory is not MISSING:
         raise ValueError('cannot specify both default and default_factory')
     return Field(default, default_factory, init, repr, hash, compare,
-                 metadata, kw_only)
-
-
-def _fields_in_init_order(fields):
-    # Returns the fields as __init__ will output them.  It returns 2 tuples:
-    # the first for normal args, and the second for keyword args.
-
-    return (tuple(f for f in fields if f.init and not f.kw_only),
-            tuple(f for f in fields if f.init and f.kw_only)
-            )
+                 metadata)
 
 
 def _tuple_str(obj_name, fields):
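
A short, hypothetical usage of field() as the restored docstring describes it:

    from dataclasses import dataclass, field

    @dataclass
    class Inventory:
        items: list = field(default_factory=list)     # fresh list per instance
        cost: float = field(default=0.0, repr=False)  # kept out of repr()

    print(Inventory())   # Inventory(items=[])
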
@@ -430,6 +395,7 @@
 
     local_vars = ', '.join(locals.keys())
     txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}"
+
     ns = {}
     exec(txt, globals, ns)
     return ns['__create_fn__'](**locals)
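
The exec-into-namespace trick above is how dataclasses manufactures methods from generated source text. The same pattern as a minimal standalone sketch (hypothetical names):

    def make_adder(n):
        txt = "def __create_fn__(n):\n def add(x):\n  return x + n\n return add"
        ns = {}
        exec(txt, {}, ns)              # compile the source; __create_fn__ lands in ns
        return ns['__create_fn__'](n)  # call it so `add` closes over n

    print(make_adder(3)(4))   # 7
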
@@ -447,7 +413,7 @@
     return f'{self_name}.{name}={value}'
 
 
-def _field_init(f, frozen, globals, self_name, slots):
+def _field_init(f, frozen, globals, self_name):
     # Return the text of the line in the body of __init__ that will
     # initialize this field.
 
@@ -487,15 +453,9 @@
                 globals[default_name] = f.default
                 value = f.name
         else:
-            # If the class has slots, then initialize this field.
-            if slots and f.default is not MISSING:
-                globals[default_name] = f.default
-                value = default_name
-            else:
-                # This field does not need initialization: reading from it will
-                # just use the class attribute that contains the default.
-                # Signify that to the caller by returning None.
-                return None
+            # This field does not need initialization.  Signify that
+            # to the caller by returning None.
+            return None
 
     # Only test this now, so that we can create variables for the
     # default.  However, return None to signify that we're not going
@@ -526,8 +486,7 @@
     return f'{f.name}:_type_{f.name}{default}'
 
 
-def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init,
-             self_name, globals, slots):
+def _init_fn(fields, frozen, has_post_init, self_name, globals):
     # fields contains both real fields and InitVar pseudo-fields.
 
     # Make sure we don't have fields without defaults following fields
@@ -535,10 +494,9 @@
     # function source code, but catching it here gives a better error
     # message, and future-proofs us in case we build up the function
     # using ast.
-
     seen_default = False
-    for f in std_fields:
-        # Only consider the non-kw-only fields in the __init__ call.
+    for f in fields:
+        # Only consider fields in the __init__ call.
         if f.init:
             if not (f.default is MISSING and f.default_factory is MISSING):
                 seen_default = True
@@ -554,7 +512,7 @@
 
     body_lines = []
     for f in fields:
-        line = _field_init(f, frozen, locals, self_name, slots)
+        line = _field_init(f, frozen, locals, self_name)
         # line is None means that this field doesn't require
         # initialization (it's a pseudo-field).  Just skip it.
         if line:
@@ -570,15 +528,8 @@
     if not body_lines:
         body_lines = ['pass']
 
-    _init_params = [_init_param(f) for f in std_fields]
-    if kw_only_fields:
-        # Add the keyword-only args.  Because the * can only be added if
-        # there's at least one keyword-only arg, there needs to be a test here
-        # (instead of just concatenting the lists together).
-        _init_params += ['*']
-        _init_params += [_init_param(f) for f in kw_only_fields]
     return _create_fn('__init__',
-                      [self_name] + _init_params,
+                      [self_name] + [_init_param(f) for f in fields if f.init],
                       body_lines,
                       locals=locals,
                       globals=globals,
@@ -657,9 +608,6 @@
     return (a_type is dataclasses.InitVar
             or type(a_type) is dataclasses.InitVar)
 
-def _is_kw_only(a_type, dataclasses):
-    return a_type is dataclasses.KW_ONLY
-
 
 def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
     # Given a type annotation string, does it refer to a_type in
@@ -720,11 +668,10 @@
     return False
 
 
-def _get_field(cls, a_name, a_type, default_kw_only):
-    # Return a Field object for this field name and type.  ClassVars and
-    # InitVars are also returned, but marked as such (see f._field_type).
-    # default_kw_only is the value of kw_only to use if there isn't a field()
-    # that defines it.
+def _get_field(cls, a_name, a_type):
+    # Return a Field object for this field name and type.  ClassVars
+    # and InitVars are also returned, but marked as such (see
+    # f._field_type).
 
     # If the default value isn't derived from Field, then it's only a
     # normal default value.  Convert it to a Field().
@@ -749,7 +696,7 @@
     # In addition to checking for actual types here, also check for
     # string annotations.  get_type_hints() won't always work for us
     # (see https://github.com/python/typing/issues/508 for example),
-    # plus it's expensive and would require an eval for every string
+    # plus it's expensive and would require an eval for every string
     # annotation.  So, make a best effort to see if this is a ClassVar
     # or InitVar using regex's and checking that the thing referenced
     # is actually of the correct type.
@@ -795,19 +742,6 @@
         # init=<not-the-default-init-value>)?  It makes no sense for
         # ClassVar and InitVar to specify init=<anything>.
 
-    # kw_only validation and assignment.
-    if f._field_type in (_FIELD, _FIELD_INITVAR):
-        # For real and InitVar fields, if kw_only wasn't specified use the
-        # default value.
-        if f.kw_only is MISSING:
-            f.kw_only = default_kw_only
-    else:
-        # Make sure kw_only isn't set for ClassVars
-        assert f._field_type is _FIELD_CLASSVAR
-        if f.kw_only is not MISSING:
-            raise TypeError(f'field {f.name} is a ClassVar but specifies '
-                            'kw_only')
-
     # For real fields, disallow mutable defaults for known types.
     if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
         raise ValueError(f'mutable default {type(f.default)} for field '
@@ -815,19 +749,12 @@
 
     return f
 
-def _set_qualname(cls, value):
-    # Ensure that the functions returned from _create_fn uses the proper
-    # __qualname__ (the class they belong to).
-    if isinstance(value, FunctionType):
-        value.__qualname__ = f"{cls.__qualname__}.{value.__name__}"
-    return value
 
 def _set_new_attribute(cls, name, value):
     # Never overwrites an existing attribute.  Returns True if the
     # attribute already exists.
     if name in cls.__dict__:
         return True
-    _set_qualname(cls, value)
     setattr(cls, name, value)
     return False
 
@@ -842,7 +769,7 @@
 
 def _hash_add(cls, fields, globals):
     flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
-    return _set_qualname(cls, _hash_fn(flds, globals))
+    return _hash_fn(flds, globals)
 
 def _hash_exception(cls, fields, globals):
     # Raise an exception.
@@ -879,8 +806,7 @@
 # version of this table.
 
 
-def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen,
-                   match_args, kw_only, slots):
+def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
     # Now that dicts retain insertion order, there's no reason to use
     # an ordered dict.  I am leveraging that ordering here, because
     # derived class fields overwrite base class fields, but the order
@@ -910,7 +836,7 @@
         # Only process classes that have been processed by our
         # decorator.  That is, they have a _FIELDS attribute.
         base_fields = getattr(b, _FIELDS, None)
-        if base_fields is not None:
+        if base_fields:
             has_dataclass_bases = True
             for f in base_fields.values():
                 fields[f.name] = f
@@ -934,27 +860,8 @@
     # Now find fields in our class.  While doing so, validate some
     # things, and set the default values (as class attributes) where
     # we can.
-    cls_fields = []
-    # Get a reference to this module for the _is_kw_only() test.
-    KW_ONLY_seen = False
-    dataclasses = sys.modules[__name__]
-    for name, type in cls_annotations.items():
-        # See if this is a marker to change the value of kw_only.
-        if (_is_kw_only(type, dataclasses)
-            or (isinstance(type, str)
-                and _is_type(type, cls, dataclasses, dataclasses.KW_ONLY,
-                             _is_kw_only))):
-            # Switch the default to kw_only=True, and ignore this
-            # annotation: it's not a real field.
-            if KW_ONLY_seen:
-                raise TypeError(f'{name!r} is KW_ONLY, but KW_ONLY '
-                                'has already been specified')
-            KW_ONLY_seen = True
-            kw_only = True
-        else:
-            # Otherwise it's a field of some type.
-            cls_fields.append(_get_field(cls, name, type, kw_only))
-
+    cls_fields = [_get_field(cls, name, type)
+                  for name, type in cls_annotations.items()]
     for f in cls_fields:
         fields[f.name] = f
 
@@ -1009,22 +916,15 @@
     if order and not eq:
         raise ValueError('eq must be true if order is true')
 
-    # Include InitVars and regular fields (so, not ClassVars).  This is
-    # initialized here, outside of the "if init:" test, because std_init_fields
-    # is used with match_args, below.
-    all_init_fields = [f for f in fields.values()
-                       if f._field_type in (_FIELD, _FIELD_INITVAR)]
-    (std_init_fields,
-     kw_only_init_fields) = _fields_in_init_order(all_init_fields)
-
     if init:
         # Does this class have a post-init function?
         has_post_init = hasattr(cls, _POST_INIT_NAME)
 
+        # Include InitVars and regular fields (so, not ClassVars).
+        flds = [f for f in fields.values()
+                if f._field_type in (_FIELD, _FIELD_INITVAR)]
         _set_new_attribute(cls, '__init__',
-                           _init_fn(all_init_fields,
-                                    std_init_fields,
-                                    kw_only_init_fields,
+                           _init_fn(flds,
                                     frozen,
                                     has_post_init,
                                     # The name to use for the "self"
@@ -1033,7 +933,6 @@
                                     '__dataclass_self__' if 'self' in fields
                                             else 'self',
                                     globals,
-                                    slots,
                           ))
 
     # Get the fields as a list, and include only real fields.  This is
@@ -1045,7 +944,7 @@
         _set_new_attribute(cls, '__repr__', _repr_fn(flds, globals))
 
     if eq:
-        # Create __eq__ method.  There's no need for a __ne__ method,
+        # Create __eq__ method.  There's no need for a __ne__ method,
         # since python will call __eq__ and negate it.
         flds = [f for f in field_list if f.compare]
         self_tuple = _tuple_str('self', flds)
@@ -1093,70 +992,11 @@
         cls.__doc__ = (cls.__name__ +
                        str(inspect.signature(cls)).replace(' -> None', ''))
 
-    if match_args:
-        # I could probably compute this once
-        _set_new_attribute(cls, '__match_args__',
-                           tuple(f.name for f in std_init_fields))
-
-    if slots:
-        cls = _add_slots(cls, frozen)
-
-    abc.update_abstractmethods(cls)
-
-    return cls
-
-
-# _dataclass_getstate and _dataclass_setstate are needed for pickling frozen
-# classes with slots.  These could be slighly more performant if we generated
-# the code instead of iterating over fields.  But that can be a project for
-# another day, if performance becomes an issue.
-def _dataclass_getstate(self):
-    return [getattr(self, f.name) for f in fields(self)]
-
-
-def _dataclass_setstate(self, state):
-    for field, value in zip(fields(self), state):
-        # use setattr because dataclass may be frozen
-        object.__setattr__(self, field.name, value)
-
-
-def _add_slots(cls, is_frozen):
-    # Need to create a new class, since we can't set __slots__
-    #  after a class has been created.
-
-    # Make sure __slots__ isn't already set.
-    if '__slots__' in cls.__dict__:
-        raise TypeError(f'{cls.__name__} already specifies __slots__')
-
-    # Create a new dict for our new class.
-    cls_dict = dict(cls.__dict__)
-    field_names = tuple(f.name for f in fields(cls))
-    cls_dict['__slots__'] = field_names
-    for field_name in field_names:
-        # Remove our attributes, if present. They'll still be
-        #  available in _MARKER.
-        cls_dict.pop(field_name, None)
-
-    # Remove __dict__ itself.
-    cls_dict.pop('__dict__', None)
-
-    # And finally create the class.
-    qualname = getattr(cls, '__qualname__', None)
-    cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
-    if qualname is not None:
-        cls.__qualname__ = qualname
-
-    if is_frozen:
-        # Need this for pickling frozen classes with slots.
-        cls.__getstate__ = _dataclass_getstate
-        cls.__setstate__ = _dataclass_setstate
-
     return cls
 
 
 def dataclass(cls=None, /, *, init=True, repr=True, eq=True, order=False,
-              unsafe_hash=False, frozen=False, match_args=True,
-              kw_only=False, slots=False):
+              unsafe_hash=False, frozen=False):
     """Returns the same class as was passed in, with dunder methods
     added based on the fields defined in the class.
 
@@ -1166,15 +1006,11 @@
     repr is true, a __repr__() method is added. If order is true, rich
     comparison dunder methods are added. If unsafe_hash is true, a
     __hash__() method function is added. If frozen is true, fields may
-    not be assigned to after instance creation. If match_args is true,
-    the __match_args__ tuple is added. If kw_only is true, then by
-    default all fields are keyword-only. If slots is true, an
-    __slots__ attribute is added.
+    not be assigned to after instance creation.
     """
 
     def wrap(cls):
-        return _process_class(cls, init, repr, eq, order, unsafe_hash,
-                              frozen, match_args, kw_only, slots)
+        return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
 
     # See if we're being called as @dataclass or @dataclass().
     if cls is None:
@@ -1211,7 +1047,7 @@
 def is_dataclass(obj):
     """Returns True if obj is a dataclass or an instance of a
     dataclass."""
-    cls = obj if isinstance(obj, type) and not isinstance(obj, GenericAlias) else type(obj)
+    cls = obj if isinstance(obj, type) else type(obj)
     return hasattr(cls, _FIELDS)
 
 
@@ -1333,7 +1169,7 @@
 
 def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
                    repr=True, eq=True, order=False, unsafe_hash=False,
-                   frozen=False, match_args=True, kw_only=False, slots=False):
+                   frozen=False):
     """Return a new dynamically created dataclass.
 
     The dataclass name will be 'cls_name'.  'fields' is an iterable
@@ -1359,12 +1195,14 @@
 
     if namespace is None:
         namespace = {}
+    else:
+        # Copy namespace since we're going to mutate it.
+        namespace = namespace.copy()
 
     # While we're looking through the field names, validate that they
     # are identifiers, are not keywords, and not duplicates.
     seen = set()
-    annotations = {}
-    defaults = {}
+    anns = {}
     for item in fields:
         if isinstance(item, str):
             name = item
@@ -1373,7 +1211,7 @@
             name, tp, = item
         elif len(item) == 3:
             name, tp, spec = item
-            defaults[name] = spec
+            namespace[name] = spec
         else:
             raise TypeError(f'Invalid field: {item!r}')
 
@@ -1385,22 +1223,14 @@
             raise TypeError(f'Field name duplicated: {name!r}')
 
         seen.add(name)
-        annotations[name] = tp
+        anns[name] = tp
 
-    # Update 'ns' with the user-supplied namespace plus our calculated values.
-    def exec_body_callback(ns):
-        ns.update(namespace)
-        ns.update(defaults)
-        ns['__annotations__'] = annotations
-
+    namespace['__annotations__'] = anns
     # We use `types.new_class()` instead of simply `type()` to allow dynamic creation
-    # of generic dataclasses.
-    cls = types.new_class(cls_name, bases, {}, exec_body_callback)
-
-    # Apply the normal decorator.
+    # of generic dataclasses.
+    cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
     return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
-                     unsafe_hash=unsafe_hash, frozen=frozen,
-                     match_args=match_args, kw_only=kw_only, slots=slots)
+                     unsafe_hash=unsafe_hash, frozen=frozen)
 
 
 def replace(obj, /, **changes):
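
A hypothetical call matching the rewritten make_dataclass(): a 3-tuple field takes its default from the namespace entry the code above now creates directly:

    from dataclasses import make_dataclass

    Point = make_dataclass('Point', [('x', int), ('y', int, 0)])
    print(Point(1))   # Point(x=1, y=0)
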
@@ -1441,7 +1271,7 @@
             continue
 
         if f.name not in changes:
-            if f._field_type is _FIELD_INITVAR and f.default is MISSING:
+            if f._field_type is _FIELD_INITVAR:
                 raise ValueError(f"InitVar {f.name!r} "
                                  'must be specified with replace()')
             changes[f.name] = getattr(obj, f.name)
diff --git a/common/py3-stdlib/datetime.py b/common/py3-stdlib/datetime.py
index 6bf37cc..e508d99 100644
--- a/common/py3-stdlib/datetime.py
+++ b/common/py3-stdlib/datetime.py
@@ -11,7 +11,6 @@
 import time as _time
 import math as _math
 import sys
-from operator import index as _index
 
 def _cmp(x, y):
     return 0 if x == y else 1 if x > y else -1
@@ -381,10 +380,42 @@
                          "-timedelta(hours=24) and timedelta(hours=24)" %
                          (name, offset))
 
+def _check_int_field(value):
+    if isinstance(value, int):
+        return value
+    if isinstance(value, float):
+        raise TypeError('integer argument expected, got float')
+    try:
+        value = value.__index__()
+    except AttributeError:
+        pass
+    else:
+        if not isinstance(value, int):
+            raise TypeError('__index__ returned non-int (type %s)' %
+                            type(value).__name__)
+        return value
+    orig = value
+    try:
+        value = value.__int__()
+    except AttributeError:
+        pass
+    else:
+        if not isinstance(value, int):
+            raise TypeError('__int__ returned non-int (type %s)' %
+                            type(value).__name__)
+        import warnings
+        warnings.warn("an integer is required (got type %s)"  %
+                      type(orig).__name__,
+                      DeprecationWarning,
+                      stacklevel=2)
+        return value
+    raise TypeError('an integer is required (got type %s)' %
+                    type(value).__name__)
+
 def _check_date_fields(year, month, day):
-    year = _index(year)
-    month = _index(month)
-    day = _index(day)
+    year = _check_int_field(year)
+    month = _check_int_field(month)
+    day = _check_int_field(day)
     if not MINYEAR <= year <= MAXYEAR:
         raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
     if not 1 <= month <= 12:
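
Behaviour-wise, the restored _check_int_field() accepts exact ints and __index__ carriers silently, accepts __int__ carriers with a DeprecationWarning, and rejects floats outright; a sketch, assuming the pre-3.10 semantics this hunk brings back:

    import datetime
    import warnings

    class YearLike:
        def __int__(self):               # only __int__, deliberately no __index__
            return 2020

    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        print(datetime.date(YearLike(), 1, 1))   # 2020-01-01, plus a DeprecationWarning
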
@@ -395,10 +426,10 @@
     return year, month, day
 
 def _check_time_fields(hour, minute, second, microsecond, fold):
-    hour = _index(hour)
-    minute = _index(minute)
-    second = _index(second)
-    microsecond = _index(microsecond)
+    hour = _check_int_field(hour)
+    minute = _check_int_field(minute)
+    second = _check_int_field(second)
+    microsecond = _check_int_field(microsecond)
     if not 0 <= hour <= 23:
         raise ValueError('hour must be in 0..23', hour)
     if not 0 <= minute <= 59:
@@ -2327,7 +2358,7 @@
 #    This is again a requirement for a sane tzinfo class.
 #
 # 4. (x+k).s = x.s
-#    This follows from #2, and that datetime.timetz+timedelta preserves tzinfo.
+#    This follows from #2, and that datetime.timetz+timedelta preserves tzinfo.
 #
 # 5. (x+k).n = x.n + k
 #    Again follows from how arithmetic is defined.
@@ -2510,10 +2541,10 @@
     # Clean up unused names
     del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
          _DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
-         _check_date_fields, _check_time_fields,
+         _check_date_fields, _check_int_field, _check_time_fields,
          _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
          _date_class, _days_before_month, _days_before_year, _days_in_month,
-         _format_time, _format_offset, _index, _is_leap, _isoweek1monday, _math,
+         _format_time, _format_offset, _is_leap, _isoweek1monday, _math,
          _ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
          _divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
          _parse_hh_mm_ss_ff, _IsoCalendarDate)
diff --git a/common/py3-stdlib/difflib.py b/common/py3-stdlib/difflib.py
index afd8a0c..0dda80d 100644
--- a/common/py3-stdlib/difflib.py
+++ b/common/py3-stdlib/difflib.py
@@ -62,7 +62,7 @@
     notion, pairing up elements that appear uniquely in each sequence.
     That, and the method here, appear to yield more intuitive difference
     reports than does diff.  This method appears to be the least vulnerable
-    to syncing up on blocks of "junk lines", though (like blank lines in
+    to syncing up on blocks of "junk lines", though (like blank lines in
     ordinary text files, or maybe "<P>" lines in HTML files).  That may be
     because this is the only method of the 3 that has a *concept* of
     "junk" <wink>.
@@ -115,6 +115,38 @@
     case.  SequenceMatcher is quadratic time for the worst case and has
     expected-case behavior dependent in a complicated way on how many
     elements the sequences have in common; best case time is linear.
+
+    Methods:
+
+    __init__(isjunk=None, a='', b='')
+        Construct a SequenceMatcher.
+
+    set_seqs(a, b)
+        Set the two sequences to be compared.
+
+    set_seq1(a)
+        Set the first sequence to be compared.
+
+    set_seq2(b)
+        Set the second sequence to be compared.
+
+    find_longest_match(alo=0, ahi=None, blo=0, bhi=None)
+        Find longest matching block in a[alo:ahi] and b[blo:bhi].
+
+    get_matching_blocks()
+        Return list of triples describing matching subsequences.
+
+    get_opcodes()
+        Return list of 5-tuples describing how to turn a into b.
+
+    ratio()
+        Return a measure of the sequences' similarity (float in [0,1]).
+
+    quick_ratio()
+        Return an upper bound on .ratio() relatively quickly.
+
+    real_quick_ratio()
+        Return an upper bound on ratio() very quickly.
     """
 
     def __init__(self, isjunk=None, a='', b='', autojunk=True):
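
A quick tour of the methods listed in the restored docstring:

    from difflib import SequenceMatcher

    sm = SequenceMatcher(None, "abcd", "bcde")
    print(sm.ratio())                # 0.75: 2 * 3 matching chars / 8 total
    print(sm.get_matching_blocks())  # [Match(a=1, b=0, size=3), Match(a=4, b=4, size=0)]
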
@@ -805,6 +837,14 @@
     +   4. Complicated is better than complex.
     ?           ++++ ^                      ^
     +   5. Flat is better than nested.
+
+    Methods:
+
+    __init__(linejunk=None, charjunk=None)
+        Construct a text differencer, with optional filters.
+
+    compare(a, b)
+        Compare two sequences of lines; generate the resulting delta.
     """
 
     def __init__(self, linejunk=None, charjunk=None):
diff --git a/common/py3-stdlib/dis.py b/common/py3-stdlib/dis.py
index fe5d24e..e289e17 100644
--- a/common/py3-stdlib/dis.py
+++ b/common/py3-stdlib/dis.py
@@ -338,11 +338,8 @@
                 argval, argrepr = _get_const_info(arg, constants)
             elif op in hasname:
                 argval, argrepr = _get_name_info(arg, names)
-            elif op in hasjabs:
-                argval = arg*2
-                argrepr = "to " + repr(argval)
             elif op in hasjrel:
-                argval = offset + 2 + arg*2
+                argval = offset + 2 + arg
                 argrepr = "to " + repr(argval)
             elif op in haslocal:
                 argval, argrepr = _get_name_info(arg, varnames)
@@ -387,7 +384,7 @@
                        constants=None, cells=None, linestarts=None,
                        *, file=None, line_offset=0):
     # Omit the line number column entirely if we have no line number info
-    show_lineno = bool(linestarts)
+    show_lineno = linestarts is not None
     if show_lineno:
         maxlineno = max(linestarts.values()) + line_offset
         if maxlineno >= 1000:
@@ -428,7 +425,6 @@
             extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
         else:
             arg = None
-            extended_arg = 0
         yield (i, op, arg)
 
 def findlabels(code):
@@ -441,9 +437,9 @@
     for offset, op, arg in _unpack_opargs(code):
         if arg is not None:
             if op in hasjrel:
-                label = offset + 2 + arg*2
+                label = offset + 2 + arg
             elif op in hasjabs:
-                label = arg*2
+                label = arg
             else:
                 continue
             if label not in labels:
@@ -453,15 +449,32 @@
 def findlinestarts(code):
     """Find the offsets in a byte code which are start of lines in the source.
 
-    Generate pairs (offset, lineno)
-    """
-    lastline = None
-    for start, end, line in code.co_lines():
-        if line is not None and line != lastline:
-            lastline = line
-            yield start, line
-    return
+    Generate pairs (offset, lineno) as described in Python/compile.c.
 
+    """
+    byte_increments = code.co_lnotab[0::2]
+    line_increments = code.co_lnotab[1::2]
+    bytecode_len = len(code.co_code)
+
+    lastlineno = None
+    lineno = code.co_firstlineno
+    addr = 0
+    for byte_incr, line_incr in zip(byte_increments, line_increments):
+        if byte_incr:
+            if lineno != lastlineno:
+                yield (addr, lineno)
+                lastlineno = lineno
+            addr += byte_incr
+            if addr >= bytecode_len:
+                # The rest of the lnotab byte offsets are past the end of
+                # the bytecode, so the lines were optimized away.
+                return
+        if line_incr >= 0x80:
+            # line_increments is an array of 8-bit signed integers
+            line_incr -= 0x100
+        lineno += line_incr
+    if lineno != lastlineno:
+        yield (addr, lineno)
 
 class Bytecode:
     """The bytecode operations of a piece of code
diff --git a/common/py3-stdlib/distutils/__init__.py b/common/py3-stdlib/distutils/__init__.py
index fdad6f6..d823d04 100644
--- a/common/py3-stdlib/distutils/__init__.py
+++ b/common/py3-stdlib/distutils/__init__.py
@@ -9,12 +9,5 @@
 """
 
 import sys
-import warnings
 
 __version__ = sys.version[:sys.version.index(' ')]
-
-_DEPRECATION_MESSAGE = ("The distutils package is deprecated and slated for "
-                        "removal in Python 3.12. Use setuptools or check "
-                        "PEP 632 for potential alternatives")
-warnings.warn(_DEPRECATION_MESSAGE,
-              DeprecationWarning, 2)
diff --git a/common/py3-stdlib/distutils/ccompiler.py b/common/py3-stdlib/distutils/ccompiler.py
index 4c47f2e..b5ef143 100644
--- a/common/py3-stdlib/distutils/ccompiler.py
+++ b/common/py3-stdlib/distutils/ccompiler.py
@@ -392,7 +392,7 @@
         return output_dir, macros, include_dirs
 
     def _prep_compile(self, sources, output_dir, depends=None):
-        """Decide which source files must be recompiled.
+        """Decide which souce files must be recompiled.
 
         Determine the list of object files corresponding to 'sources',
         and figure out which ones really need to be recompiled.
diff --git a/common/py3-stdlib/distutils/command/__init__.py b/common/py3-stdlib/distutils/command/__init__.py
index fd0bfae..481eea9 100644
--- a/common/py3-stdlib/distutils/command/__init__.py
+++ b/common/py3-stdlib/distutils/command/__init__.py
@@ -19,6 +19,7 @@
            'bdist',
            'bdist_dumb',
            'bdist_rpm',
+           'bdist_wininst',
            'check',
            'upload',
            # These two are reserved for future use:
diff --git a/common/py3-stdlib/distutils/command/bdist.py b/common/py3-stdlib/distutils/command/bdist.py
index d580a80..014871d 100644
--- a/common/py3-stdlib/distutils/command/bdist.py
+++ b/common/py3-stdlib/distutils/command/bdist.py
@@ -62,7 +62,7 @@
 
     # Establish the preferred order (for the --help-formats option).
     format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar',
-                       'zip', 'msi']
+                       'wininst', 'zip', 'msi']
 
     # And the real information.
     format_command = {'rpm':   ('bdist_rpm',  "RPM distribution"),
@@ -71,6 +71,8 @@
                       'xztar': ('bdist_dumb', "xz'ed tar file"),
                       'ztar':  ('bdist_dumb', "compressed tar file"),
                       'tar':   ('bdist_dumb', "tar file"),
+                      'wininst': ('bdist_wininst',
+                                  "Windows executable installer"),
                       'zip':   ('bdist_dumb', "ZIP file"),
                       'msi':   ('bdist_msi',  "Microsoft Installer")
                       }
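
With the wininst entries restored, a format name resolves back to its command through this table; the dict below is a stripped-down sketch of the mapping above:

    format_command = {'wininst': ('bdist_wininst', "Windows executable installer")}
    cmd_name, description = format_command['wininst']
    print(cmd_name)   # bdist_wininst, e.g. selected via: setup.py bdist --formats=wininst
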
diff --git a/common/py3-stdlib/distutils/command/bdist_msi.py b/common/py3-stdlib/distutils/command/bdist_msi.py
index 2ed017b..0863a18 100644
--- a/common/py3-stdlib/distutils/command/bdist_msi.py
+++ b/common/py3-stdlib/distutils/command/bdist_msi.py
@@ -1,5 +1,7 @@
 # Copyright (C) 2005, 2006 Martin von Löwis
 # Licensed to PSF under a Contributor Agreement.
+# The bdist_wininst command proper
+# based on bdist_wininst
 """
 Implements the bdist_msi command.
 """
diff --git a/common/py3-stdlib/distutils/command/bdist_wininst.py b/common/py3-stdlib/distutils/command/bdist_wininst.py
new file mode 100644
index 0000000..0e9ddaa
--- /dev/null
+++ b/common/py3-stdlib/distutils/command/bdist_wininst.py
@@ -0,0 +1,377 @@
+"""distutils.command.bdist_wininst
+
+Implements the Distutils 'bdist_wininst' command: create a windows installer
+exe-program."""
+
+import os
+import sys
+import warnings
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import remove_tree
+from distutils.errors import *
+from distutils.sysconfig import get_python_version
+from distutils import log
+
+class bdist_wininst(Command):
+
+    description = "create an executable installer for MS Windows"
+
+    user_options = [('bdist-dir=', None,
+                     "temporary directory for creating the distribution"),
+                    ('plat-name=', 'p',
+                     "platform name to embed in generated filenames "
+                     "(default: %s)" % get_platform()),
+                    ('keep-temp', 'k',
+                     "keep the pseudo-installation tree around after " +
+                     "creating the distribution archive"),
+                    ('target-version=', None,
+                     "require a specific python version" +
+                     " on the target system"),
+                    ('no-target-compile', 'c',
+                     "do not compile .py to .pyc on the target system"),
+                    ('no-target-optimize', 'o',
+                     "do not compile .py to .pyo (optimized) "
+                     "on the target system"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in"),
+                    ('bitmap=', 'b',
+                     "bitmap to use for the installer instead of python-powered logo"),
+                    ('title=', 't',
+                     "title to display on the installer background instead of default"),
+                    ('skip-build', None,
+                     "skip rebuilding everything (for testing/debugging)"),
+                    ('install-script=', None,
+                     "basename of installation script to be run after "
+                     "installation or before deinstallation"),
+                    ('pre-install-script=', None,
+                     "Fully qualified filename of a script to be run before "
+                     "any files are installed.  This script need not be in the "
+                     "distribution"),
+                    ('user-access-control=', None,
+                     "specify Vista's UAC handling - 'none'/default=no "
+                     "handling, 'auto'=use UAC if target Python installed for "
+                     "all users, 'force'=always use UAC"),
+                   ]
+
+    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
+                       'skip-build']
+
+    # bpo-10945: bdist_wininst requires mbcs encoding only available on Windows
+    _unsupported = (sys.platform != "win32")
+
+    def __init__(self, *args, **kw):
+        super().__init__(*args, **kw)
+        warnings.warn("bdist_wininst command is deprecated since Python 3.8, "
+                      "use bdist_wheel (wheel packages) instead",
+                      DeprecationWarning, 2)
+
+    def initialize_options(self):
+        self.bdist_dir = None
+        self.plat_name = None
+        self.keep_temp = 0
+        self.no_target_compile = 0
+        self.no_target_optimize = 0
+        self.target_version = None
+        self.dist_dir = None
+        self.bitmap = None
+        self.title = None
+        self.skip_build = None
+        self.install_script = None
+        self.pre_install_script = None
+        self.user_access_control = None
+
+
+    def finalize_options(self):
+        self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
+
+        if self.bdist_dir is None:
+            if self.skip_build and self.plat_name:
+                # If build is skipped and plat_name is overridden, bdist will
+                # not see the correct 'plat_name' - so set that up manually.
+                bdist = self.distribution.get_command_obj('bdist')
+                bdist.plat_name = self.plat_name
+                # next the command will be initialized using that name
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'wininst')
+
+        if not self.target_version:
+            self.target_version = ""
+
+        if not self.skip_build and self.distribution.has_ext_modules():
+            short_version = get_python_version()
+            if self.target_version and self.target_version != short_version:
+                raise DistutilsOptionError(
+                      "target version can only be %s, or the '--skip-build'" \
+                      " option must be specified" % (short_version,))
+            self.target_version = short_version
+
+        self.set_undefined_options('bdist',
+                                   ('dist_dir', 'dist_dir'),
+                                   ('plat_name', 'plat_name'),
+                                  )
+
+        if self.install_script:
+            for script in self.distribution.scripts:
+                if self.install_script == os.path.basename(script):
+                    break
+            else:
+                raise DistutilsOptionError(
+                      "install_script '%s' not found in scripts"
+                      % self.install_script)
+
+    def run(self):
+        if (sys.platform != "win32" and
+            (self.distribution.has_ext_modules() or
+             self.distribution.has_c_libraries())):
+            raise DistutilsPlatformError \
+                  ("distribution contains extensions and/or C libraries; "
+                   "must be compiled on a Windows 32 platform")
+
+        if not self.skip_build:
+            self.run_command('build')
+
+        install = self.reinitialize_command('install', reinit_subcommands=1)
+        install.root = self.bdist_dir
+        install.skip_build = self.skip_build
+        install.warn_dir = 0
+        install.plat_name = self.plat_name
+
+        install_lib = self.reinitialize_command('install_lib')
+        # we do not want to include pyc or pyo files
+        install_lib.compile = 0
+        install_lib.optimize = 0
+
+        if self.distribution.has_ext_modules():
+            # If we are building an installer for a Python version other
+            # than the one we are currently running, then we need to ensure
+            # our build_lib reflects the other Python version rather than ours.
+            # Note that for target_version!=sys.version, we must have skipped the
+            # build step, so there is no issue with enforcing the build of this
+            # version.
+            target_version = self.target_version
+            if not target_version:
+                assert self.skip_build, "Should have already checked this"
+                target_version = '%d.%d' % sys.version_info[:2]
+            plat_specifier = ".%s-%s" % (self.plat_name, target_version)
+            build = self.get_finalized_command('build')
+            build.build_lib = os.path.join(build.build_base,
+                                           'lib' + plat_specifier)
+
+        # Use a custom scheme for the zip-file, because we have to decide
+        # at installation time which scheme to use.
+        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
+            value = key.upper()
+            if key == 'headers':
+                value = value + '/Include/$dist_name'
+            setattr(install,
+                    'install_' + key,
+                    value)
+
+        log.info("installing to %s", self.bdist_dir)
+        install.ensure_finalized()
+
+        # avoid warning of 'install_lib' about installing
+        # into a directory not in sys.path
+        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
+
+        install.run()
+
+        del sys.path[0]
+
+        # And make an archive relative to the root of the
+        # pseudo-installation tree.
+        from tempfile import mktemp
+        archive_basename = mktemp()
+        fullname = self.distribution.get_fullname()
+        arcname = self.make_archive(archive_basename, "zip",
+                                    root_dir=self.bdist_dir)
+        # create an exe containing the zip-file
+        self.create_exe(arcname, fullname, self.bitmap)
+        if self.distribution.has_ext_modules():
+            pyversion = get_python_version()
+        else:
+            pyversion = 'any'
+        self.distribution.dist_files.append(('bdist_wininst', pyversion,
+                                             self.get_installer_filename(fullname)))
+        # remove the zip-file again
+        log.debug("removing temporary file '%s'", arcname)
+        os.remove(arcname)
+
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+    def get_inidata(self):
+        # Return data describing the installation.
+        lines = []
+        metadata = self.distribution.metadata
+
+        # Write the [metadata] section.
+        lines.append("[metadata]")
+
+        # 'info' will be displayed in the installer's dialog box,
+        # describing the items to be installed.
+        info = (metadata.long_description or '') + '\n'
+
+        # Escape newline characters
+        def escape(s):
+            return s.replace("\n", "\\n")
+
+        for name in ["author", "author_email", "description", "maintainer",
+                     "maintainer_email", "name", "url", "version"]:
+            data = getattr(metadata, name, "")
+            if data:
+                info = info + ("\n    %s: %s" % \
+                               (name.capitalize(), escape(data)))
+                lines.append("%s=%s" % (name, escape(data)))
+
+        # The [setup] section contains entries controlling
+        # the installer runtime.
+        lines.append("\n[Setup]")
+        if self.install_script:
+            lines.append("install_script=%s" % self.install_script)
+        lines.append("info=%s" % escape(info))
+        lines.append("target_compile=%d" % (not self.no_target_compile))
+        lines.append("target_optimize=%d" % (not self.no_target_optimize))
+        if self.target_version:
+            lines.append("target_version=%s" % self.target_version)
+        if self.user_access_control:
+            lines.append("user_access_control=%s" % self.user_access_control)
+
+        title = self.title or self.distribution.get_fullname()
+        lines.append("title=%s" % escape(title))
+        import time
+        import distutils
+        build_info = "Built %s with distutils-%s" % \
+                     (time.ctime(time.time()), distutils.__version__)
+        lines.append("build_info=%s" % build_info)
+        return "\n".join(lines)
+
+    def create_exe(self, arcname, fullname, bitmap=None):
+        import struct
+
+        self.mkpath(self.dist_dir)
+
+        cfgdata = self.get_inidata()
+
+        installer_name = self.get_installer_filename(fullname)
+        self.announce("creating %s" % installer_name)
+
+        if bitmap:
+            with open(bitmap, "rb") as f:
+                bitmapdata = f.read()
+            bitmaplen = len(bitmapdata)
+        else:
+            bitmaplen = 0
+
+        with open(installer_name, "wb") as file:
+            file.write(self.get_exe_bytes())
+            if bitmap:
+                file.write(bitmapdata)
+
+            # Encode cfgdata from str to bytes using the mbcs codec
+            if isinstance(cfgdata, str):
+                cfgdata = cfgdata.encode("mbcs")
+
+            # Append the pre-install script
+            cfgdata = cfgdata + b"\0"
+            if self.pre_install_script:
+                # We need to normalize newlines, so we open in text mode and
+                # convert back to bytes. "latin-1" simply avoids any possible
+                # failures.
+                with open(self.pre_install_script, "r",
+                          encoding="latin-1") as script:
+                    script_data = script.read().encode("latin-1")
+                cfgdata = cfgdata + script_data + b"\n\0"
+            else:
+                # empty pre-install script
+                cfgdata = cfgdata + b"\0"
+            file.write(cfgdata)
+
+            # The 'magic number' 0x1234567B is used to make sure that the
+            # binary layout of 'cfgdata' is what the wininst.exe binary
+            # expects.  If the layout changes, increment that number, make
+            # the corresponding changes to the wininst.exe sources, and
+            # recompile them.
+            header = struct.pack("<iii",
+                                0x1234567B,       # tag
+                                len(cfgdata),     # length
+                                bitmaplen,        # number of bytes in bitmap
+                                )
+            file.write(header)
+            with open(arcname, "rb") as f:
+                file.write(f.read())
+
+    def get_installer_filename(self, fullname):
+        # Factored out to allow overriding in subclasses
+        if self.target_version:
+            # if we create an installer for a specific python version,
+            # it's better to include this in the name
+            installer_name = os.path.join(self.dist_dir,
+                                          "%s.%s-py%s.exe" %
+                                           (fullname, self.plat_name, self.target_version))
+        else:
+            installer_name = os.path.join(self.dist_dir,
+                                          "%s.%s.exe" % (fullname, self.plat_name))
+        return installer_name
+
+    def get_exe_bytes(self):
+        # If a target-version other than the current version has been
+        # specified, then using the MSVC version from *this* build is no good.
+        # Without actually finding and executing the target version and parsing
+        # its sys.version, we just hard-code our knowledge of old versions.
+        # NOTE: Possible alternative is to allow "--target-version" to
+        # specify a Python executable rather than a simple version string.
+        # We can then execute this program to obtain any info we need, such
+        # as the real sys.version string for the build.
+        cur_version = get_python_version()
+
+        # If the target version is *later* than us, then we assume they
+        # use what we use
+        # string compares seem wrong, but are what sysconfig.py itself uses
+        if self.target_version and self.target_version < cur_version:
+            if self.target_version < "2.4":
+                bv = '6.0'
+            elif self.target_version == "2.4":
+                bv = '7.1'
+            elif self.target_version == "2.5":
+                bv = '8.0'
+            elif self.target_version <= "3.2":
+                bv = '9.0'
+            elif self.target_version <= "3.4":
+                bv = '10.0'
+            else:
+                bv = '14.0'
+        else:
+            # for current version - use authoritative check.
+            try:
+                from msvcrt import CRT_ASSEMBLY_VERSION
+            except ImportError:
+                # cross-building, so assume the latest version
+                bv = '14.0'
+            else:
+                # as far as we know, CRT is binary compatible based on
+                # the first field, so assume 'x.0' until proven otherwise
+                major = CRT_ASSEMBLY_VERSION.partition('.')[0]
+                bv = major + '.0'
+
+
+        # wininst-x.y.exe is in the same directory as this file
+        directory = os.path.dirname(__file__)
+        # we must use a wininst-x.y.exe built with the same C compiler
+        # used for python.  XXX What about mingw, borland, and so on?
+
+        # if plat_name starts with "win" but is not "win32"
+        # we want to strip "win" and leave the rest (e.g. -amd64)
+        # for all other cases, we don't want any suffix
+        if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
+            sfix = self.plat_name[3:]
+        else:
+            sfix = ''
+
+        filename = os.path.join(directory, "wininst-%s%s.exe" % (bv, sfix))
+        with open(filename, "rb") as f:
+            return f.read()
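
For orientation, the layout create_exe() writes is: the wininst stub, the
optional bitmap, the NUL-terminated cfgdata, a 12-byte struct header, and
finally the zip archive. A minimal sketch of packing and re-reading that
header; the tag, cfgdata length, and bitmap length are the real fields,
while the payload below is made up:

    import struct

    cfgdata = b"[metadata]\nname=demo\0\0"   # illustrative payload only
    header = struct.pack("<iii",
                         0x1234567B,         # tag expected by wininst.exe
                         len(cfgdata),       # length of cfgdata
                         0)                  # bitmap byte count (none here)

    tag, cfglen, bmplen = struct.unpack("<iii", header)
    assert (tag, cfglen, bmplen) == (0x1234567B, len(cfgdata), 0)
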
diff --git a/common/py3-stdlib/distutils/command/check.py b/common/py3-stdlib/distutils/command/check.py
index 73a30f3..ada2500 100644
--- a/common/py3-stdlib/distutils/command/check.py
+++ b/common/py3-stdlib/distutils/command/check.py
@@ -83,7 +83,7 @@
             name, version, URL
 
         Recommended fields:
-            (author and author_email) or (maintainer and maintainer_email)
+            (author and author_email) or (maintainer and maintainer_email))
 
         Warns if any are missing.
         """
diff --git a/common/py3-stdlib/distutils/command/install.py b/common/py3-stdlib/distutils/command/install.py
index 01d5331..aaa300e 100644
--- a/common/py3-stdlib/distutils/command/install.py
+++ b/common/py3-stdlib/distutils/command/install.py
@@ -3,9 +3,7 @@
 Implements the Distutils 'install' command."""
 
 import sys
-import sysconfig
 import os
-import re
 
 from distutils import log
 from distutils.core import Command
@@ -19,55 +17,35 @@
 
 from site import USER_BASE
 from site import USER_SITE
+HAS_USER_SITE = True
 
-HAS_USER_SITE = (USER_SITE is not None)
+WINDOWS_SCHEME = {
+    'purelib': '$base/Lib/site-packages',
+    'platlib': '$base/Lib/site-packages',
+    'headers': '$base/Include/$dist_name',
+    'scripts': '$base/Scripts',
+    'data'   : '$base',
+}
 
-# The keys to an installation scheme; if any new types of files are to be
-# installed, be sure to add an entry to every scheme in
-# sysconfig._INSTALL_SCHEMES, and to SCHEME_KEYS here.
-SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
+INSTALL_SCHEMES = {
+    'unix_prefix': {
+        'purelib': '$base/lib/python$py_version_short/site-packages',
+        'platlib': '$platbase/$platlibdir/python$py_version_short/site-packages',
+        'headers': '$base/include/python$py_version_short$abiflags/$dist_name',
+        'scripts': '$base/bin',
+        'data'   : '$base',
+        },
+    'unix_home': {
+        'purelib': '$base/lib/python',
+        'platlib': '$base/$platlibdir/python',
+        'headers': '$base/include/python/$dist_name',
+        'scripts': '$base/bin',
+        'data'   : '$base',
+        },
+    'nt': WINDOWS_SCHEME,
+    }
 
-# The following code provides backward-compatible INSTALL_SCHEMES
-# while making the sysconfig module the single point of truth.
-# This makes it easier for OS distributions where they need to
-# alter locations for packages installations in a single place.
-# Note that this module is deprecated (PEP 632); all consumers
-# of this information should switch to using sysconfig directly.
-INSTALL_SCHEMES = {"unix_prefix": {}, "unix_home": {}, "nt": {}}
-
-# Copy from sysconfig._INSTALL_SCHEMES
-for key in SCHEME_KEYS:
-    for distutils_scheme_name, sys_scheme_name in (
-            ("unix_prefix", "posix_prefix"), ("unix_home", "posix_home"),
-            ("nt", "nt")):
-        sys_key = key
-        sys_scheme = sysconfig._INSTALL_SCHEMES[sys_scheme_name]
-        if key == "headers" and key not in sys_scheme:
-            # On POSIX-y platforms, Python will:
-            # - Build from .h files in 'headers' (only there when
-            #   building CPython)
-            # - Install .h files to 'include'
-            # When 'headers' is missing, fall back to 'include'
-            sys_key = 'include'
-        INSTALL_SCHEMES[distutils_scheme_name][key] = sys_scheme[sys_key]
-
-# Transformation to different template format
-for main_key in INSTALL_SCHEMES:
-    for key, value in INSTALL_SCHEMES[main_key].items():
-        # Change all ocurences of {variable} to $variable
-        value = re.sub(r"\{(.+?)\}", r"$\g<1>", value)
-        value = value.replace("$installed_base", "$base")
-        value = value.replace("$py_version_nodot_plat", "$py_version_nodot")
-        if key == "headers":
-            value += "/$dist_name"
-        if sys.version_info >= (3, 9) and key == "platlib":
-            # platlibdir is available since 3.9: bpo-1294959
-            value = value.replace("/lib/", "/$platlibdir/")
-        INSTALL_SCHEMES[main_key][key] = value
-
-# The following part of INSTALL_SCHEMES has a different definition
-# than the one in sysconfig, but because both depend on the site module,
-# the outcomes should be the same.
+# user site schemes
 if HAS_USER_SITE:
     INSTALL_SCHEMES['nt_user'] = {
         'purelib': '$usersite',
@@ -86,6 +64,11 @@
         'data'   : '$userbase',
         }
 
+# The keys to an installation scheme; if any new types of files are to be
+# installed, be sure to add an entry to every installation scheme above,
+# and to SCHEME_KEYS here.
+SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
+
 
 class install(Command):
 
@@ -186,9 +169,8 @@
         self.install_lib = None         # set to either purelib or platlib
         self.install_scripts = None
         self.install_data = None
-        if HAS_USER_SITE:
-            self.install_userbase = USER_BASE
-            self.install_usersite = USER_SITE
+        self.install_userbase = USER_BASE
+        self.install_usersite = USER_SITE
 
         self.compile = None
         self.optimize = None
@@ -323,9 +305,6 @@
             self.config_vars['userbase'] = self.install_userbase
             self.config_vars['usersite'] = self.install_usersite
 
-        if sysconfig.is_python_build(True):
-            self.config_vars['srcdir'] = sysconfig.get_config_var('srcdir')
-
         self.expand_basedirs()
 
         self.dump_dirs("post-expand_basedirs()")
@@ -364,9 +343,8 @@
         # Convert directories from Unix /-separated syntax to the local
         # convention.
         self.convert_paths('lib', 'purelib', 'platlib',
-                           'scripts', 'data', 'headers')
-        if HAS_USER_SITE:
-            self.convert_paths('userbase', 'usersite')
+                           'scripts', 'data', 'headers',
+                           'userbase', 'usersite')
 
         # Deprecated
         # Well, we're not actually fully completely finalized yet: we still
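
The $-placeholders in these schemes are substituted from the command's
config_vars during finalization. A minimal sketch of that expansion using
string.Template (distutils itself routes through distutils.util.subst_vars;
the values here are made-up examples):

    import sys
    from string import Template

    scheme = '$base/lib/python$py_version_short/site-packages'
    config_vars = {'base': '/usr/local',
                   'py_version_short': '%d.%d' % sys.version_info[:2]}
    print(Template(scheme).substitute(config_vars))
    # e.g. /usr/local/lib/python3.9/site-packages
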
diff --git a/common/py3-stdlib/distutils/command/upload.py b/common/py3-stdlib/distutils/command/upload.py
index e0ecb65..95e9fda 100644
--- a/common/py3-stdlib/distutils/command/upload.py
+++ b/common/py3-stdlib/distutils/command/upload.py
@@ -9,8 +9,7 @@
 import io
 import hashlib
 from base64 import standard_b64encode
-from urllib.error import HTTPError
-from urllib.request import urlopen, Request
+from urllib.request import urlopen, Request, HTTPError
 from urllib.parse import urlparse
 from distutils.errors import DistutilsError, DistutilsOptionError
 from distutils.core import PyPIRCCommand
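
The restored one-line import works because urllib.request re-exports
HTTPError from urllib.error, which is its canonical home; a quick
stdlib-only check:

    from urllib.error import HTTPError as canonical
    from urllib.request import HTTPError as reexported

    assert canonical is reexported   # the same class object either way
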
diff --git a/common/py3-stdlib/distutils/extension.py b/common/py3-stdlib/distutils/extension.py
index e85032e..c507da3 100644
--- a/common/py3-stdlib/distutils/extension.py
+++ b/common/py3-stdlib/distutils/extension.py
@@ -4,7 +4,6 @@
 modules in setup scripts."""
 
 import os
-import re
 import warnings
 
 # This class is really only used by the "build_ext" command, so it might
@@ -162,7 +161,7 @@
             line = file.readline()
             if line is None:                # eof
                 break
-            if re.match(_variable_rx, line):    # VAR=VALUE, handled in first pass
+            if _variable_rx.match(line):    # VAR=VALUE, handled in first pass
                 continue
 
             if line[0] == line[-1] == "*":
diff --git a/common/py3-stdlib/distutils/msvc9compiler.py b/common/py3-stdlib/distutils/msvc9compiler.py
index a7976fb..6934e96 100644
--- a/common/py3-stdlib/distutils/msvc9compiler.py
+++ b/common/py3-stdlib/distutils/msvc9compiler.py
@@ -673,7 +673,7 @@
         # If a manifest should be embedded, return a tuple of
         # (manifest_filename, resource_id).  Returns None if no manifest
         # should be embedded.  See http://bugs.python.org/issue7833 for why
-        # we want to avoid any manifest for extension modules if we can.
+        # we want to avoid any manifest for extension modules if we can)
         for arg in ld_args:
             if arg.startswith("/MANIFESTFILE:"):
                 temp_manifest = arg.split(":", 1)[1]
diff --git a/common/py3-stdlib/distutils/spawn.py b/common/py3-stdlib/distutils/spawn.py
index 31df3f7..f50edd2 100644
--- a/common/py3-stdlib/distutils/spawn.py
+++ b/common/py3-stdlib/distutils/spawn.py
@@ -54,22 +54,18 @@
         global _cfg_target, _cfg_target_split
         if _cfg_target is None:
             from distutils import sysconfig
-            _cfg_target = sysconfig.get_config_var(
-                                  'MACOSX_DEPLOYMENT_TARGET') or ''
+            _cfg_target = str(sysconfig.get_config_var(
+                                  'MACOSX_DEPLOYMENT_TARGET') or '')
             if _cfg_target:
                 _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
         if _cfg_target:
-            # Ensure that the deployment target of the build process is not
-            # less than 10.3 if the interpreter was built for 10.3 or later.
-            # This ensures extension modules are built with correct
-            # compatibility values, specifically LDSHARED which can use
-            # '-undefined dynamic_lookup' which only works on >= 10.3.
+            # ensure that the deployment target of the build process is not
+            # less than the one used when the interpreter was built; this
+            # ensures extension modules are built with correct compatibility
+            # values
             cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
-            cur_target_split = [int(x) for x in cur_target.split('.')]
-            if _cfg_target_split[:2] >= [10, 3] and cur_target_split[:2] < [10, 3]:
+            if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
                 my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
-                          'now "%s" but "%s" during configure;'
-                          'must use 10.3 or later'
+                          'now "%s" but "%s" during configure'
                                 % (cur_target, _cfg_target))
                 raise DistutilsPlatformError(my_msg)
             env = dict(os.environ,
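
The split-into-ints comparison restored above matters because plain string
comparison misorders multi-digit version components; a two-assert
illustration:

    def parse(v):
        return [int(x) for x in v.split('.')]

    assert '10.10' < '10.9'                  # lexicographic: wrong order
    assert parse('10.10') > parse('10.9')    # integer lists: correct order
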
diff --git a/common/py3-stdlib/distutils/sysconfig.py b/common/py3-stdlib/distutils/sysconfig.py
index 3414a76..37feae5 100644
--- a/common/py3-stdlib/distutils/sysconfig.py
+++ b/common/py3-stdlib/distutils/sysconfig.py
@@ -13,174 +13,56 @@
 import os
 import re
 import sys
-import warnings
-
-from functools import partial
 
 from .errors import DistutilsPlatformError
 
-from sysconfig import (
-    _PREFIX as PREFIX,
-    _BASE_PREFIX as BASE_PREFIX,
-    _EXEC_PREFIX as EXEC_PREFIX,
-    _BASE_EXEC_PREFIX as BASE_EXEC_PREFIX,
-    _PROJECT_BASE as project_base,
-    _PYTHON_BUILD as python_build,
-    _init_posix as sysconfig_init_posix,
-    parse_config_h as sysconfig_parse_config_h,
+# These are needed in a couple of spots, so just compute them once.
+PREFIX = os.path.normpath(sys.prefix)
+EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
+BASE_PREFIX = os.path.normpath(sys.base_prefix)
+BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
 
-    _init_non_posix,
-    _is_python_source_dir,
-    _sys_home,
-
-    _variable_rx,
-    _findvar1_rx,
-    _findvar2_rx,
-
-    expand_makefile_vars,
-    is_python_build,
-    get_config_h_filename,
-    get_config_var,
-    get_config_vars,
-    get_makefile_filename,
-    get_python_version,
-)
-
-# This is better than
-# from sysconfig import _CONFIG_VARS as _config_vars
-# because it makes sure that the global dictionary is initialized
-# which might not be true in the time of import.
-_config_vars = get_config_vars()
-
-if os.name == "nt":
-    from sysconfig import _fix_pcbuild
-
-warnings.warn(
-    'The distutils.sysconfig module is deprecated, use sysconfig instead',
-    DeprecationWarning,
-    stacklevel=2
-)
+# Path to the base directory of the project. On Windows the binary may
+# live in project/PCbuild/win32 or project/PCbuild/amd64.
+# set for cross builds
+if "_PYTHON_PROJECT_BASE" in os.environ:
+    project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
+else:
+    if sys.executable:
+        project_base = os.path.dirname(os.path.abspath(sys.executable))
+    else:
+        # sys.executable can be empty if argv[0] has been changed and Python is
+        # unable to retrieve the real program name
+        project_base = os.getcwd()
 
 
-# Following functions are the same as in sysconfig but with different API
-def parse_config_h(fp, g=None):
-    return sysconfig_parse_config_h(fp, vars=g)
+# python_build: (Boolean) if true, we're either building Python or
+# building an extension with an un-installed Python, so we use
+# different (hard-wired) directories.
+def _is_python_source_dir(d):
+    for fn in ("Setup", "Setup.local"):
+        if os.path.isfile(os.path.join(d, "Modules", fn)):
+            return True
+    return False
 
+_sys_home = getattr(sys, '_home', None)
 
-_python_build = partial(is_python_build, check_home=True)
-_init_posix = partial(sysconfig_init_posix, _config_vars)
-_init_nt = partial(_init_non_posix, _config_vars)
+if os.name == 'nt':
+    def _fix_pcbuild(d):
+        if d and os.path.normcase(d).startswith(
+                os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
+            return PREFIX
+        return d
+    project_base = _fix_pcbuild(project_base)
+    _sys_home = _fix_pcbuild(_sys_home)
 
+def _python_build():
+    if _sys_home:
+        return _is_python_source_dir(_sys_home)
+    return _is_python_source_dir(project_base)
 
-# Similar function is also implemented in sysconfig as _parse_makefile
-# but without the parsing capabilities of distutils.text_file.TextFile.
-def parse_makefile(fn, g=None):
-    """Parse a Makefile-style file.
-    A dictionary containing name/value pairs is returned.  If an
-    optional dictionary is passed in as the second argument, it is
-    used instead of a new dictionary.
-    """
-    from distutils.text_file import TextFile
-    fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
+python_build = _python_build()
 
-    if g is None:
-        g = {}
-    done = {}
-    notdone = {}
-
-    while True:
-        line = fp.readline()
-        if line is None: # eof
-            break
-        m = re.match(_variable_rx, line)
-        if m:
-            n, v = m.group(1, 2)
-            v = v.strip()
-            # `$$' is a literal `$' in make
-            tmpv = v.replace('$$', '')
-
-            if "$" in tmpv:
-                notdone[n] = v
-            else:
-                try:
-                    v = int(v)
-                except ValueError:
-                    # insert literal `$'
-                    done[n] = v.replace('$$', '$')
-                else:
-                    done[n] = v
-
-    # Variables with a 'PY_' prefix in the makefile. These need to
-    # be made available without that prefix through sysconfig.
-    # Special care is needed to ensure that variable expansion works, even
-    # if the expansion uses the name without a prefix.
-    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
-
-    # do variable interpolation here
-    while notdone:
-        for name in list(notdone):
-            value = notdone[name]
-            m = re.search(_findvar1_rx, value) or re.search(_findvar2_rx, value)
-            if m:
-                n = m.group(1)
-                found = True
-                if n in done:
-                    item = str(done[n])
-                elif n in notdone:
-                    # get it on a subsequent round
-                    found = False
-                elif n in os.environ:
-                    # do it like make: fall back to environment
-                    item = os.environ[n]
-
-                elif n in renamed_variables:
-                    if name.startswith('PY_') and name[3:] in renamed_variables:
-                        item = ""
-
-                    elif 'PY_' + n in notdone:
-                        found = False
-
-                    else:
-                        item = str(done['PY_' + n])
-                else:
-                    done[n] = item = ""
-                if found:
-                    after = value[m.end():]
-                    value = value[:m.start()] + item + after
-                    if "$" in after:
-                        notdone[name] = value
-                    else:
-                        try: value = int(value)
-                        except ValueError:
-                            done[name] = value.strip()
-                        else:
-                            done[name] = value
-                        del notdone[name]
-
-                        if name.startswith('PY_') \
-                            and name[3:] in renamed_variables:
-
-                            name = name[3:]
-                            if name not in done:
-                                done[name] = value
-            else:
-                # bogus variable reference; just drop it since we can't deal
-                del notdone[name]
-
-    fp.close()
-
-    # strip spurious spaces
-    for k, v in done.items():
-        if isinstance(v, str):
-            done[k] = v.strip()
-
-    # save the results in the global dictionary
-    g.update(done)
-    return g
-
-
-# Following functions are deprecated together with this module and they
-# have no direct replacement
 
 # Calculate the build qualifier flags if they are defined.  Adding the flags
 # to the include and lib directories only makes sense for an installation, not
@@ -194,76 +76,12 @@
     # this attribute, which is fine.
     pass
 
-
-def customize_compiler(compiler):
-    """Do any platform-specific customization of a CCompiler instance.
-
-    Mainly needed on Unix, so we can plug in the information that
-    varies across Unices and is stored in Python's Makefile.
+def get_python_version():
+    """Return a string containing the major and minor Python version,
+    leaving off the patchlevel.  Sample return values could be '1.5'
+    or '2.2'.
     """
-    if compiler.compiler_type == "unix":
-        if sys.platform == "darwin":
-            # Perform first-time customization of compiler-related
-            # config vars on OS X now that we know we need a compiler.
-            # This is primarily to support Pythons from binary
-            # installers.  The kind and paths to build tools on
-            # the user system may vary significantly from the system
-            # that Python itself was built on.  Also the user OS
-            # version and build tools may not support the same set
-            # of CPU architectures for universal builds.
-            if not _config_vars.get('CUSTOMIZED_OSX_COMPILER'):
-                import _osx_support
-                _osx_support.customize_compiler(_config_vars)
-                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
-
-        (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
-            get_config_vars('CC', 'CXX', 'CFLAGS',
-                            'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
-
-        if 'CC' in os.environ:
-            newcc = os.environ['CC']
-            if (sys.platform == 'darwin'
-                    and 'LDSHARED' not in os.environ
-                    and ldshared.startswith(cc)):
-                # On OS X, if CC is overridden, use that as the default
-                #       command for LDSHARED as well
-                ldshared = newcc + ldshared[len(cc):]
-            cc = newcc
-        if 'CXX' in os.environ:
-            cxx = os.environ['CXX']
-        if 'LDSHARED' in os.environ:
-            ldshared = os.environ['LDSHARED']
-        if 'CPP' in os.environ:
-            cpp = os.environ['CPP']
-        else:
-            cpp = cc + " -E"           # not always
-        if 'LDFLAGS' in os.environ:
-            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
-        if 'CFLAGS' in os.environ:
-            cflags = cflags + ' ' + os.environ['CFLAGS']
-            ldshared = ldshared + ' ' + os.environ['CFLAGS']
-        if 'CPPFLAGS' in os.environ:
-            cpp = cpp + ' ' + os.environ['CPPFLAGS']
-            cflags = cflags + ' ' + os.environ['CPPFLAGS']
-            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
-        if 'AR' in os.environ:
-            ar = os.environ['AR']
-        if 'ARFLAGS' in os.environ:
-            archiver = ar + ' ' + os.environ['ARFLAGS']
-        else:
-            archiver = ar + ' ' + ar_flags
-
-        cc_cmd = cc + ' ' + cflags
-        compiler.set_executables(
-            preprocessor=cpp,
-            compiler=cc_cmd,
-            compiler_so=cc_cmd + ' ' + ccshared,
-            compiler_cxx=cxx,
-            linker_so=ldshared,
-            linker_exe=cc,
-            archiver=archiver)
-
-        compiler.shared_lib_extension = shlib_suffix
+    return '%d.%d' % sys.version_info[:2]
 
 
 def get_python_inc(plat_specific=0, prefix=None):
@@ -349,3 +167,389 @@
         raise DistutilsPlatformError(
             "I don't know where Python installs its library "
             "on platform '%s'" % os.name)
+
+
+
+def customize_compiler(compiler):
+    """Do any platform-specific customization of a CCompiler instance.
+
+    Mainly needed on Unix, so we can plug in the information that
+    varies across Unices and is stored in Python's Makefile.
+    """
+    if compiler.compiler_type == "unix":
+        if sys.platform == "darwin":
+            # Perform first-time customization of compiler-related
+            # config vars on OS X now that we know we need a compiler.
+            # This is primarily to support Pythons from binary
+            # installers.  The kind and paths to build tools on
+            # the user system may vary significantly from the system
+            # that Python itself was built on.  Also the user OS
+            # version and build tools may not support the same set
+            # of CPU architectures for universal builds.
+            global _config_vars
+            # Use get_config_var() to ensure _config_vars is initialized.
+            if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
+                import _osx_support
+                _osx_support.customize_compiler(_config_vars)
+                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
+
+        (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
+            get_config_vars('CC', 'CXX', 'CFLAGS',
+                            'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
+
+        if 'CC' in os.environ:
+            newcc = os.environ['CC']
+            if (sys.platform == 'darwin'
+                    and 'LDSHARED' not in os.environ
+                    and ldshared.startswith(cc)):
+                # On OS X, if CC is overridden, use that as the default
+                #       command for LDSHARED as well
+                ldshared = newcc + ldshared[len(cc):]
+            cc = newcc
+        if 'CXX' in os.environ:
+            cxx = os.environ['CXX']
+        if 'LDSHARED' in os.environ:
+            ldshared = os.environ['LDSHARED']
+        if 'CPP' in os.environ:
+            cpp = os.environ['CPP']
+        else:
+            cpp = cc + " -E"           # not always
+        if 'LDFLAGS' in os.environ:
+            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
+        if 'CFLAGS' in os.environ:
+            cflags = cflags + ' ' + os.environ['CFLAGS']
+            ldshared = ldshared + ' ' + os.environ['CFLAGS']
+        if 'CPPFLAGS' in os.environ:
+            cpp = cpp + ' ' + os.environ['CPPFLAGS']
+            cflags = cflags + ' ' + os.environ['CPPFLAGS']
+            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
+        if 'AR' in os.environ:
+            ar = os.environ['AR']
+        if 'ARFLAGS' in os.environ:
+            archiver = ar + ' ' + os.environ['ARFLAGS']
+        else:
+            archiver = ar + ' ' + ar_flags
+
+        cc_cmd = cc + ' ' + cflags
+        compiler.set_executables(
+            preprocessor=cpp,
+            compiler=cc_cmd,
+            compiler_so=cc_cmd + ' ' + ccshared,
+            compiler_cxx=cxx,
+            linker_so=ldshared,
+            linker_exe=cc,
+            archiver=archiver)
+
+        compiler.shared_lib_extension = shlib_suffix
+
+
+def get_config_h_filename():
+    """Return full pathname of installed pyconfig.h file."""
+    if python_build:
+        if os.name == "nt":
+            inc_dir = os.path.join(_sys_home or project_base, "PC")
+        else:
+            inc_dir = _sys_home or project_base
+    else:
+        inc_dir = get_python_inc(plat_specific=1)
+
+    return os.path.join(inc_dir, 'pyconfig.h')
+
+
+def get_makefile_filename():
+    """Return full pathname of installed Makefile from the Python build."""
+    if python_build:
+        return os.path.join(_sys_home or project_base, "Makefile")
+    lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
+    config_file = 'config-{}{}'.format(get_python_version(), build_flags)
+    if hasattr(sys.implementation, '_multiarch'):
+        config_file += '-%s' % sys.implementation._multiarch
+    return os.path.join(lib_dir, config_file, 'Makefile')
+
+
+def parse_config_h(fp, g=None):
+    """Parse a config.h-style file.
+
+    A dictionary containing name/value pairs is returned.  If an
+    optional dictionary is passed in as the second argument, it is
+    used instead of a new dictionary.
+    """
+    if g is None:
+        g = {}
+    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
+    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
+    #
+    while True:
+        line = fp.readline()
+        if not line:
+            break
+        m = define_rx.match(line)
+        if m:
+            n, v = m.group(1, 2)
+            try: v = int(v)
+            except ValueError: pass
+            g[n] = v
+        else:
+            m = undef_rx.match(line)
+            if m:
+                g[m.group(1)] = 0
+    return g
+
+
+# Regexes needed for parsing Makefile (and similar syntaxes,
+# like old-style Setup files).
+_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
+_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
+_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
+
+def parse_makefile(fn, g=None):
+    """Parse a Makefile-style file.
+
+    A dictionary containing name/value pairs is returned.  If an
+    optional dictionary is passed in as the second argument, it is
+    used instead of a new dictionary.
+    """
+    from distutils.text_file import TextFile
+    fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
+
+    if g is None:
+        g = {}
+    done = {}
+    notdone = {}
+
+    while True:
+        line = fp.readline()
+        if line is None: # eof
+            break
+        m = _variable_rx.match(line)
+        if m:
+            n, v = m.group(1, 2)
+            v = v.strip()
+            # `$$' is a literal `$' in make
+            tmpv = v.replace('$$', '')
+
+            if "$" in tmpv:
+                notdone[n] = v
+            else:
+                try:
+                    v = int(v)
+                except ValueError:
+                    # insert literal `$'
+                    done[n] = v.replace('$$', '$')
+                else:
+                    done[n] = v
+
+    # Variables with a 'PY_' prefix in the makefile. These need to
+    # be made available without that prefix through sysconfig.
+    # Special care is needed to ensure that variable expansion works, even
+    # if the expansion uses the name without a prefix.
+    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
+
+    # do variable interpolation here
+    while notdone:
+        for name in list(notdone):
+            value = notdone[name]
+            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
+            if m:
+                n = m.group(1)
+                found = True
+                if n in done:
+                    item = str(done[n])
+                elif n in notdone:
+                    # get it on a subsequent round
+                    found = False
+                elif n in os.environ:
+                    # do it like make: fall back to environment
+                    item = os.environ[n]
+
+                elif n in renamed_variables:
+                    if name.startswith('PY_') and name[3:] in renamed_variables:
+                        item = ""
+
+                    elif 'PY_' + n in notdone:
+                        found = False
+
+                    else:
+                        item = str(done['PY_' + n])
+                else:
+                    done[n] = item = ""
+                if found:
+                    after = value[m.end():]
+                    value = value[:m.start()] + item + after
+                    if "$" in after:
+                        notdone[name] = value
+                    else:
+                        try: value = int(value)
+                        except ValueError:
+                            done[name] = value.strip()
+                        else:
+                            done[name] = value
+                        del notdone[name]
+
+                        if name.startswith('PY_') \
+                            and name[3:] in renamed_variables:
+
+                            name = name[3:]
+                            if name not in done:
+                                done[name] = value
+            else:
+                # bogus variable reference; just drop it since we can't deal
+                del notdone[name]
+
+    fp.close()
+
+    # strip spurious spaces
+    for k, v in done.items():
+        if isinstance(v, str):
+            done[k] = v.strip()
+
+    # save the results in the global dictionary
+    g.update(done)
+    return g
+
+
+def expand_makefile_vars(s, vars):
+    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
+    'string' according to 'vars' (a dictionary mapping variable names to
+    values).  Variables not present in 'vars' are silently expanded to the
+    empty string.  The variable values in 'vars' should not contain further
+    variable expansions; if 'vars' is the output of 'parse_makefile()',
+    you're fine.  Returns a variable-expanded version of 's'.
+    """
+
+    # This algorithm does multiple expansion, so if vars['foo'] contains
+    # "${bar}", it will expand ${foo} to ${bar}, and then expand
+    # ${bar}... and so forth.  This is fine as long as 'vars' comes from
+    # 'parse_makefile()', which takes care of such expansions eagerly,
+    # according to make's variable expansion semantics.
+
+    while True:
+        m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
+        if m:
+            (beg, end) = m.span()
+            s = s[0:beg] + vars.get(m.group(1)) + s[end:]
+        else:
+            break
+    return s
+
+
+_config_vars = None
+
+def _init_posix():
+    """Initialize the module as appropriate for POSIX systems."""
+    # _sysconfigdata is generated at build time, see the sysconfig module
+    name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
+        '_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
+        abi=sys.abiflags,
+        platform=sys.platform,
+        multiarch=getattr(sys.implementation, '_multiarch', ''),
+    ))
+    _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
+    build_time_vars = _temp.build_time_vars
+    global _config_vars
+    _config_vars = {}
+    _config_vars.update(build_time_vars)
+
+
+def _init_nt():
+    """Initialize the module as appropriate for NT"""
+    g = {}
+    # set basic install directories
+    g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
+    g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
+
+    # XXX hmmm.. a normal install puts include files here
+    g['INCLUDEPY'] = get_python_inc(plat_specific=0)
+
+    g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
+    g['EXE'] = ".exe"
+    g['VERSION'] = get_python_version().replace(".", "")
+    g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
+
+    global _config_vars
+    _config_vars = g
+
+
+def get_config_vars(*args):
+    """With no arguments, return a dictionary of all configuration
+    variables relevant for the current platform.  Generally this includes
+    everything needed to build extensions and install both pure modules and
+    extensions.  On Unix, this means every variable defined in Python's
+    installed Makefile; on Windows it's a much smaller set.
+
+    With arguments, return a list of values that result from looking up
+    each argument in the configuration variable dictionary.
+    """
+    global _config_vars
+    if _config_vars is None:
+        func = globals().get("_init_" + os.name)
+        if func:
+            func()
+        else:
+            _config_vars = {}
+
+        # Normalized versions of prefix and exec_prefix are handy to have;
+        # in fact, these are the standard versions used most places in the
+        # Distutils.
+        _config_vars['prefix'] = PREFIX
+        _config_vars['exec_prefix'] = EXEC_PREFIX
+
+        # For backward compatibility, see issue19555
+        SO = _config_vars.get('EXT_SUFFIX')
+        if SO is not None:
+            _config_vars['SO'] = SO
+
+        # Always convert srcdir to an absolute path
+        srcdir = _config_vars.get('srcdir', project_base)
+        if os.name == 'posix':
+            if python_build:
+                # If srcdir is a relative path (typically '.' or '..')
+                # then it should be interpreted relative to the directory
+                # containing Makefile.
+                base = os.path.dirname(get_makefile_filename())
+                srcdir = os.path.join(base, srcdir)
+            else:
+                # srcdir is not meaningful since the installation is
+                # spread about the filesystem.  We choose the
+                # directory containing the Makefile since we know it
+                # exists.
+                srcdir = os.path.dirname(get_makefile_filename())
+        _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
+
+        # Convert srcdir into an absolute path if it appears necessary.
+        # Normally it is relative to the build directory.  However, during
+        # testing, for example, we might be running a non-installed python
+        # from a different directory.
+        if python_build and os.name == "posix":
+            base = project_base
+            if (not os.path.isabs(_config_vars['srcdir']) and
+                base != os.getcwd()):
+                # srcdir is relative and we are not in the same directory
+                # as the executable. Assume executable is in the build
+                # directory and make srcdir absolute.
+                srcdir = os.path.join(base, _config_vars['srcdir'])
+                _config_vars['srcdir'] = os.path.normpath(srcdir)
+
+        # OS X platforms require special customization to handle
+        # multi-architecture, multi-os-version installers
+        if sys.platform == 'darwin':
+            import _osx_support
+            _osx_support.customize_config_vars(_config_vars)
+
+    if args:
+        vals = []
+        for name in args:
+            vals.append(_config_vars.get(name))
+        return vals
+    else:
+        return _config_vars
+
+def get_config_var(name):
+    """Return the value of a single variable using the dictionary
+    returned by 'get_config_vars()'.  Equivalent to
+    get_config_vars().get(name)
+    """
+    if name == 'SO':
+        import warnings
+        warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
+    return get_config_vars().get(name)
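
As a usage sketch of the Makefile-style expansion restored above (the
variable values are made up; note the multi-pass behavior the
expand_makefile_vars docstring describes):

    from distutils.sysconfig import expand_makefile_vars

    makefile_vars = {'prefix': '/usr/local', 'bindir': '$(prefix)/bin'}
    # '${bindir}' expands to '$(prefix)/bin', which is expanded in turn.
    expanded = expand_makefile_vars('${bindir}/python3', makefile_vars)
    assert expanded == '/usr/local/bin/python3'
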
diff --git a/common/py3-stdlib/distutils/tests/__init__.py b/common/py3-stdlib/distutils/tests/__init__.py
index 16d011f..6803721 100644
--- a/common/py3-stdlib/distutils/tests/__init__.py
+++ b/common/py3-stdlib/distutils/tests/__init__.py
@@ -15,8 +15,7 @@
 import os
 import sys
 import unittest
-from test.support import run_unittest
-from test.support.warnings_helper import save_restore_warnings_filters
+from test.support import run_unittest, save_restore_warnings_filters
 
 
 here = os.path.dirname(__file__) or os.curdir
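
This and the following test-file changes all undo the same refactor that
moved helpers from test.support into test.support.os_helper and
test.support.warnings_helper in Python 3.10. Code that must run on both
sides typically shims the import; a sketch covering only the names used in
these tests:

    try:
        # Python 3.10+: helpers live in dedicated submodules.
        from test.support.os_helper import TESTFN, unlink, temp_cwd
        from test.support.warnings_helper import check_warnings
    except ImportError:
        # Python 3.9 and earlier: everything hangs off test.support.
        from test.support import TESTFN, unlink, temp_cwd, check_warnings
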
diff --git a/common/py3-stdlib/distutils/tests/support.py b/common/py3-stdlib/distutils/tests/support.py
index 23b907b..259af88 100644
--- a/common/py3-stdlib/distutils/tests/support.py
+++ b/common/py3-stdlib/distutils/tests/support.py
@@ -6,7 +6,7 @@
 import unittest
 import sysconfig
 from copy import deepcopy
-from test.support import os_helper
+import test.support
 
 from distutils import log
 from distutils.log import DEBUG, INFO, WARN, ERROR, FATAL
@@ -64,7 +64,7 @@
         super().tearDown()
         while self.tempdirs:
             tmpdir = self.tempdirs.pop()
-            os_helper.rmtree(tmpdir)
+            test.support.rmtree(tmpdir)
 
     def mkdtemp(self):
         """Create a temporary directory that will be cleaned up.
diff --git a/common/py3-stdlib/distutils/tests/test_archive_util.py b/common/py3-stdlib/distutils/tests/test_archive_util.py
index edcec25..e9aad0e 100644
--- a/common/py3-stdlib/distutils/tests/test_archive_util.py
+++ b/common/py3-stdlib/distutils/tests/test_archive_util.py
@@ -13,9 +13,7 @@
                                     ARCHIVE_FORMATS)
 from distutils.spawn import find_executable, spawn
 from distutils.tests import support
-from test.support import run_unittest, patch
-from test.support.os_helper import change_cwd
-from test.support.warnings_helper import check_warnings
+from test.support import check_warnings, run_unittest, patch, change_cwd
 
 try:
     import grp
diff --git a/common/py3-stdlib/distutils/tests/test_bdist.py b/common/py3-stdlib/distutils/tests/test_bdist.py
index 55fa393..130d8bf 100644
--- a/common/py3-stdlib/distutils/tests/test_bdist.py
+++ b/common/py3-stdlib/distutils/tests/test_bdist.py
@@ -2,12 +2,10 @@
 import os
 import unittest
 from test.support import run_unittest
-
 import warnings
-with warnings.catch_warnings():
-    warnings.simplefilter('ignore', DeprecationWarning)
-    from distutils.command.bdist import bdist
-    from distutils.tests import support
+
+from distutils.command.bdist import bdist
+from distutils.tests import support
 
 
 class BuildTestCase(support.TempdirManager,
@@ -24,7 +22,7 @@
 
         # what formats does bdist offer?
         formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
-                   'xztar', 'zip', 'ztar']
+                   'wininst', 'xztar', 'zip', 'ztar']
         found = sorted(cmd.format_command)
         self.assertEqual(found, formats)
 
@@ -36,12 +34,15 @@
         cmd.ensure_finalized()
         dist.command_obj['bdist'] = cmd
 
-        names = ['bdist_dumb']  # bdist_rpm does not support --skip-build
+        names = ['bdist_dumb', 'bdist_wininst']  # bdist_rpm does not support --skip-build
         if os.name == 'nt':
             names.append('bdist_msi')
 
         for name in names:
-            subcmd = cmd.get_finalized_command(name)
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore', 'bdist_wininst command is deprecated',
+                                        DeprecationWarning)
+                subcmd = cmd.get_finalized_command(name)
             if getattr(subcmd, '_unsupported', False):
                 # command is not supported on this build
                 continue
diff --git a/common/py3-stdlib/distutils/tests/test_bdist_msi.py b/common/py3-stdlib/distutils/tests/test_bdist_msi.py
index a61266a..418e60e 100644
--- a/common/py3-stdlib/distutils/tests/test_bdist_msi.py
+++ b/common/py3-stdlib/distutils/tests/test_bdist_msi.py
@@ -1,8 +1,7 @@
 """Tests for distutils.command.bdist_msi."""
 import sys
 import unittest
-from test.support import run_unittest
-from test.support.warnings_helper import check_warnings
+from test.support import run_unittest, check_warnings
 from distutils.tests import support
 
 
diff --git a/common/py3-stdlib/distutils/tests/test_bdist_rpm.py b/common/py3-stdlib/distutils/tests/test_bdist_rpm.py
index ba4382f..6453a02 100644
--- a/common/py3-stdlib/distutils/tests/test_bdist_rpm.py
+++ b/common/py3-stdlib/distutils/tests/test_bdist_rpm.py
@@ -44,7 +44,7 @@
     # spurious sdtout/stderr output under Mac OS X
     @unittest.skipUnless(sys.platform.startswith('linux'),
                          'spurious sdtout/stderr output under Mac OS X')
-    @requires_zlib()
+    @requires_zlib
     @unittest.skipIf(find_executable('rpm') is None,
                      'the rpm command is not found')
     @unittest.skipIf(find_executable('rpmbuild') is None,
@@ -87,7 +87,7 @@
     # spurious sdtout/stderr output under Mac OS X
     @unittest.skipUnless(sys.platform.startswith('linux'),
                          'spurious sdtout/stderr output under Mac OS X')
-    @requires_zlib()
+    @requires_zlib
     # http://bugs.python.org/issue1533164
     @unittest.skipIf(find_executable('rpm') is None,
                      'the rpm command is not found')
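
The dropped parentheses reflect the older test.support API, in which
requires_zlib was already a decorator rather than a decorator factory;
roughly (a sketch of the pre-3.10 definition, not a verbatim copy):

    import unittest

    try:
        import zlib
    except ImportError:
        zlib = None

    # Pre-3.10: the name itself is the decorator, hence @requires_zlib.
    requires_zlib = unittest.skipUnless(zlib, 'requires zlib')

    class Demo(unittest.TestCase):
        @requires_zlib
        def test_compress(self):
            self.assertTrue(zlib.compress(b'data'))
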
diff --git a/common/py3-stdlib/distutils/tests/test_bdist_wininst.py b/common/py3-stdlib/distutils/tests/test_bdist_wininst.py
new file mode 100644
index 0000000..5c3d025
--- /dev/null
+++ b/common/py3-stdlib/distutils/tests/test_bdist_wininst.py
@@ -0,0 +1,38 @@
+"""Tests for distutils.command.bdist_wininst."""
+import sys
+import platform
+import unittest
+from test.support import run_unittest, check_warnings
+
+from distutils.command.bdist_wininst import bdist_wininst
+from distutils.tests import support
+
+@unittest.skipIf(sys.platform == 'win32' and platform.machine() == 'ARM64',
+    'bdist_wininst is not supported in this install')
+@unittest.skipIf(getattr(bdist_wininst, '_unsupported', False),
+    'bdist_wininst is not supported in this install')
+class BuildWinInstTestCase(support.TempdirManager,
+                           support.LoggingSilencer,
+                           unittest.TestCase):
+
+    def test_get_exe_bytes(self):
+
+        # issue5731: command was broken on non-windows platforms
+        # this test makes sure it works now for every platform
+        # let's create a command
+        pkg_pth, dist = self.create_dist()
+        with check_warnings(("", DeprecationWarning)):
+            cmd = bdist_wininst(dist)
+        cmd.ensure_finalized()
+
+        # let's run the code that finds the right wininst*.exe file
+        # and make sure it finds it and returns its content
+        # no matter what platform we have
+        exe_file = cmd.get_exe_bytes()
+        self.assertGreater(len(exe_file), 10)
+
+def test_suite():
+    return unittest.makeSuite(BuildWinInstTestCase)
+
+if __name__ == '__main__':
+    run_unittest(test_suite())
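
check_warnings(("", DeprecationWarning)) above asserts that the enclosed
code emits a DeprecationWarning with any message. The equivalent pattern
using only the warnings module, for reference:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        warnings.warn('bdist_wininst command is deprecated',
                      DeprecationWarning)
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
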
diff --git a/common/py3-stdlib/distutils/tests/test_build_clib.py b/common/py3-stdlib/distutils/tests/test_build_clib.py
index 19e012a..abd8313 100644
--- a/common/py3-stdlib/distutils/tests/test_build_clib.py
+++ b/common/py3-stdlib/distutils/tests/test_build_clib.py
@@ -2,7 +2,6 @@
 import unittest
 import os
 import sys
-import sysconfig
 
 from test.support import run_unittest, missing_compiler_executable
 
@@ -14,15 +13,6 @@
                         support.LoggingSilencer,
                         unittest.TestCase):
 
-    def setUp(self):
-        super().setUp()
-        self._backup_CONFIG_VARS = dict(sysconfig._CONFIG_VARS)
-
-    def tearDown(self):
-        super().tearDown()
-        sysconfig._CONFIG_VARS.clear()
-        sysconfig._CONFIG_VARS.update(self._backup_CONFIG_VARS)
-
     def test_check_library_dist(self):
         pkg_dir, dist = self.create_dist()
         cmd = build_clib(dist)
diff --git a/common/py3-stdlib/distutils/tests/test_build_ext.py b/common/py3-stdlib/distutils/tests/test_build_ext.py
index 8e7364d..1b034c9 100644
--- a/common/py3-stdlib/distutils/tests/test_build_ext.py
+++ b/common/py3-stdlib/distutils/tests/test_build_ext.py
@@ -15,7 +15,6 @@
 
 import unittest
 from test import support
-from test.support import os_helper
 from test.support.script_helper import assert_python_ok
 
 # http://bugs.python.org/issue4373
@@ -35,12 +34,11 @@
         site.USER_BASE = self.mkdtemp()
         from distutils.command import build_ext
         build_ext.USER_BASE = site.USER_BASE
-        self.old_config_vars = dict(sysconfig._config_vars)
 
         # bpo-30132: On Windows, a .pdb file may be created in the current
         # working directory. Create a temporary working directory to cleanup
         # everything at the end of the test.
-        change_cwd = os_helper.change_cwd(self.tmp_dir)
+        change_cwd = support.change_cwd(self.tmp_dir)
         change_cwd.__enter__()
         self.addCleanup(change_cwd.__exit__, None, None, None)
 
@@ -49,8 +47,6 @@
         site.USER_BASE = self.old_user_base
         from distutils.command import build_ext
         build_ext.USER_BASE = self.old_user_base
-        sysconfig._config_vars.clear()
-        sysconfig._config_vars.update(self.old_config_vars)
         super(BuildExtTestCase, self).tearDown()
 
     def build_ext(self, *args, **kwargs):
@@ -459,7 +455,7 @@
         deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
         if deptarget:
             # increment the minor version number (i.e. 10.6 -> 10.7)
-            deptarget = [int(x) for x in deptarget.split('.')]
+            deptarget = [int(x) for x in str(deptarget).split('.')]
             deptarget[-1] += 1
             deptarget = '.'.join(str(i) for i in deptarget)
             self._try_compile_deployment_target('<', deptarget)
@@ -492,7 +488,7 @@
 
         # get the deployment target that the interpreter was built with
         target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
-        target = tuple(map(int, target.split('.')[0:2]))
+        target = tuple(map(int, str(target).split('.')[0:2]))
         # format the target value as defined in the Apple
         # Availability Macros.  We can't use the macro names since
         # at least one value we test with will not exist yet.
diff --git a/common/py3-stdlib/distutils/tests/test_config_cmd.py b/common/py3-stdlib/distutils/tests/test_config_cmd.py
index 0127ba7..9aeab07 100644
--- a/common/py3-stdlib/distutils/tests/test_config_cmd.py
+++ b/common/py3-stdlib/distutils/tests/test_config_cmd.py
@@ -2,7 +2,6 @@
 import unittest
 import os
 import sys
-import sysconfig
 from test.support import run_unittest, missing_compiler_executable
 
 from distutils.command.config import dump_file, config
@@ -22,12 +21,9 @@
         self._logs = []
         self.old_log = log.info
         log.info = self._info
-        self.old_config_vars = dict(sysconfig._CONFIG_VARS)
 
     def tearDown(self):
         log.info = self.old_log
-        sysconfig._CONFIG_VARS.clear()
-        sysconfig._CONFIG_VARS.update(self.old_config_vars)
         super(ConfigTestCase, self).tearDown()
 
     def test_dump_file(self):
diff --git a/common/py3-stdlib/distutils/tests/test_core.py b/common/py3-stdlib/distutils/tests/test_core.py
index 4e6694a..27ce732 100644
--- a/common/py3-stdlib/distutils/tests/test_core.py
+++ b/common/py3-stdlib/distutils/tests/test_core.py
@@ -5,8 +5,8 @@
 import os
 import shutil
 import sys
+import test.support
 from test.support import captured_stdout, run_unittest
-from test.support import os_helper
 import unittest
 from distutils.tests import support
 from distutils import log
@@ -62,13 +62,13 @@
         super(CoreTestCase, self).tearDown()
 
     def cleanup_testfn(self):
-        path = os_helper.TESTFN
+        path = test.support.TESTFN
         if os.path.isfile(path):
             os.remove(path)
         elif os.path.isdir(path):
             shutil.rmtree(path)
 
-    def write_setup(self, text, path=os_helper.TESTFN):
+    def write_setup(self, text, path=test.support.TESTFN):
         f = open(path, "w")
         try:
             f.write(text)
@@ -105,8 +105,8 @@
         cwd = os.getcwd()
 
         # Create a directory and write the setup.py file there:
-        os.mkdir(os_helper.TESTFN)
-        setup_py = os.path.join(os_helper.TESTFN, "setup.py")
+        os.mkdir(test.support.TESTFN)
+        setup_py = os.path.join(test.support.TESTFN, "setup.py")
         distutils.core.run_setup(
             self.write_setup(setup_prints_cwd, path=setup_py))
 
diff --git a/common/py3-stdlib/distutils/tests/test_dist.py b/common/py3-stdlib/distutils/tests/test_dist.py
index f8a9e86..60956da 100644
--- a/common/py3-stdlib/distutils/tests/test_dist.py
+++ b/common/py3-stdlib/distutils/tests/test_dist.py
@@ -12,9 +12,8 @@
 from distutils.cmd import Command
 
 from test.support import (
-     captured_stdout, captured_stderr, run_unittest
+     TESTFN, captured_stdout, captured_stderr, run_unittest
 )
-from test.support.os_helper import TESTFN
 from distutils.tests import support
 from distutils import log
 
diff --git a/common/py3-stdlib/distutils/tests/test_extension.py b/common/py3-stdlib/distutils/tests/test_extension.py
index 81fad02..e35f273 100644
--- a/common/py3-stdlib/distutils/tests/test_extension.py
+++ b/common/py3-stdlib/distutils/tests/test_extension.py
@@ -3,8 +3,7 @@
 import os
 import warnings
 
-from test.support import run_unittest
-from test.support.warnings_helper import check_warnings
+from test.support import check_warnings, run_unittest
 from distutils.extension import read_setup_file, Extension
 
 class ExtensionTestCase(unittest.TestCase):
diff --git a/common/py3-stdlib/distutils/tests/test_file_util.py b/common/py3-stdlib/distutils/tests/test_file_util.py
index c7783b8..a4e2d02 100644
--- a/common/py3-stdlib/distutils/tests/test_file_util.py
+++ b/common/py3-stdlib/distutils/tests/test_file_util.py
@@ -8,9 +8,7 @@
 from distutils import log
 from distutils.tests import support
 from distutils.errors import DistutilsFileError
-from test.support import run_unittest
-from test.support.os_helper import unlink
-
+from test.support import run_unittest, unlink
 
 class FileUtilTestCase(support.TempdirManager, unittest.TestCase):
 
diff --git a/common/py3-stdlib/distutils/tests/test_filelist.py b/common/py3-stdlib/distutils/tests/test_filelist.py
index cee97d4..c71342d 100644
--- a/common/py3-stdlib/distutils/tests/test_filelist.py
+++ b/common/py3-stdlib/distutils/tests/test_filelist.py
@@ -8,7 +8,7 @@
 from distutils.filelist import glob_to_re, translate_pattern, FileList
 from distutils import filelist
 
-from test.support import os_helper
+import test.support
 from test.support import captured_stdout, run_unittest
 from distutils.tests import support
 
@@ -295,9 +295,9 @@
 
 
 class FindAllTestCase(unittest.TestCase):
-    @os_helper.skip_unless_symlink
+    @test.support.skip_unless_symlink
     def test_missing_symlink(self):
-        with os_helper.temp_cwd():
+        with test.support.temp_cwd():
             os.symlink('foo', 'bar')
             self.assertEqual(filelist.findall(), [])
 
@@ -307,13 +307,13 @@
         '.' as the parameter, the dot should be omitted from
         the results.
         """
-        with os_helper.temp_cwd():
+        with test.support.temp_cwd():
             os.mkdir('foo')
             file1 = os.path.join('foo', 'file1.txt')
-            os_helper.create_empty_file(file1)
+            test.support.create_empty_file(file1)
             os.mkdir('bar')
             file2 = os.path.join('bar', 'file2.txt')
-            os_helper.create_empty_file(file2)
+            test.support.create_empty_file(file2)
             expected = [file2, file1]
             self.assertEqual(sorted(filelist.findall()), expected)
 
@@ -322,9 +322,9 @@
         When findall is called with another path, the full
         path name should be returned.
         """
-        with os_helper.temp_dir() as temp_dir:
+        with test.support.temp_dir() as temp_dir:
             file1 = os.path.join(temp_dir, 'file1.txt')
-            os_helper.create_empty_file(file1)
+            test.support.create_empty_file(file1)
             expected = [file1]
             self.assertEqual(filelist.findall(temp_dir), expected)
 
diff --git a/common/py3-stdlib/distutils/tests/test_install.py b/common/py3-stdlib/distutils/tests/test_install.py
index 0632024..51c80e0 100644
--- a/common/py3-stdlib/distutils/tests/test_install.py
+++ b/common/py3-stdlib/distutils/tests/test_install.py
@@ -8,7 +8,7 @@
 from test.support import captured_stdout, run_unittest
 
 from distutils import sysconfig
-from distutils.command.install import install, HAS_USER_SITE
+from distutils.command.install import install
 from distutils.command import install as install_module
 from distutils.command.build_ext import build_ext
 from distutils.command.install import INSTALL_SCHEMES
@@ -29,15 +29,6 @@
                       support.LoggingSilencer,
                       unittest.TestCase):
 
-    def setUp(self):
-        super().setUp()
-        self._backup_config_vars = dict(sysconfig._config_vars)
-
-    def tearDown(self):
-        super().tearDown()
-        sysconfig._config_vars.clear()
-        sysconfig._config_vars.update(self._backup_config_vars)
-
     def test_home_installation_scheme(self):
         # This ensure two things:
         # - that --home generates the desired set of directory names
@@ -75,7 +66,6 @@
         check_path(cmd.install_scripts, os.path.join(destination, "bin"))
         check_path(cmd.install_data, destination)
 
-    @unittest.skipUnless(HAS_USER_SITE, 'need user site')
     def test_user_site(self):
         # test install with --user
         # preparing the environment for the test
@@ -103,9 +93,8 @@
 
         self.addCleanup(cleanup)
 
-        if HAS_USER_SITE:
-            for key in ('nt_user', 'unix_user'):
-                self.assertIn(key, INSTALL_SCHEMES)
+        for key in ('nt_user', 'unix_user'):
+            self.assertIn(key, INSTALL_SCHEMES)
 
         dist = Distribution({'name': 'xx'})
         cmd = install(dist)
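The dropped HAS_USER_SITE guard lets the 3.10 suite skip user-site tests on interpreters built without user site-packages; the restored code assumes the 'nt_user'/'unix_user' schemes always exist. A sketch of the guard, assuming the flag mirrors whether site reports a user base (the exact definition lives in distutils.command.install in 3.10):

    import site
    import unittest

    # assumption: user-site support is available iff a user base is configured
    HAS_USER_SITE = site.getuserbase() is not None

    class UserSiteTests(unittest.TestCase):
        @unittest.skipUnless(HAS_USER_SITE, 'need user site')
        def test_user_schemes_exist(self):
            from distutils.command.install import INSTALL_SCHEMES
            for key in ('nt_user', 'unix_user'):
                self.assertIn(key, INSTALL_SCHEMES)
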
diff --git a/common/py3-stdlib/distutils/tests/test_register.py b/common/py3-stdlib/distutils/tests/test_register.py
index bba4863..e68b0af 100644
--- a/common/py3-stdlib/distutils/tests/test_register.py
+++ b/common/py3-stdlib/distutils/tests/test_register.py
@@ -5,8 +5,7 @@
 import urllib
 import warnings
 
-from test.support import run_unittest
-from test.support.warnings_helper import check_warnings
+from test.support import check_warnings, run_unittest
 
 from distutils.command import register as register_module
 from distutils.command.register import register
diff --git a/common/py3-stdlib/distutils/tests/test_sdist.py b/common/py3-stdlib/distutils/tests/test_sdist.py
index 752e9db..23db126 100644
--- a/common/py3-stdlib/distutils/tests/test_sdist.py
+++ b/common/py3-stdlib/distutils/tests/test_sdist.py
@@ -6,8 +6,7 @@
 import zipfile
 from os.path import join
 from textwrap import dedent
-from test.support import captured_stdout, run_unittest
-from test.support.warnings_helper import check_warnings
+from test.support import captured_stdout, check_warnings, run_unittest
 
 try:
     import zlib
diff --git a/common/py3-stdlib/distutils/tests/test_spawn.py b/common/py3-stdlib/distutils/tests/test_spawn.py
index 4ec767b..ad50381 100644
--- a/common/py3-stdlib/distutils/tests/test_spawn.py
+++ b/common/py3-stdlib/distutils/tests/test_spawn.py
@@ -4,7 +4,7 @@
 import sys
 import unittest.mock
 from test.support import run_unittest, unix_shell
-from test.support import os_helper
+from test import support as test_support
 
 from distutils.spawn import find_executable
 from distutils.spawn import spawn
@@ -44,9 +44,9 @@
         spawn([exe])  # should work without any error
 
     def test_find_executable(self):
-        with os_helper.temp_dir() as tmp_dir:
+        with test_support.temp_dir() as tmp_dir:
             # use TESTFN to get a pseudo-unique filename
-            program_noeext = os_helper.TESTFN
+            program_noeext = test_support.TESTFN
             # Give the temporary program an ".exe" suffix for all.
             # It's needed on Windows and not harmful on other platforms.
             program = program_noeext + ".exe"
@@ -66,7 +66,7 @@
                 self.assertEqual(rv, filename)
 
             # test find in the current directory
-            with os_helper.change_cwd(tmp_dir):
+            with test_support.change_cwd(tmp_dir):
                 rv = find_executable(program)
                 self.assertEqual(rv, program)
 
@@ -76,7 +76,7 @@
             self.assertIsNone(rv)
 
             # PATH='': no match, except in the current directory
-            with os_helper.EnvironmentVarGuard() as env:
+            with test_support.EnvironmentVarGuard() as env:
                 env['PATH'] = ''
                 with unittest.mock.patch('distutils.spawn.os.confstr',
                                          return_value=tmp_dir, create=True), \
@@ -86,12 +86,12 @@
                     self.assertIsNone(rv)
 
                     # look in current directory
-                    with os_helper.change_cwd(tmp_dir):
+                    with test_support.change_cwd(tmp_dir):
                         rv = find_executable(program)
                         self.assertEqual(rv, program)
 
             # PATH=':': explicitly looks in the current directory
-            with os_helper.EnvironmentVarGuard() as env:
+            with test_support.EnvironmentVarGuard() as env:
                 env['PATH'] = os.pathsep
                 with unittest.mock.patch('distutils.spawn.os.confstr',
                                          return_value='', create=True), \
@@ -100,12 +100,12 @@
                     self.assertIsNone(rv)
 
                     # look in current directory
-                    with os_helper.change_cwd(tmp_dir):
+                    with test_support.change_cwd(tmp_dir):
                         rv = find_executable(program)
                         self.assertEqual(rv, program)
 
             # missing PATH: test os.confstr("CS_PATH") and os.defpath
-            with os_helper.EnvironmentVarGuard() as env:
+            with test_support.EnvironmentVarGuard() as env:
                 env.pop('PATH', None)
 
                 # without confstr
diff --git a/common/py3-stdlib/distutils/tests/test_sysconfig.py b/common/py3-stdlib/distutils/tests/test_sysconfig.py
index 59676b0..236755d 100644
--- a/common/py3-stdlib/distutils/tests/test_sysconfig.py
+++ b/common/py3-stdlib/distutils/tests/test_sysconfig.py
@@ -10,10 +10,7 @@
 from distutils import sysconfig
 from distutils.ccompiler import get_default_compiler
 from distutils.tests import support
-from test.support import run_unittest, swap_item
-from test.support.os_helper import TESTFN
-from test.support.warnings_helper import check_warnings
-
+from test.support import TESTFN, run_unittest, check_warnings, swap_item
 
 class SysconfigTestCase(support.EnvironGuard, unittest.TestCase):
     def setUp(self):
diff --git a/common/py3-stdlib/distutils/tests/test_unixccompiler.py b/common/py3-stdlib/distutils/tests/test_unixccompiler.py
index 24725ea..eef702c 100644
--- a/common/py3-stdlib/distutils/tests/test_unixccompiler.py
+++ b/common/py3-stdlib/distutils/tests/test_unixccompiler.py
@@ -1,8 +1,7 @@
 """Tests for distutils.unixccompiler."""
 import sys
 import unittest
-from test.support import run_unittest
-from test.support.os_helper import EnvironmentVarGuard
+from test.support import EnvironmentVarGuard, run_unittest
 
 from distutils import sysconfig
 from distutils.unixccompiler import UnixCCompiler
@@ -12,7 +11,6 @@
     def setUp(self):
         self._backup_platform = sys.platform
         self._backup_get_config_var = sysconfig.get_config_var
-        self._backup_config_vars = dict(sysconfig._config_vars)
         class CompilerWrapper(UnixCCompiler):
             def rpath_foo(self):
                 return self.runtime_library_dir_option('/foo')
@@ -21,8 +19,6 @@
     def tearDown(self):
         sys.platform = self._backup_platform
         sysconfig.get_config_var = self._backup_get_config_var
-        sysconfig._config_vars.clear()
-        sysconfig._config_vars.update(self._backup_config_vars)
 
     @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
     def test_runtime_libdir_option(self):
diff --git a/common/py3-stdlib/distutils/tests/test_upload.py b/common/py3-stdlib/distutils/tests/test_upload.py
index 74f0bc0..bca5516 100644
--- a/common/py3-stdlib/distutils/tests/test_upload.py
+++ b/common/py3-stdlib/distutils/tests/test_upload.py
@@ -2,7 +2,7 @@
 import os
 import unittest
 import unittest.mock as mock
-from urllib.error import HTTPError
+from urllib.request import HTTPError
 
 from test.support import run_unittest
 
diff --git a/common/py3-stdlib/distutils/tests/test_util.py b/common/py3-stdlib/distutils/tests/test_util.py
index d4a01c6..bf0d433 100644
--- a/common/py3-stdlib/distutils/tests/test_util.py
+++ b/common/py3-stdlib/distutils/tests/test_util.py
@@ -54,8 +54,7 @@
             os.uname = self.uname
         else:
             del os.uname
-        sysconfig._config_vars.clear()
-        sysconfig._config_vars.update(self._config_vars)
+        sysconfig._config_vars = copy(self._config_vars)
         super(UtilTestCase, self).tearDown()
 
     def _set_uname(self, uname):
diff --git a/common/py3-stdlib/distutils/unixccompiler.py b/common/py3-stdlib/distutils/unixccompiler.py
index d00c489..f0792de 100644
--- a/common/py3-stdlib/distutils/unixccompiler.py
+++ b/common/py3-stdlib/distutils/unixccompiler.py
@@ -215,8 +215,7 @@
         return "-L" + dir
 
     def _is_gcc(self, compiler_name):
-        # clang uses same syntax for rpath as gcc
-        return any(name in compiler_name for name in ("gcc", "g++", "clang"))
+        return "gcc" in compiler_name or "g++" in compiler_name
 
     def runtime_library_dir_option(self, dir):
         # XXX Hackish, at the very least.  See Python bug #445902:
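The one-line revert above narrows compiler detection back to gcc/g++; the 3.10 version also treats clang as gcc-like because it accepts the same rpath spelling. Detection is a plain substring test over the configured compiler name, so cross-prefixed and absolute paths match too. Illustrative check (names are examples only):

    def is_gcc_like(compiler_name):
        # substring match, as in the 3.10 _is_gcc
        return any(name in compiler_name for name in ("gcc", "g++", "clang"))

    assert is_gcc_like("arm-linux-gnueabi-gcc")
    assert is_gcc_like("/usr/bin/clang++")
    assert not is_gcc_like("icc")
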
diff --git a/common/py3-stdlib/distutils/util.py b/common/py3-stdlib/distutils/util.py
index 2ce5c5b..4b002ec 100644
--- a/common/py3-stdlib/distutils/util.py
+++ b/common/py3-stdlib/distutils/util.py
@@ -9,7 +9,6 @@
 import importlib.util
 import string
 import sys
-import distutils
 from distutils.errors import DistutilsPlatformError
 from distutils.dep_util import newer
 from distutils.spawn import spawn
@@ -420,10 +419,8 @@
              direct=1)
 """ % (optimize, force, prefix, base_dir, verbose))
 
-        msg = distutils._DEPRECATION_MESSAGE
         cmd = [sys.executable]
         cmd.extend(subprocess._optim_args_from_interpreter_flags())
-        cmd.append(f'-Wignore:{msg}:DeprecationWarning')
         cmd.append(script_name)
         spawn(cmd, dry_run=dry_run)
         execute(os.remove, (script_name,), "removing %s" % script_name,
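The two lines removed here had the indirect byte-compilation subprocess suppress distutils' own DeprecationWarning (3.10 marks the package deprecated per PEP 632); the restored command is just the interpreter, its optimization flags, and the generated script. Shape of the command list, using the same private helper the code itself calls (the script path is a placeholder):

    import subprocess
    import sys

    cmd = [sys.executable]
    cmd.extend(subprocess._optim_args_from_interpreter_flags())  # e.g. ['-O']
    cmd.append('/tmp/byte_compile_script.py')                    # placeholder
    print(cmd)
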
diff --git a/common/py3-stdlib/doctest.py b/common/py3-stdlib/doctest.py
index b27cbdf..baa503c 100644
--- a/common/py3-stdlib/doctest.py
+++ b/common/py3-stdlib/doctest.py
@@ -102,7 +102,7 @@
 import sys
 import traceback
 import unittest
-from io import StringIO, IncrementalNewlineDecoder
+from io import StringIO
 from collections import namedtuple
 
 TestResults = namedtuple('TestResults', 'failed attempted')
@@ -212,24 +212,23 @@
         raise TypeError("Expected a module, string, or None")
 
 def _newline_convert(data):
-    # The IO module provides a handy decoder for universal newline conversion
-    return IncrementalNewlineDecoder(None, True).decode(data, True)
+    # We have two cases to cover and we need to make sure we do
+    # them in the right order
+    for newline in ('\r\n', '\r'):
+        data = data.replace(newline, '\n')
+    return data
 
 def _load_testfile(filename, package, module_relative, encoding):
     if module_relative:
         package = _normalize_module(package, 3)
         filename = _module_relative_path(package, filename)
-        if (loader := getattr(package, '__loader__', None)) is None:
-            try:
-                loader = package.__spec__.loader
-            except AttributeError:
-                pass
-        if hasattr(loader, 'get_data'):
-            file_contents = loader.get_data(filename)
-            file_contents = file_contents.decode(encoding)
-            # get_data() opens files as 'rb', so one must do the equivalent
-            # conversion as universal newlines would do.
-            return _newline_convert(file_contents), filename
+        if getattr(package, '__loader__', None) is not None:
+            if hasattr(package.__loader__, 'get_data'):
+                file_contents = package.__loader__.get_data(filename)
+                file_contents = file_contents.decode(encoding)
+                # get_data() opens files as 'rb', so one must do the equivalent
+                # conversion as universal newlines would do.
+                return _newline_convert(file_contents), filename
     with open(filename, encoding=encoding) as f:
         return f.read(), filename
 
@@ -973,17 +972,6 @@
         else:
             raise ValueError("object must be a class or function")
 
-    def _is_routine(self, obj):
-        """
-        Safely unwrap objects and determine if they are functions.
-        """
-        maybe_routine = obj
-        try:
-            maybe_routine = inspect.unwrap(maybe_routine)
-        except ValueError:
-            pass
-        return inspect.isroutine(maybe_routine)
-
     def _find(self, tests, obj, name, module, source_lines, globs, seen):
         """
         Find tests for the given object and any contained objects, and
@@ -1006,9 +994,9 @@
         if inspect.ismodule(obj) and self._recurse:
             for valname, val in obj.__dict__.items():
                 valname = '%s.%s' % (name, valname)
-
                 # Recurse to functions & classes.
-                if ((self._is_routine(val) or inspect.isclass(val)) and
+                if ((inspect.isroutine(inspect.unwrap(val))
+                     or inspect.isclass(val)) and
                     self._from_module(module, val)):
                     self._find(tests, val, valname, module, source_lines,
                                globs, seen)
@@ -1034,8 +1022,10 @@
         if inspect.isclass(obj) and self._recurse:
             for valname, val in obj.__dict__.items():
                 # Special handling for staticmethod/classmethod.
-                if isinstance(val, (staticmethod, classmethod)):
-                    val = val.__func__
+                if isinstance(val, staticmethod):
+                    val = getattr(obj, valname)
+                if isinstance(val, classmethod):
+                    val = getattr(obj, valname).__func__
 
                 # Recurse to methods, properties, and nested classes.
                 if ((inspect.isroutine(val) or inspect.isclass(val) or
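Two behaviours revert in this file: _load_testfile goes back to consulting only package.__loader__ (3.10 also falls back to package.__spec__.loader), and _newline_convert goes back to chained str.replace instead of the io module's universal-newline decoder. The two newline strategies agree on mixed input; a quick equivalence check:

    from io import IncrementalNewlineDecoder

    data = "one\r\ntwo\rthree\n"
    via_decoder = IncrementalNewlineDecoder(None, True).decode(data, True)
    via_replace = data
    for newline in ('\r\n', '\r'):   # \r\n first, or each \r\n would become \n\n
        via_replace = via_replace.replace(newline, '\n')
    assert via_decoder == via_replace == "one\ntwo\nthree\n"
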
diff --git a/common/py3-stdlib/email/_parseaddr.py b/common/py3-stdlib/email/_parseaddr.py
index ba5ad5a..41ff6f8 100644
--- a/common/py3-stdlib/email/_parseaddr.py
+++ b/common/py3-stdlib/email/_parseaddr.py
@@ -65,10 +65,8 @@
 
     """
     if not data:
-        return None
+        return
     data = data.split()
-    if not data:  # This happens for whitespace-only input.
-        return None
     # The FWS after the comma after the day-of-week is optional, so search and
     # adjust for this.
     if data[0].endswith(',') or data[0].lower() in _daynames:
@@ -128,8 +126,6 @@
             tss = 0
         elif len(tm) == 3:
             [thh, tmm, tss] = tm
-        else:
-            return None
     else:
         return None
     try:
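The extra emptiness check removed above guards _parsedate_tz against whitespace-only input: such a string is truthy, so it passes the first check, but str.split() on it yields an empty list and the following data[0] would raise IndexError. The edge case in two lines:

    data = "   \t "
    assert data and data.split() == []   # truthy, yet splits to nothing
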
diff --git a/common/py3-stdlib/email/base64mime.py b/common/py3-stdlib/email/base64mime.py
index a7cc373..17f0818 100644
--- a/common/py3-stdlib/email/base64mime.py
+++ b/common/py3-stdlib/email/base64mime.py
@@ -84,7 +84,7 @@
     in an email.
     """
     if not s:
-        return ""
+        return s
 
     encvec = []
     max_unencoded = maxlinelen * 3 // 4
diff --git a/common/py3-stdlib/email/contentmanager.py b/common/py3-stdlib/email/contentmanager.py
index fcf278d..b91fb0e 100644
--- a/common/py3-stdlib/email/contentmanager.py
+++ b/common/py3-stdlib/email/contentmanager.py
@@ -144,7 +144,7 @@
     linesep = policy.linesep.encode('ascii')
     def embedded_body(lines): return linesep.join(lines) + linesep
     def normal_body(lines): return b'\n'.join(lines) + b'\n'
-    if cte is None:
+    if cte==None:
         # Use heuristics to decide on the "best" encoding.
         if max((len(x) for x in lines), default=0) <= policy.max_line_length:
             try:
@@ -238,7 +238,9 @@
         data = binascii.b2a_qp(data, istext=False, header=False, quotetabs=True)
         data = data.decode('ascii')
     elif cte == '7bit':
-        data = data.decode('ascii')
+        # Make sure it really is only ASCII.  The early warning here seems
+        # worth the overhead...if you care write your own content manager :).
+        data.encode('ascii')
     elif cte in ('8bit', 'binary'):
         data = data.decode('ascii', 'surrogateescape')
     msg.set_payload(data)
diff --git a/common/py3-stdlib/email/errors.py b/common/py3-stdlib/email/errors.py
index 3ad0056..d28a680 100644
--- a/common/py3-stdlib/email/errors.py
+++ b/common/py3-stdlib/email/errors.py
@@ -108,6 +108,3 @@
     """local_part contains non-ASCII characters"""
     # This defect only occurs during unicode parsing, not when
     # parsing messages decoded from binary.
-
-class InvalidDateDefect(HeaderDefect):
-    """Header has unparsable or invalid date"""
diff --git a/common/py3-stdlib/email/headerregistry.py b/common/py3-stdlib/email/headerregistry.py
index b590d69..5d84fc0 100644
--- a/common/py3-stdlib/email/headerregistry.py
+++ b/common/py3-stdlib/email/headerregistry.py
@@ -2,6 +2,10 @@
 
 This module provides an implementation of the HeaderRegistry API.
 The implementation is designed to flexibly follow RFC5322 rules.
+
+Eventually HeaderRegistry will be a public API, but it isn't yet,
+and will probably change some before that happens.
+
 """
 from types import MappingProxyType
 
@@ -298,14 +302,7 @@
             kwds['parse_tree'] = parser.TokenList()
             return
         if isinstance(value, str):
-            kwds['decoded'] = value
-            try:
-                value = utils.parsedate_to_datetime(value)
-            except ValueError:
-                kwds['defects'].append(errors.InvalidDateDefect('Invalid date value or format'))
-                kwds['datetime'] = None
-                kwds['parse_tree'] = parser.TokenList()
-                return
+            value = utils.parsedate_to_datetime(value)
         kwds['datetime'] = value
         kwds['decoded'] = utils.format_datetime(kwds['datetime'])
         kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
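The removed branch makes a malformed Date header degrade gracefully in 3.10: the header's datetime attribute comes back None and an InvalidDateDefect is recorded, instead of the parse error escaping to the caller. A sketch of the observable difference (the comments describe assumed behaviour rather than executed assertions):

    from email import policy
    from email.parser import Parser

    msg = Parser(policy=policy.default).parsestr('Date: not a date\n\nbody\n')
    # 3.10: msg['date'].datetime is None, and the error is noted in defects
    # restored 3.9 code: accessing msg['date'] propagates the parse failure
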
diff --git a/common/py3-stdlib/email/message.py b/common/py3-stdlib/email/message.py
index 6752ce0..3701b30 100644
--- a/common/py3-stdlib/email/message.py
+++ b/common/py3-stdlib/email/message.py
@@ -948,7 +948,7 @@
         if policy is None:
             from email.policy import default
             policy = default
-        super().__init__(policy)
+        Message.__init__(self, policy)
 
 
     def as_string(self, unixfrom=False, maxheaderlen=None, policy=None):
@@ -965,7 +965,7 @@
         policy = self.policy if policy is None else policy
         if maxheaderlen is None:
             maxheaderlen = policy.max_line_length
-        return super().as_string(unixfrom, maxheaderlen, policy)
+        return super().as_string(maxheaderlen=maxheaderlen, policy=policy)
 
     def __str__(self):
         return self.as_string(policy=self.policy.clone(utf8=True))
@@ -982,7 +982,7 @@
             if subtype in preferencelist:
                 yield (preferencelist.index(subtype), part)
             return
-        if maintype != 'multipart' or not self.is_multipart():
+        if maintype != 'multipart':
             return
         if subtype != 'related':
             for subpart in part.iter_parts():
@@ -1087,7 +1087,7 @@
 
         Return an empty iterator for a non-multipart.
         """
-        if self.is_multipart():
+        if self.get_content_maintype() == 'multipart':
             yield from self.get_payload()
 
     def get_content(self, *args, content_manager=None, **kw):
diff --git a/common/py3-stdlib/email/utils.py b/common/py3-stdlib/email/utils.py
index cfdfeb3..1a7719d 100644
--- a/common/py3-stdlib/email/utils.py
+++ b/common/py3-stdlib/email/utils.py
@@ -109,7 +109,7 @@
 
 def getaddresses(fieldvalues):
     """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
-    all = COMMASPACE.join(str(v) for v in fieldvalues)
+    all = COMMASPACE.join(fieldvalues)
     a = _AddressList(all)
     return a.addresslist
 
@@ -195,10 +195,7 @@
 
 
 def parsedate_to_datetime(data):
-    parsed_date_tz = _parsedate_tz(data)
-    if parsed_date_tz is None:
-        raise ValueError('Invalid date value or format "%s"' % str(data))
-    *dtuple, tz = parsed_date_tz
+    *dtuple, tz = _parsedate_tz(data)
     if tz is None:
         return datetime.datetime(*dtuple[:6])
     return datetime.datetime(*dtuple[:6],
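The restored parsedate_to_datetime unpacks _parsedate_tz's result directly, so unparsable input fails with a TypeError from unpacking None rather than the explicit ValueError the 3.10 code raises. Valid input behaves identically either way; a probe that runs on both:

    from email.utils import parsedate_to_datetime

    dt = parsedate_to_datetime('Fri, 09 Nov 2001 01:08:47 -0000')
    assert (dt.year, dt.month, dt.day) == (2001, 11, 9)

    try:
        parsedate_to_datetime('not a date')
    except (ValueError, TypeError):   # ValueError on 3.10, TypeError here
        pass
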
diff --git a/common/py3-stdlib/encodings/__init__.py b/common/py3-stdlib/encodings/__init__.py
index 4b37d33..ddd5afd 100644
--- a/common/py3-stdlib/encodings/__init__.py
+++ b/common/py3-stdlib/encodings/__init__.py
@@ -61,8 +61,7 @@
         if c.isalnum() or c == '.':
             if punct and chars:
                 chars.append('_')
-            if c.isascii():
-                chars.append(c)
+            chars.append(c)
             punct = False
         else:
             punct = True
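The dropped isascii() filter means the restored normalizer copies any alphanumeric character through, while 3.10 additionally strips non-ASCII ones from codec lookup names. A standalone sketch of both behaviours (the helper and the sample names are invented for illustration):

    def normalize(encoding, ascii_only):
        chars = []
        punct = False
        for c in encoding:
            if c.isalnum() or c == '.':
                if punct and chars:
                    chars.append('_')
                if not ascii_only or c.isascii():
                    chars.append(c)
                punct = False
            else:
                punct = True
        return ''.join(chars)

    assert normalize('utf-8', ascii_only=True) == 'utf_8'
    assert normalize('utf\u00e9-8', ascii_only=True) == 'utf_8'       # é stripped
    assert normalize('utf\u00e9-8', ascii_only=False) == 'utf\u00e9_8'
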
diff --git a/common/py3-stdlib/encodings/raw_unicode_escape.py b/common/py3-stdlib/encodings/raw_unicode_escape.py
index 46c8e07..2b919b4 100644
--- a/common/py3-stdlib/encodings/raw_unicode_escape.py
+++ b/common/py3-stdlib/encodings/raw_unicode_escape.py
@@ -21,16 +21,15 @@
     def encode(self, input, final=False):
         return codecs.raw_unicode_escape_encode(input, self.errors)[0]
 
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
-    def _buffer_decode(self, input, errors, final):
-        return codecs.raw_unicode_escape_decode(input, errors, final)
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        return codecs.raw_unicode_escape_decode(input, self.errors)[0]
 
 class StreamWriter(Codec,codecs.StreamWriter):
     pass
 
 class StreamReader(Codec,codecs.StreamReader):
-    def decode(self, input, errors='strict'):
-        return codecs.raw_unicode_escape_decode(input, errors, False)
+    pass
 
 ### encodings module API
 
diff --git a/common/py3-stdlib/encodings/unicode_escape.py b/common/py3-stdlib/encodings/unicode_escape.py
index 9b1ce99..817f932 100644
--- a/common/py3-stdlib/encodings/unicode_escape.py
+++ b/common/py3-stdlib/encodings/unicode_escape.py
@@ -21,16 +21,15 @@
     def encode(self, input, final=False):
         return codecs.unicode_escape_encode(input, self.errors)[0]
 
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
-    def _buffer_decode(self, input, errors, final):
-        return codecs.unicode_escape_decode(input, errors, final)
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        return codecs.unicode_escape_decode(input, self.errors)[0]
 
 class StreamWriter(Codec,codecs.StreamWriter):
     pass
 
 class StreamReader(Codec,codecs.StreamReader):
-    def decode(self, input, errors='strict'):
-        return codecs.unicode_escape_decode(input, errors, False)
+    pass
 
 ### encodings module API
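Both escape codecs lose their buffered incremental decoders in this revert. The buffering matters when an escape sequence straddles a feed boundary, which is exactly the situation incremental decoding exists for; a probe that runs on either version:

    import codecs

    dec = codecs.getincrementaldecoder('unicode_escape')()
    try:
        out = dec.decode(b'\\u00')              # escape split across chunks
        out += dec.decode(b'41', final=True)
        print(out)                              # buffered decoder (3.10): 'A'
    except UnicodeDecodeError:
        print('truncated escape rejected')      # unbuffered decoder (restored here)
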
 
diff --git a/common/py3-stdlib/enum.py b/common/py3-stdlib/enum.py
index f5657a6..ebadd9f 100644
--- a/common/py3-stdlib/enum.py
+++ b/common/py3-stdlib/enum.py
@@ -10,55 +10,31 @@
 
 
 def _is_descriptor(obj):
-    """
-    Returns True if obj is a descriptor, False otherwise.
-    """
+    """Returns True if obj is a descriptor, False otherwise."""
     return (
             hasattr(obj, '__get__') or
             hasattr(obj, '__set__') or
-            hasattr(obj, '__delete__')
-            )
+            hasattr(obj, '__delete__'))
+
 
 def _is_dunder(name):
-    """
-    Returns True if a __dunder__ name, False otherwise.
-    """
-    return (
-            len(name) > 4 and
+    """Returns True if a __dunder__ name, False otherwise."""
+    return (len(name) > 4 and
             name[:2] == name[-2:] == '__' and
             name[2] != '_' and
-            name[-3] != '_'
-            )
+            name[-3] != '_')
+
 
 def _is_sunder(name):
-    """
-    Returns True if a _sunder_ name, False otherwise.
-    """
-    return (
-            len(name) > 2 and
+    """Returns True if a _sunder_ name, False otherwise."""
+    return (len(name) > 2 and
             name[0] == name[-1] == '_' and
             name[1:2] != '_' and
-            name[-2:-1] != '_'
-            )
+            name[-2:-1] != '_')
 
-def _is_private(cls_name, name):
-    # do not use `re` as `re` imports `enum`
-    pattern = '_%s__' % (cls_name, )
-    pat_len = len(pattern)
-    if (
-            len(name) > pat_len
-            and name.startswith(pattern)
-            and name[pat_len:pat_len+1] != ['_']
-            and (name[-1] != '_' or name[-2] != '_')
-        ):
-        return True
-    else:
-        return False
 
 def _make_class_unpicklable(cls):
-    """
-    Make the given class un-picklable.
-    """
+    """Make the given class un-picklable."""
     def _break_on_call_reduce(self, proto):
         raise TypeError('%r cannot be pickled' % self)
     cls.__reduce_ex__ = _break_on_call_reduce
@@ -73,11 +49,11 @@
 
 
 class _EnumDict(dict):
-    """
-    Track enum member order and ensure member names are not reused.
+    """Track enum member order and ensure member names are not reused.
 
     EnumMeta will use the names found in self._member_names as the
     enumeration member names.
+
     """
     def __init__(self):
         super().__init__()
@@ -87,22 +63,14 @@
         self._auto_called = False
 
     def __setitem__(self, key, value):
-        """
-        Changes anything not dundered or not a descriptor.
+        """Changes anything not dundered or not a descriptor.
 
         If an enum member name is used twice, an error is raised; duplicate
         values are not checked for.
 
         Single underscore (sunder) names are reserved.
+
         """
-        if _is_private(self._cls_name, key):
-            import warnings
-            warnings.warn(
-                    "private variables, such as %r, will be normal attributes in 3.11"
-                        % (key, ),
-                    DeprecationWarning,
-                    stacklevel=2,
-                    )
         if _is_sunder(key):
             if key not in (
                     '_order_', '_create_pseudo_member_',
@@ -122,10 +90,7 @@
                 self._ignore = value
                 already = set(value) & set(self._member_names)
                 if already:
-                    raise ValueError(
-                            '_ignore_ cannot specify already set names: %r'
-                            % (already, )
-                            )
+                    raise ValueError('_ignore_ cannot specify already set names: %r' % (already, ))
         elif _is_dunder(key):
             if key == '__order__':
                 key = '_order_'
@@ -140,12 +105,7 @@
                 raise TypeError('%r already defined as: %r' % (key, self[key]))
             if isinstance(value, auto):
                 if value.value == _auto_null:
-                    value.value = self._generate_next_value(
-                            key,
-                            1,
-                            len(self._member_names),
-                            self._last_values[:],
-                            )
+                    value.value = self._generate_next_value(key, 1, len(self._member_names), self._last_values[:])
                     self._auto_called = True
                 value = value.value
             self._member_names.append(key)
@@ -158,26 +118,22 @@
 # This is also why there are checks in EnumMeta like `if Enum is not None`
 Enum = None
 
+
 class EnumMeta(type):
-    """
-    Metaclass for Enum
-    """
+    """Metaclass for Enum"""
     @classmethod
-    def __prepare__(metacls, cls, bases, **kwds):
+    def __prepare__(metacls, cls, bases):
         # check that previous enum members do not exist
         metacls._check_for_existing_members(cls, bases)
         # create the namespace dict
         enum_dict = _EnumDict()
-        enum_dict._cls_name = cls
         # inherit previous flags and _generate_next_value_ function
         member_type, first_enum = metacls._get_mixins_(cls, bases)
         if first_enum is not None:
-            enum_dict['_generate_next_value_'] = getattr(
-                    first_enum, '_generate_next_value_', None,
-                    )
+            enum_dict['_generate_next_value_'] = getattr(first_enum, '_generate_next_value_', None)
         return enum_dict
 
-    def __new__(metacls, cls, bases, classdict, **kwds):
+    def __new__(metacls, cls, bases, classdict):
         # an Enum class is final once enumeration items have been defined; it
         # cannot be mixed with other types (int, float, etc.) if it has an
         # inherited __new__ unless a new __new__ is defined (or the resulting
@@ -189,9 +145,8 @@
         for key in ignore:
             classdict.pop(key, None)
         member_type, first_enum = metacls._get_mixins_(cls, bases)
-        __new__, save_new, use_args = metacls._find_new_(
-                classdict, member_type, first_enum,
-                )
+        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
+                                                        first_enum)
 
         # save enum items into separate mapping so they don't get baked into
         # the new class
@@ -212,18 +167,17 @@
         if '__doc__' not in classdict:
             classdict['__doc__'] = 'An enumeration.'
 
-        enum_class = super().__new__(metacls, cls, bases, classdict, **kwds)
+        # create our new Enum type
+        enum_class = super().__new__(metacls, cls, bases, classdict)
         enum_class._member_names_ = []               # names in definition order
         enum_class._member_map_ = {}                 # name->value map
         enum_class._member_type_ = member_type
 
         # save DynamicClassAttribute attributes from super classes so we know
         # if we can take the shortcut of storing members in the class dict
-        dynamic_attributes = {
-                k for c in enum_class.mro()
-                for k, v in c.__dict__.items()
-                if isinstance(v, DynamicClassAttribute)
-                }
+        dynamic_attributes = {k for c in enum_class.mro()
+                              for k, v in c.__dict__.items()
+                              if isinstance(v, DynamicClassAttribute)}
 
         # Reverse value->name map for hashable values.
         enum_class._value2member_map_ = {}
@@ -243,32 +197,8 @@
                 methods = ('__getnewargs_ex__', '__getnewargs__',
                         '__reduce_ex__', '__reduce__')
                 if not any(m in member_type.__dict__ for m in methods):
-                    if '__new__' in classdict:
-                        # too late, sabotage
-                        _make_class_unpicklable(enum_class)
-                    else:
-                        # final attempt to verify that pickling would work:
-                        # travel mro until __new__ is found, checking for
-                        # __reduce__ and friends along the way -- if any of them
-                        # are found before/when __new__ is found, pickling should
-                        # work
-                        sabotage = None
-                        for chain in bases:
-                            for base in chain.__mro__:
-                                if base is object:
-                                    continue
-                                elif any(m in base.__dict__ for m in methods):
-                                    # found one, we're good
-                                    sabotage = False
-                                    break
-                                elif '__new__' in base.__dict__:
-                                    # not good
-                                    sabotage = True
-                                    break
-                            if sabotage is not None:
-                                break
-                        if sabotage:
-                            _make_class_unpicklable(enum_class)
+                    _make_class_unpicklable(enum_class)
+
         # instantiate them, checking for duplicates as we go
         # we instantiate first instead of checking for duplicates first in case
         # a custom __new__ is doing something funky with the values -- such as
@@ -357,8 +287,7 @@
         return True
 
     def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
-        """
-        Either returns an existing member, or creates a new enum class.
+        """Either returns an existing member, or creates a new enum class.
 
         This method is used both when an enum class is given a value to match
         to an enumeration member (i.e. Color(3)) and for the functional API
@@ -380,54 +309,40 @@
         not correct, unpickling will fail in some circumstances.
 
         `type`, if set, will be mixed in as the first base class.
+
         """
         if names is None:  # simple value lookup
             return cls.__new__(cls, value)
         # otherwise, functional API: we're creating a new Enum type
-        return cls._create_(
-                value,
-                names,
-                module=module,
-                qualname=qualname,
-                type=type,
-                start=start,
-                )
+        return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start)
 
-    def __contains__(cls, obj):
-        if not isinstance(obj, Enum):
-            import warnings
-            warnings.warn(
-                    "in 3.12 __contains__ will no longer raise TypeError, but will return True if\n"
-                    "obj is a member or a member's value",
-                    DeprecationWarning,
-                    stacklevel=2,
-                    )
+    def __contains__(cls, member):
+        if not isinstance(member, Enum):
             raise TypeError(
                 "unsupported operand type(s) for 'in': '%s' and '%s'" % (
-                    type(obj).__qualname__, cls.__class__.__qualname__))
-        return isinstance(obj, cls) and obj._name_ in cls._member_map_
+                    type(member).__qualname__, cls.__class__.__qualname__))
+        return isinstance(member, cls) and member._name_ in cls._member_map_
 
     def __delattr__(cls, attr):
         # nicer error message when someone tries to delete an attribute
         # (see issue19025).
         if attr in cls._member_map_:
-            raise AttributeError("%s: cannot delete Enum member." % cls.__name__)
+            raise AttributeError(
+                    "%s: cannot delete Enum member." % cls.__name__)
         super().__delattr__(attr)
 
     def __dir__(self):
-        return (
-                ['__class__', '__doc__', '__members__', '__module__']
-                + self._member_names_
-                )
+        return (['__class__', '__doc__', '__members__', '__module__'] +
+                self._member_names_)
 
     def __getattr__(cls, name):
-        """
-        Return the enum member matching `name`
+        """Return the enum member matching `name`
 
         We use __getattr__ instead of descriptors or inserting into the enum
         class' __dict__ in order to support `name` and `value` being both
         properties for enum members (which live in the class' __dict__) and
         enum members themselves.
+
         """
         if _is_dunder(name):
             raise AttributeError(name)
@@ -440,9 +355,6 @@
         return cls._member_map_[name]
 
     def __iter__(cls):
-        """
-        Returns members in definition order.
-        """
         return (cls._member_map_[name] for name in cls._member_names_)
 
     def __len__(cls):
@@ -450,11 +362,11 @@
 
     @property
     def __members__(cls):
-        """
-        Returns a mapping of member name->value.
+        """Returns a mapping of member name->value.
 
         This mapping lists all enum members, including aliases. Note that this
         is a read-only view of the internal mapping.
+
         """
         return MappingProxyType(cls._member_map_)
 
@@ -462,18 +374,15 @@
         return "<enum %r>" % cls.__name__
 
     def __reversed__(cls):
-        """
-        Returns members in reverse definition order.
-        """
         return (cls._member_map_[name] for name in reversed(cls._member_names_))
 
     def __setattr__(cls, name, value):
-        """
-        Block attempts to reassign Enum members.
+        """Block attempts to reassign Enum members.
 
         A simple assignment to the class namespace only changes one of the
         several possible ways to get an Enum member from the Enum class,
         resulting in an inconsistent Enumeration.
+
         """
         member_map = cls.__dict__.get('_member_map_', {})
         if name in member_map:
@@ -481,8 +390,7 @@
         super().__setattr__(name, value)
 
     def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1):
-        """
-        Convenience method to create a new Enum class.
+        """Convenience method to create a new Enum class.
 
         `names` can be:
 
@@ -491,6 +399,7 @@
         * An iterable of member names.  Values are incremented by 1 from `start`.
         * An iterable of (member name, value) pairs.
         * A mapping of member name -> value pairs.
+
         """
         metacls = cls.__class__
         bases = (cls, ) if type is None else (type, cls)
@@ -571,44 +480,37 @@
         for chain in bases:
             for base in chain.__mro__:
                 if issubclass(base, Enum) and base._member_names_:
-                    raise TypeError(
-                            "%s: cannot extend enumeration %r"
-                            % (class_name, base.__name__)
-                            )
+                    raise TypeError("%s: cannot extend enumeration %r" % (class_name, base.__name__))
 
     @staticmethod
     def _get_mixins_(class_name, bases):
-        """
-        Returns the type for creating enum members, and the first inherited
+        """Returns the type for creating enum members, and the first inherited
         enum class.
 
         bases: the tuple of bases that was given to __new__
+
         """
         if not bases:
             return object, Enum
 
         def _find_data_type(bases):
-            data_types = set()
+            data_types = []
             for chain in bases:
                 candidate = None
                 for base in chain.__mro__:
                     if base is object:
                         continue
-                    elif issubclass(base, Enum):
-                        if base._member_type_ is not object:
-                            data_types.add(base._member_type_)
-                            break
                     elif '__new__' in base.__dict__:
                         if issubclass(base, Enum):
                             continue
-                        data_types.add(candidate or base)
+                        data_types.append(candidate or base)
                         break
-                    else:
-                        candidate = candidate or base
+                    elif not issubclass(base, Enum):
+                        candidate = base
             if len(data_types) > 1:
                 raise TypeError('%r: too many data types: %r' % (class_name, data_types))
             elif data_types:
-                return data_types.pop()
+                return data_types[0]
             else:
                 return None
 
@@ -625,12 +527,12 @@
 
     @staticmethod
     def _find_new_(classdict, member_type, first_enum):
-        """
-        Returns the __new__ to be used for creating the enum members.
+        """Returns the __new__ to be used for creating the enum members.
 
         classdict: the class dictionary given to __new__
         member_type: the data type whose __new__ will be used by default
         first_enum: enumeration to check for an overriding __new__
+
         """
         # now find the correct __new__, checking to see of one was defined
         # by the user; also check earlier enum classes in case a __new__ was
@@ -670,10 +572,10 @@
 
 
 class Enum(metaclass=EnumMeta):
-    """
-    Generic enumeration.
+    """Generic enumeration.
 
     Derive from this class to define new enumerations.
+
     """
     def __new__(cls, value):
         # all enum instances are actually created during class construction
@@ -701,35 +603,21 @@
         except Exception as e:
             exc = e
             result = None
-        try:
-            if isinstance(result, cls):
-                return result
-            else:
-                ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__))
-                if result is None and exc is None:
-                    raise ve_exc
-                elif exc is None:
-                    exc = TypeError(
-                            'error in %s._missing_: returned %r instead of None or a valid member'
-                            % (cls.__name__, result)
-                            )
-                if not isinstance(exc, ValueError):
-                    exc.__context__ = ve_exc
-                raise exc
-        finally:
-            # ensure all variables that could hold an exception are destroyed
-            exc = None
-            ve_exc = None
+        if isinstance(result, cls):
+            return result
+        else:
+            ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__))
+            if result is None and exc is None:
+                raise ve_exc
+            elif exc is None:
+                exc = TypeError(
+                        'error in %s._missing_: returned %r instead of None or a valid member'
+                        % (cls.__name__, result)
+                        )
+            exc.__context__ = ve_exc
+            raise exc
 
     def _generate_next_value_(name, start, count, last_values):
-        """
-        Generate the next value when not given.
-
-        name: the name of the member
-        start: the initial start value or None
-        count: the number of existing members
-        last_value: the last value assigned or None
-        """
         for last_value in reversed(last_values):
             try:
                 return last_value + 1
@@ -750,27 +638,21 @@
         return "%s.%s" % (self.__class__.__name__, self._name_)
 
     def __dir__(self):
-        """
-        Returns all members and all public methods
-        """
         added_behavior = [
                 m
                 for cls in self.__class__.mro()
                 for m in cls.__dict__
                 if m[0] != '_' and m not in self._member_map_
-                ] + [m for m in self.__dict__ if m[0] != '_']
+                ]
         return (['__class__', '__doc__', '__module__'] + added_behavior)
 
     def __format__(self, format_spec):
-        """
-        Returns format using actual value type unless __str__ has been overridden.
-        """
         # mixed-in Enums should use the mixed-in type's __format__, otherwise
         # we can get strange results with the Enum name showing up instead of
         # the value
 
         # pure Enum branch, or branch with __str__ explicitly overridden
-        str_overridden = type(self).__str__ not in (Enum.__str__, Flag.__str__)
+        str_overridden = type(self).__str__ != Enum.__str__
         if self._member_type_ is object or str_overridden:
             cls = str
             val = str(self)
@@ -812,9 +694,7 @@
     return self.name
 
 class Flag(Enum):
-    """
-    Support for flags
-    """
+    """Support for flags"""
 
     def _generate_next_value_(name, start, count, last_values):
         """
@@ -837,9 +717,6 @@
 
     @classmethod
     def _missing_(cls, value):
-        """
-        Returns member (possibly creating it) if one can be found for value.
-        """
         original_value = value
         if value < 0:
             value = ~value
@@ -869,9 +746,6 @@
         return pseudo_member
 
     def __contains__(self, other):
-        """
-        Returns True if self has at least the same flags set as other.
-        """
         if not isinstance(other, self.__class__):
             raise TypeError(
                 "unsupported operand type(s) for 'in': '%s' and '%s'" % (
@@ -930,15 +804,10 @@
 
 
 class IntFlag(int, Flag):
-    """
-    Support for integer-based Flags
-    """
+    """Support for integer-based Flags"""
 
     @classmethod
     def _missing_(cls, value):
-        """
-        Returns member (possibly creating it) if one can be found for value.
-        """
         if not isinstance(value, int):
             raise ValueError("%r is not a valid %s" % (value, cls.__qualname__))
         new_member = cls._create_pseudo_member_(value)
@@ -946,9 +815,6 @@
 
     @classmethod
     def _create_pseudo_member_(cls, value):
-        """
-        Create a composite member iff value contains only members.
-        """
         pseudo_member = cls._value2member_map_.get(value, None)
         if pseudo_member is None:
             need_to_create = [value]
@@ -1003,15 +869,11 @@
 
 
 def _high_bit(value):
-    """
-    returns index of highest bit, or -1 if value is zero or negative
-    """
+    """returns index of highest bit, or -1 if value is zero or negative"""
     return value.bit_length() - 1
 
 def unique(enumeration):
-    """
-    Class decorator for enumerations ensuring unique member values.
-    """
+    """Class decorator for enumerations ensuring unique member values."""
     duplicates = []
     for name, member in enumeration.__members__.items():
         if name != member.name:
@@ -1024,9 +886,7 @@
     return enumeration
 
 def _decompose(flag, value):
-    """
-    Extract all members from the value.
-    """
+    """Extract all members from the value."""
     # _decompose is only called if the value is not named
     not_covered = value
     negative = value < 0
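Among the many reverted enum changes, __contains__ is the most visible at call sites: both versions reject non-member operands with a TypeError, but 3.10 first emits a DeprecationWarning announcing the value-based behaviour planned for 3.12. A probe that runs on either version:

    from enum import Enum

    class Color(Enum):
        RED = 1

    assert Color.RED in Color   # members always test true
    try:
        1 in Color              # raw values are rejected...
    except TypeError:
        pass                    # ...after a DeprecationWarning on 3.10
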
diff --git a/common/py3-stdlib/filecmp.py b/common/py3-stdlib/filecmp.py
index 70a4b23..7a4da6b 100644
--- a/common/py3-stdlib/filecmp.py
+++ b/common/py3-stdlib/filecmp.py
@@ -36,9 +36,8 @@
 
     f2 -- Second file name
 
-    shallow -- treat files as identical if their stat signatures (type, size,
-               mtime) are identical. Otherwise, files are considered different
-               if their sizes or contents differ.  [default: True]
+    shallow -- Just check stat signature (do not read the files).
+               defaults to True.
 
     Return value:
 
@@ -116,9 +115,7 @@
      same_files: list of identical files.
      diff_files: list of filenames which differ.
      funny_files: list of files which could not be compared.
-     subdirs: a dictionary of dircmp instances (or MyDirCmp instances if this
-       object is of type MyDirCmp, a subclass of dircmp), keyed by names
-       in common_dirs.
+     subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
      """
 
     def __init__(self, a, b, ignore=None, hide=None): # Initialize
@@ -188,15 +185,14 @@
         self.same_files, self.diff_files, self.funny_files = xx
 
     def phase4(self): # Find out differences between common subdirectories
-        # A new dircmp (or MyDirCmp if dircmp was subclassed) object is created
-        # for each common subdirectory,
+        # A new dircmp object is created for each common subdirectory,
         # these are stored in a dictionary indexed by filename.
         # The hide and ignore properties are inherited from the parent
         self.subdirs = {}
         for x in self.common_dirs:
             a_x = os.path.join(self.left, x)
             b_x = os.path.join(self.right, x)
-            self.subdirs[x]  = self.__class__(a_x, b_x, self.ignore, self.hide)
+            self.subdirs[x]  = dircmp(a_x, b_x, self.ignore, self.hide)
 
     def phase4_closure(self): # Recursively call phase4() on subdirectories
         self.phase4()
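The phase4 revert (self.__class__ back to the hard-coded dircmp) determines what type subclasses see in subdirs when the comparison recurses. A runnable probe with throwaway directories:

    import filecmp
    import os
    import tempfile

    class MyDirCmp(filecmp.dircmp):
        pass

    with tempfile.TemporaryDirectory() as a, tempfile.TemporaryDirectory() as b:
        os.mkdir(os.path.join(a, 'sub'))
        os.mkdir(os.path.join(b, 'sub'))
        sub = MyDirCmp(a, b).subdirs['sub']
        print(type(sub).__name__)   # MyDirCmp on 3.10; dircmp with the restored code
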
diff --git a/common/py3-stdlib/fileinput.py b/common/py3-stdlib/fileinput.py
index 3534718..0c31f93 100644
--- a/common/py3-stdlib/fileinput.py
+++ b/common/py3-stdlib/fileinput.py
@@ -3,7 +3,7 @@
 Typical use is:
 
     import fileinput
-    for line in fileinput.input(encoding="utf-8"):
+    for line in fileinput.input():
         process(line)
 
 This iterates over the lines of all files listed in sys.argv[1:],
@@ -63,9 +63,15 @@
 deleted when the output file is closed.  In-place filtering is
 disabled when standard input is read.  XXX The current implementation
 does not work for MS-DOS 8+3 filesystems.
+
+XXX Possible additions:
+
+- optional getopt argument processing
+- isatty()
+- read(), read(size), even readlines()
+
 """
 
-import io
 import sys, os
 from types import GenericAlias
 
@@ -75,8 +81,7 @@
 
 _state = None
 
-def input(files=None, inplace=False, backup="", *, mode="r", openhook=None,
-          encoding=None, errors=None):
+def input(files=None, inplace=False, backup="", *, mode="r", openhook=None):
     """Return an instance of the FileInput class, which can be iterated.
 
     The parameters are passed to the constructor of the FileInput class.
@@ -86,8 +91,7 @@
     global _state
     if _state and _state._file:
         raise RuntimeError("input() already active")
-    _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook,
-                       encoding=encoding, errors=errors)
+    _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook)
     return _state
 
 def close():
@@ -182,7 +186,7 @@
     """
 
     def __init__(self, files=None, inplace=False, backup="", *,
-                 mode="r", openhook=None, encoding=None, errors=None):
+                 mode="r", openhook=None):
         if isinstance(files, str):
             files = (files,)
         elif isinstance(files, os.PathLike):
@@ -205,17 +209,6 @@
         self._file = None
         self._isstdin = False
         self._backupfilename = None
-        self._encoding = encoding
-        self._errors = errors
-
-        # We can not use io.text_encoding() here because old openhook doesn't
-        # take encoding parameter.
-        if (sys.flags.warn_default_encoding and
-                "b" not in mode and encoding is None and openhook is None):
-            import warnings
-            warnings.warn("'encoding' argument not specified.",
-                          EncodingWarning, 2)
-
         # restrict mode argument to reading modes
         if mode not in ('r', 'rU', 'U', 'rb'):
             raise ValueError("FileInput opening mode must be one of "
@@ -331,13 +324,6 @@
         self._file = None
         self._isstdin = False
         self._backupfilename = 0
-
-        # EncodingWarning is emitted in __init__() already
-        if "b" not in self._mode:
-            encoding = self._encoding or "locale"
-        else:
-            encoding = None
-
         if self._filename == '-':
             self._filename = '<stdin>'
             if 'b' in self._mode:
@@ -355,18 +341,18 @@
                     pass
                 # The next few lines may raise OSError
                 os.rename(self._filename, self._backupfilename)
-                self._file = open(self._backupfilename, self._mode, encoding=encoding)
+                self._file = open(self._backupfilename, self._mode)
                 try:
                     perm = os.fstat(self._file.fileno()).st_mode
                 except OSError:
-                    self._output = open(self._filename, self._write_mode, encoding=encoding)
+                    self._output = open(self._filename, self._write_mode)
                 else:
                     mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
                     if hasattr(os, 'O_BINARY'):
                         mode |= os.O_BINARY
 
                     fd = os.open(self._filename, mode, perm)
-                    self._output = os.fdopen(fd, self._write_mode, encoding=encoding)
+                    self._output = os.fdopen(fd, self._write_mode)
                     try:
                         os.chmod(self._filename, perm)
                     except OSError:
@@ -376,15 +362,9 @@
             else:
                 # This may raise OSError
                 if self._openhook:
-                    # Custom hooks made previous to Python 3.10 didn't have
-                    # encoding argument
-                    if self._encoding is None:
-                        self._file = self._openhook(self._filename, self._mode)
-                    else:
-                        self._file = self._openhook(
-                            self._filename, self._mode, encoding=self._encoding, errors=self._errors)
+                    self._file = self._openhook(self._filename, self._mode)
                 else:
-                    self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors)
+                    self._file = open(self._filename, self._mode)
         self._readline = self._file.readline  # hide FileInput._readline
         return self._readline()
 
@@ -415,23 +395,16 @@
     __class_getitem__ = classmethod(GenericAlias)
 
 
-def hook_compressed(filename, mode, *, encoding=None, errors=None):
-    if encoding is None:  # EncodingWarning is emitted in FileInput() already.
-        encoding = "locale"
+def hook_compressed(filename, mode):
     ext = os.path.splitext(filename)[1]
     if ext == '.gz':
         import gzip
-        stream = gzip.open(filename, mode)
+        return gzip.open(filename, mode)
     elif ext == '.bz2':
         import bz2
-        stream = bz2.BZ2File(filename, mode)
+        return bz2.BZ2File(filename, mode)
     else:
-        return open(filename, mode, encoding=encoding, errors=errors)
-
-    # gzip and bz2 are binary mode by default.
-    if "b" not in mode:
-        stream = io.TextIOWrapper(stream, encoding=encoding, errors=errors)
-    return stream
+        return open(filename, mode)
 
 
 def hook_encoded(encoding, errors=None):
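
For context on the fileinput hunks above: after the revert, openhook callables
are invoked as hook(filename, mode) with no encoding/errors plumbing, and
hook_compressed() returns whatever gzip.open()/bz2.BZ2File() yields in the
requested mode (binary by default). A minimal usage sketch, assuming a
hypothetical logs/app.log.gz:

    import fileinput

    # With the pre-3.10 hook restored, a .gz file opened in the default 'r'
    # mode comes back as a binary stream, so each line is bytes.
    with fileinput.input(files=['logs/app.log.gz'],
                         openhook=fileinput.hook_compressed) as fi:
        for line in fi:
            print(line)
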
diff --git a/common/py3-stdlib/fnmatch.py b/common/py3-stdlib/fnmatch.py
index 7c52c23..0eb1802 100644
--- a/common/py3-stdlib/fnmatch.py
+++ b/common/py3-stdlib/fnmatch.py
@@ -52,7 +52,7 @@
     return re.compile(res).match
 
 def filter(names, pat):
-    """Construct a list from those elements of the iterable NAMES that match PAT."""
+    """Return the subset of the list NAMES that match PAT."""
     result = []
     pat = os.path.normcase(pat)
     match = _compile_pattern(pat)
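
The docstring change above is behavior-neutral; filter() still compiles PAT
once and applies it across the whole list, e.g.:

    import fnmatch

    assert fnmatch.filter(['a.py', 'b.txt', 'c.py'], '*.py') == ['a.py', 'c.py']
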
diff --git a/common/py3-stdlib/formatter.py b/common/py3-stdlib/formatter.py
new file mode 100644
index 0000000..e2394de
--- /dev/null
+++ b/common/py3-stdlib/formatter.py
@@ -0,0 +1,452 @@
+"""Generic output formatting.
+
+Formatter objects transform an abstract flow of formatting events into
+specific output events on writer objects. Formatters manage several stack
+structures to allow various properties of a writer object to be changed and
+restored; writers need not be able to handle relative changes nor any sort
+of ``change back'' operation. Specific writer properties which may be
+controlled via formatter objects are horizontal alignment, font, and left
+margin indentations. A mechanism is provided which supports providing
+arbitrary, non-exclusive style settings to a writer as well. Additional
+interfaces facilitate formatting events which are not reversible, such as
+paragraph separation.
+
+Writer objects encapsulate device interfaces. Abstract devices, such as
+file formats, are supported as well as physical devices. The provided
+implementations all work with abstract devices. The interface makes
+available mechanisms for setting the properties which formatter objects
+manage and inserting data into the output.
+"""
+
+import sys
+import warnings
+warnings.warn('the formatter module is deprecated', DeprecationWarning,
+              stacklevel=2)
+
+
+AS_IS = None
+
+
+class NullFormatter:
+    """A formatter which does nothing.
+
+    If the writer parameter is omitted, a NullWriter instance is created.
+    No methods of the writer are called by NullFormatter instances.
+
+    Implementations should inherit from this class if implementing a
+    formatter interface but don't need to inherit any implementation.
+
+    """
+
+    def __init__(self, writer=None):
+        if writer is None:
+            writer = NullWriter()
+        self.writer = writer
+    def end_paragraph(self, blankline): pass
+    def add_line_break(self): pass
+    def add_hor_rule(self, *args, **kw): pass
+    def add_label_data(self, format, counter, blankline=None): pass
+    def add_flowing_data(self, data): pass
+    def add_literal_data(self, data): pass
+    def flush_softspace(self): pass
+    def push_alignment(self, align): pass
+    def pop_alignment(self): pass
+    def push_font(self, x): pass
+    def pop_font(self): pass
+    def push_margin(self, margin): pass
+    def pop_margin(self): pass
+    def set_spacing(self, spacing): pass
+    def push_style(self, *styles): pass
+    def pop_style(self, n=1): pass
+    def assert_line_data(self, flag=1): pass
+
+
+class AbstractFormatter:
+    """The standard formatter.
+
+    This implementation has demonstrated wide applicability to many writers,
+    and may be used directly in most circumstances.  It has been used to
+    implement a full-featured World Wide Web browser.
+
+    """
+
+    #  Space handling policy:  blank spaces at the boundary between elements
+    #  are handled by the outermost context.  "Literal" data is not checked
+    #  to determine context, so spaces in literal data are handled directly
+    #  in all circumstances.
+
+    def __init__(self, writer):
+        self.writer = writer            # Output device
+        self.align = None               # Current alignment
+        self.align_stack = []           # Alignment stack
+        self.font_stack = []            # Font state
+        self.margin_stack = []          # Margin state
+        self.spacing = None             # Vertical spacing state
+        self.style_stack = []           # Other state, e.g. color
+        self.nospace = 1                # Should leading space be suppressed
+        self.softspace = 0              # Should a space be inserted
+        self.para_end = 1               # Just ended a paragraph
+        self.parskip = 0                # Skipped space between paragraphs?
+        self.hard_break = 1             # Have a hard break
+        self.have_label = 0
+
+    def end_paragraph(self, blankline):
+        if not self.hard_break:
+            self.writer.send_line_break()
+            self.have_label = 0
+        if self.parskip < blankline and not self.have_label:
+            self.writer.send_paragraph(blankline - self.parskip)
+            self.parskip = blankline
+            self.have_label = 0
+        self.hard_break = self.nospace = self.para_end = 1
+        self.softspace = 0
+
+    def add_line_break(self):
+        if not (self.hard_break or self.para_end):
+            self.writer.send_line_break()
+            self.have_label = self.parskip = 0
+        self.hard_break = self.nospace = 1
+        self.softspace = 0
+
+    def add_hor_rule(self, *args, **kw):
+        if not self.hard_break:
+            self.writer.send_line_break()
+        self.writer.send_hor_rule(*args, **kw)
+        self.hard_break = self.nospace = 1
+        self.have_label = self.para_end = self.softspace = self.parskip = 0
+
+    def add_label_data(self, format, counter, blankline = None):
+        if self.have_label or not self.hard_break:
+            self.writer.send_line_break()
+        if not self.para_end:
+            self.writer.send_paragraph((blankline and 1) or 0)
+        if isinstance(format, str):
+            self.writer.send_label_data(self.format_counter(format, counter))
+        else:
+            self.writer.send_label_data(format)
+        self.nospace = self.have_label = self.hard_break = self.para_end = 1
+        self.softspace = self.parskip = 0
+
+    def format_counter(self, format, counter):
+        label = ''
+        for c in format:
+            if c == '1':
+                label = label + ('%d' % counter)
+            elif c in 'aA':
+                if counter > 0:
+                    label = label + self.format_letter(c, counter)
+            elif c in 'iI':
+                if counter > 0:
+                    label = label + self.format_roman(c, counter)
+            else:
+                label = label + c
+        return label
+
+    def format_letter(self, case, counter):
+        label = ''
+        while counter > 0:
+            counter, x = divmod(counter-1, 26)
+            # This makes a strong assumption that lowercase letters
+            # and uppercase letters form two contiguous blocks, with
+            # letters in order!
+            s = chr(ord(case) + x)
+            label = s + label
+        return label
+
+    def format_roman(self, case, counter):
+        ones = ['i', 'x', 'c', 'm']
+        fives = ['v', 'l', 'd']
+        label, index = '', 0
+        # This will die of IndexError when counter is too big
+        while counter > 0:
+            counter, x = divmod(counter, 10)
+            if x == 9:
+                label = ones[index] + ones[index+1] + label
+            elif x == 4:
+                label = ones[index] + fives[index] + label
+            else:
+                if x >= 5:
+                    s = fives[index]
+                    x = x-5
+                else:
+                    s = ''
+                s = s + ones[index]*x
+                label = s + label
+            index = index + 1
+        if case == 'I':
+            return label.upper()
+        return label
+
+    def add_flowing_data(self, data):
+        if not data: return
+        prespace = data[:1].isspace()
+        postspace = data[-1:].isspace()
+        data = " ".join(data.split())
+        if self.nospace and not data:
+            return
+        elif prespace or self.softspace:
+            if not data:
+                if not self.nospace:
+                    self.softspace = 1
+                    self.parskip = 0
+                return
+            if not self.nospace:
+                data = ' ' + data
+        self.hard_break = self.nospace = self.para_end = \
+                          self.parskip = self.have_label = 0
+        self.softspace = postspace
+        self.writer.send_flowing_data(data)
+
+    def add_literal_data(self, data):
+        if not data: return
+        if self.softspace:
+            self.writer.send_flowing_data(" ")
+        self.hard_break = data[-1:] == '\n'
+        self.nospace = self.para_end = self.softspace = \
+                       self.parskip = self.have_label = 0
+        self.writer.send_literal_data(data)
+
+    def flush_softspace(self):
+        if self.softspace:
+            self.hard_break = self.para_end = self.parskip = \
+                              self.have_label = self.softspace = 0
+            self.nospace = 1
+            self.writer.send_flowing_data(' ')
+
+    def push_alignment(self, align):
+        if align and align != self.align:
+            self.writer.new_alignment(align)
+            self.align = align
+            self.align_stack.append(align)
+        else:
+            self.align_stack.append(self.align)
+
+    def pop_alignment(self):
+        if self.align_stack:
+            del self.align_stack[-1]
+        if self.align_stack:
+            self.align = align = self.align_stack[-1]
+            self.writer.new_alignment(align)
+        else:
+            self.align = None
+            self.writer.new_alignment(None)
+
+    def push_font(self, font):
+        size, i, b, tt = font
+        if self.softspace:
+            self.hard_break = self.para_end = self.softspace = 0
+            self.nospace = 1
+            self.writer.send_flowing_data(' ')
+        if self.font_stack:
+            csize, ci, cb, ctt = self.font_stack[-1]
+            if size is AS_IS: size = csize
+            if i is AS_IS: i = ci
+            if b is AS_IS: b = cb
+            if tt is AS_IS: tt = ctt
+        font = (size, i, b, tt)
+        self.font_stack.append(font)
+        self.writer.new_font(font)
+
+    def pop_font(self):
+        if self.font_stack:
+            del self.font_stack[-1]
+        if self.font_stack:
+            font = self.font_stack[-1]
+        else:
+            font = None
+        self.writer.new_font(font)
+
+    def push_margin(self, margin):
+        self.margin_stack.append(margin)
+        fstack = [m for m in self.margin_stack if m]
+        if not margin and fstack:
+            margin = fstack[-1]
+        self.writer.new_margin(margin, len(fstack))
+
+    def pop_margin(self):
+        if self.margin_stack:
+            del self.margin_stack[-1]
+        fstack = [m for m in self.margin_stack if m]
+        if fstack:
+            margin = fstack[-1]
+        else:
+            margin = None
+        self.writer.new_margin(margin, len(fstack))
+
+    def set_spacing(self, spacing):
+        self.spacing = spacing
+        self.writer.new_spacing(spacing)
+
+    def push_style(self, *styles):
+        if self.softspace:
+            self.hard_break = self.para_end = self.softspace = 0
+            self.nospace = 1
+            self.writer.send_flowing_data(' ')
+        for style in styles:
+            self.style_stack.append(style)
+        self.writer.new_styles(tuple(self.style_stack))
+
+    def pop_style(self, n=1):
+        del self.style_stack[-n:]
+        self.writer.new_styles(tuple(self.style_stack))
+
+    def assert_line_data(self, flag=1):
+        self.nospace = self.hard_break = not flag
+        self.para_end = self.parskip = self.have_label = 0
+
+
+class NullWriter:
+    """Minimal writer interface to use in testing & inheritance.
+
+    A writer which only provides the interface definition; no actions are
+    taken on any methods.  This should be the base class for all writers
+    which do not need to inherit any implementation methods.
+
+    """
+    def __init__(self): pass
+    def flush(self): pass
+    def new_alignment(self, align): pass
+    def new_font(self, font): pass
+    def new_margin(self, margin, level): pass
+    def new_spacing(self, spacing): pass
+    def new_styles(self, styles): pass
+    def send_paragraph(self, blankline): pass
+    def send_line_break(self): pass
+    def send_hor_rule(self, *args, **kw): pass
+    def send_label_data(self, data): pass
+    def send_flowing_data(self, data): pass
+    def send_literal_data(self, data): pass
+
+
+class AbstractWriter(NullWriter):
+    """A writer which can be used in debugging formatters, but not much else.
+
+    Each method simply announces itself by printing its name and
+    arguments on standard output.
+
+    """
+
+    def new_alignment(self, align):
+        print("new_alignment(%r)" % (align,))
+
+    def new_font(self, font):
+        print("new_font(%r)" % (font,))
+
+    def new_margin(self, margin, level):
+        print("new_margin(%r, %d)" % (margin, level))
+
+    def new_spacing(self, spacing):
+        print("new_spacing(%r)" % (spacing,))
+
+    def new_styles(self, styles):
+        print("new_styles(%r)" % (styles,))
+
+    def send_paragraph(self, blankline):
+        print("send_paragraph(%r)" % (blankline,))
+
+    def send_line_break(self):
+        print("send_line_break()")
+
+    def send_hor_rule(self, *args, **kw):
+        print("send_hor_rule()")
+
+    def send_label_data(self, data):
+        print("send_label_data(%r)" % (data,))
+
+    def send_flowing_data(self, data):
+        print("send_flowing_data(%r)" % (data,))
+
+    def send_literal_data(self, data):
+        print("send_literal_data(%r)" % (data,))
+
+
+class DumbWriter(NullWriter):
+    """Simple writer class which writes output on the file object passed in
+    as the file parameter or, if file is omitted, on standard output.  The
+    output is simply word-wrapped to the number of columns specified by
+    the maxcol parameter.  This class is suitable for reflowing a sequence
+    of paragraphs.
+
+    """
+
+    def __init__(self, file=None, maxcol=72):
+        self.file = file or sys.stdout
+        self.maxcol = maxcol
+        NullWriter.__init__(self)
+        self.reset()
+
+    def reset(self):
+        self.col = 0
+        self.atbreak = 0
+
+    def send_paragraph(self, blankline):
+        self.file.write('\n'*blankline)
+        self.col = 0
+        self.atbreak = 0
+
+    def send_line_break(self):
+        self.file.write('\n')
+        self.col = 0
+        self.atbreak = 0
+
+    def send_hor_rule(self, *args, **kw):
+        self.file.write('\n')
+        self.file.write('-'*self.maxcol)
+        self.file.write('\n')
+        self.col = 0
+        self.atbreak = 0
+
+    def send_literal_data(self, data):
+        self.file.write(data)
+        i = data.rfind('\n')
+        if i >= 0:
+            self.col = 0
+            data = data[i+1:]
+        data = data.expandtabs()
+        self.col = self.col + len(data)
+        self.atbreak = 0
+
+    def send_flowing_data(self, data):
+        if not data: return
+        atbreak = self.atbreak or data[0].isspace()
+        col = self.col
+        maxcol = self.maxcol
+        write = self.file.write
+        for word in data.split():
+            if atbreak:
+                if col + len(word) >= maxcol:
+                    write('\n')
+                    col = 0
+                else:
+                    write(' ')
+                    col = col + 1
+            write(word)
+            col = col + len(word)
+            atbreak = 1
+        self.col = col
+        self.atbreak = data[-1].isspace()
+
+
+def test(file = None):
+    w = DumbWriter()
+    f = AbstractFormatter(w)
+    if file is not None:
+        fp = open(file)
+    elif sys.argv[1:]:
+        fp = open(sys.argv[1])
+    else:
+        fp = sys.stdin
+    try:
+        for line in fp:
+            if line == '\n':
+                f.end_paragraph(1)
+            else:
+                f.add_flowing_data(line)
+    finally:
+        if fp is not sys.stdin:
+            fp.close()
+    f.end_paragraph(0)
+
+
+if __name__ == '__main__':
+    test()
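
A short usage sketch of the restored module (the import-time
DeprecationWarning is intentional): reflow a paragraph to 40 columns and
format a Roman-numeral label.

    import formatter

    w = formatter.DumbWriter(maxcol=40)
    f = formatter.AbstractFormatter(w)
    f.add_flowing_data('This sentence is reflowed to the column '
                       'limit configured on the writer.')
    f.end_paragraph(1)
    print(f.format_counter('I.', 1999))   # 'MCMXCIX.'
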
diff --git a/common/py3-stdlib/fractions.py b/common/py3-stdlib/fractions.py
index 96047be..de3e23b 100644
--- a/common/py3-stdlib/fractions.py
+++ b/common/py3-stdlib/fractions.py
@@ -380,139 +380,32 @@
 
         return forward, reverse
 
-    # Rational arithmetic algorithms: Knuth, TAOCP, Volume 2, 4.5.1.
-    #
-    # Assume input fractions a and b are normalized.
-    #
-    # 1) Consider addition/subtraction.
-    #
-    # Let g = gcd(da, db). Then
-    #
-    #              na   nb    na*db ± nb*da
-    #     a ± b == -- ± -- == ------------- ==
-    #              da   db        da*db
-    #
-    #              na*(db//g) ± nb*(da//g)    t
-    #           == ----------------------- == -
-    #                      (da*db)//g         d
-    #
-    # Now, if g > 1, we're working with smaller integers.
-    #
-    # Note that t, (da//g) and (db//g) are pairwise coprime.
-    #
-    # Indeed, (da//g) and (db//g) share no common factors (they were
-    # removed) and da is coprime with na (since input fractions are
-    # normalized), hence (da//g) and na are coprime.  By symmetry,
-    # (db//g) and nb are coprime too.  Then,
-    #
-    #     gcd(t, da//g) == gcd(na*(db//g), da//g) == 1
-    #     gcd(t, db//g) == gcd(nb*(da//g), db//g) == 1
-    #
-    # The above allows us to optimize reduction of the result to lowest
-    # terms.  Indeed,
-    #
-    #     g2 = gcd(t, d) == gcd(t, (da//g)*(db//g)*g) == gcd(t, g)
-    #
-    #                       t//g2                   t//g2
-    #     a ± b == ----------------------- == ----------------
-    #              (da//g)*(db//g)*(g//g2)    (da//g)*(db//g2)
-    #
-    # is a normalized fraction.  This is useful because the unnormalized
-    # denominator d could be much larger than g.
-    #
-    # We should special-case g == 1 (and g2 == 1), since 60.8% of
-    # randomly-chosen integers are coprime:
-    # https://en.wikipedia.org/wiki/Coprime_integers#Probability_of_coprimality
-    # Note that g2 == 1 always for fractions obtained from floats: here
-    # g is a power of 2 and the unnormalized numerator t is an odd integer.
-    #
-    # 2) Consider multiplication
-    #
-    # Let g1 = gcd(na, db) and g2 = gcd(nb, da), then
-    #
-    #            na*nb    na*nb    (na//g1)*(nb//g2)
-    #     a*b == ----- == ----- == -----------------
-    #            da*db    db*da    (db//g1)*(da//g2)
-    #
-    # Note that after divisions we're multiplying smaller integers.
-    #
-    # Also, the resulting fraction is normalized, because each of
-    # two factors in the numerator is coprime to each of the two factors
-    # in the denominator.
-    #
-    # Indeed, pick (na//g1).  It's coprime with (da//g2), because input
-    # fractions are normalized.  It's also coprime with (db//g1), because
-    # common factors are removed by g1 == gcd(na, db).
-    #
-    # As for addition/subtraction, we should special-case g1 == 1
-    # and g2 == 1 for the same reason.  That also happens when multiplying
-    # rationals obtained from floats.
-
     def _add(a, b):
         """a + b"""
-        na, da = a.numerator, a.denominator
-        nb, db = b.numerator, b.denominator
-        g = math.gcd(da, db)
-        if g == 1:
-            return Fraction(na * db + da * nb, da * db, _normalize=False)
-        s = da // g
-        t = na * (db // g) + nb * s
-        g2 = math.gcd(t, g)
-        if g2 == 1:
-            return Fraction(t, s * db, _normalize=False)
-        return Fraction(t // g2, s * (db // g2), _normalize=False)
+        da, db = a.denominator, b.denominator
+        return Fraction(a.numerator * db + b.numerator * da,
+                        da * db)
 
     __add__, __radd__ = _operator_fallbacks(_add, operator.add)
 
     def _sub(a, b):
         """a - b"""
-        na, da = a.numerator, a.denominator
-        nb, db = b.numerator, b.denominator
-        g = math.gcd(da, db)
-        if g == 1:
-            return Fraction(na * db - da * nb, da * db, _normalize=False)
-        s = da // g
-        t = na * (db // g) - nb * s
-        g2 = math.gcd(t, g)
-        if g2 == 1:
-            return Fraction(t, s * db, _normalize=False)
-        return Fraction(t // g2, s * (db // g2), _normalize=False)
+        da, db = a.denominator, b.denominator
+        return Fraction(a.numerator * db - b.numerator * da,
+                        da * db)
 
     __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
 
     def _mul(a, b):
         """a * b"""
-        na, da = a.numerator, a.denominator
-        nb, db = b.numerator, b.denominator
-        g1 = math.gcd(na, db)
-        if g1 > 1:
-            na //= g1
-            db //= g1
-        g2 = math.gcd(nb, da)
-        if g2 > 1:
-            nb //= g2
-            da //= g2
-        return Fraction(na * nb, db * da, _normalize=False)
+        return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
 
     __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
 
     def _div(a, b):
         """a / b"""
-        # Same as _mul(), with inversed b.
-        na, da = a.numerator, a.denominator
-        nb, db = b.numerator, b.denominator
-        g1 = math.gcd(na, nb)
-        if g1 > 1:
-            na //= g1
-            nb //= g1
-        g2 = math.gcd(db, da)
-        if g2 > 1:
-            da //= g2
-            db //= g2
-        n, d = na * db, nb * da
-        if d < 0:
-            n, d = -n, -d
-        return Fraction(n, d, _normalize=False)
+        return Fraction(a.numerator * b.denominator,
+                        a.denominator * b.numerator)
 
     __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
 
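
The deleted comment block carries the reasoning behind the removed fast
paths; a worked check of both strategies on 1/6 + 1/10, for illustration:

    from math import gcd

    na, da, nb, db = 1, 6, 1, 10          # 1/6 + 1/10

    # Restored path: cross-multiply, then normalize once at the end.
    t, d = na*db + nb*da, da*db           # 16, 60
    g = gcd(t, d)                         # 4
    assert (t//g, d//g) == (4, 15)

    # Removed path: pull out g1 = gcd(da, db) first, leaving a smaller gcd.
    g1 = gcd(da, db)                      # 2
    s = da // g1                          # 3
    t2 = na*(db//g1) + nb*s               # 1*5 + 1*3 = 8
    g2 = gcd(t2, g1)                      # 2
    assert (t2//g2, s*(db//g2)) == (4, 15)
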
diff --git a/common/py3-stdlib/ftplib.py b/common/py3-stdlib/ftplib.py
index 7c5a507..1f760ed 100644
--- a/common/py3-stdlib/ftplib.py
+++ b/common/py3-stdlib/ftplib.py
@@ -102,9 +102,7 @@
     sock = None
     file = None
     welcome = None
-    passiveserver = True
-    # Disables https://bugs.python.org/issue43285 security if set to True.
-    trust_server_pasv_ipv4_address = False
+    passiveserver = 1
 
     def __init__(self, host='', user='', passwd='', acct='',
                  timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *,
@@ -322,13 +320,8 @@
         return sock
 
     def makepasv(self):
-        """Internal: Does the PASV or EPSV handshake -> (address, port)"""
         if self.af == socket.AF_INET:
-            untrusted_host, port = parse227(self.sendcmd('PASV'))
-            if self.trust_server_pasv_ipv4_address:
-                host = untrusted_host
-            else:
-                host = self.sock.getpeername()[0]
+            host, port = parse227(self.sendcmd('PASV'))
         else:
             host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
         return host, port
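
The makepasv() hunk restores trusting the IPv4 address the server reports in
its PASV reply rather than the control connection's peer address. How a 227
reply encodes host and port, simplified here for illustration (ftplib's
parse227() uses a regex):

    resp = '227 Entering Passive Mode (192,168,1,2,19,137)'
    nums = resp[resp.index('(') + 1:resp.index(')')].split(',')
    host = '.'.join(nums[:4])                   # '192.168.1.2'
    port = (int(nums[4]) << 8) + int(nums[5])   # 19*256 + 137 = 5001
    assert (host, port) == ('192.168.1.2', 5001)
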
diff --git a/common/py3-stdlib/functools.py b/common/py3-stdlib/functools.py
index 305ceb4..5cab497 100644
--- a/common/py3-stdlib/functools.py
+++ b/common/py3-stdlib/functools.py
@@ -88,84 +88,84 @@
 
 def _gt_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (not a < b) and (a != b).'
-    op_result = type(self).__lt__(self, other)
+    op_result = self.__lt__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result and self != other
 
 def _le_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (a < b) or (a == b).'
-    op_result = type(self).__lt__(self, other)
+    op_result = self.__lt__(other)
     if op_result is NotImplemented:
         return op_result
     return op_result or self == other
 
 def _ge_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (not a < b).'
-    op_result = type(self).__lt__(self, other)
+    op_result = self.__lt__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
 
 def _ge_from_le(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (not a <= b) or (a == b).'
-    op_result = type(self).__le__(self, other)
+    op_result = self.__le__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result or self == other
 
 def _lt_from_le(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (a <= b) and (a != b).'
-    op_result = type(self).__le__(self, other)
+    op_result = self.__le__(other)
     if op_result is NotImplemented:
         return op_result
     return op_result and self != other
 
 def _gt_from_le(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (not a <= b).'
-    op_result = type(self).__le__(self, other)
+    op_result = self.__le__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
 
 def _lt_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (not a > b) and (a != b).'
-    op_result = type(self).__gt__(self, other)
+    op_result = self.__gt__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result and self != other
 
 def _ge_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (a > b) or (a == b).'
-    op_result = type(self).__gt__(self, other)
+    op_result = self.__gt__(other)
     if op_result is NotImplemented:
         return op_result
     return op_result or self == other
 
 def _le_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (not a > b).'
-    op_result = type(self).__gt__(self, other)
+    op_result = self.__gt__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
 
 def _le_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (not a >= b) or (a == b).'
-    op_result = type(self).__ge__(self, other)
+    op_result = self.__ge__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result or self == other
 
 def _gt_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (a >= b) and (a != b).'
-    op_result = type(self).__ge__(self, other)
+    op_result = self.__ge__(other)
     if op_result is NotImplemented:
         return op_result
     return op_result and self != other
 
 def _lt_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (not a >= b).'
-    op_result = type(self).__ge__(self, other)
+    op_result = self.__ge__(other)
     if op_result is NotImplemented:
         return op_result
     return not op_result
@@ -236,14 +236,14 @@
 
 def reduce(function, sequence, initial=_initial_missing):
     """
-    reduce(function, iterable[, initial]) -> value
+    reduce(function, sequence[, initial]) -> value
 
-    Apply a function of two arguments cumulatively to the items of a sequence
-    or iterable, from left to right, so as to reduce the iterable to a single
-    value.  For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
+    Apply a function of two arguments cumulatively to the items of a sequence,
+    from left to right, so as to reduce the sequence to a single value.
+    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
     ((((1+2)+3)+4)+5).  If initial is present, it is placed before the items
-    of the iterable in the calculation, and serves as a default when the
-    iterable is empty.
+    of the sequence in the calculation, and serves as a default when the
+    sequence is empty.
     """
 
     it = iter(sequence)
@@ -252,8 +252,7 @@
         try:
             value = next(it)
         except StopIteration:
-            raise TypeError(
-                "reduce() of empty iterable with no initial value") from None
+            raise TypeError("reduce() of empty sequence with no initial value") from None
     else:
         value = initial
 
@@ -492,7 +491,7 @@
     with f.cache_info().  Clear the cache and statistics with f.cache_clear().
     Access the underlying function with f.__wrapped__.
 
-    See:  https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)
+    See:  http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)
 
     """
 
@@ -660,7 +659,7 @@
 def _c3_merge(sequences):
     """Merges MROs in *sequences* to a single MRO using the C3 algorithm.
 
-    Adapted from https://www.python.org/download/releases/2.3/mro/.
+    Adapted from http://www.python.org/download/releases/2.3/mro/.
 
     """
     result = []
@@ -740,7 +739,6 @@
     # Remove entries which are already present in the __mro__ or unrelated.
     def is_related(typ):
         return (typ not in bases and hasattr(typ, '__mro__')
-                                 and not isinstance(typ, GenericAlias)
                                  and issubclass(cls, typ))
     types = [n for n in types if is_related(n)]
     # Remove entries which are strict bases of other entries (they will end up
@@ -838,9 +836,6 @@
             dispatch_cache[cls] = impl
         return impl
 
-    def _is_valid_dispatch_type(cls):
-        return isinstance(cls, type) and not isinstance(cls, GenericAlias)
-
     def register(cls, func=None):
         """generic_func.register(cls, func) -> func
 
@@ -848,15 +843,9 @@
 
         """
         nonlocal cache_token
-        if _is_valid_dispatch_type(cls):
-            if func is None:
+        if func is None:
+            if isinstance(cls, type):
                 return lambda f: register(cls, f)
-        else:
-            if func is not None:
-                raise TypeError(
-                    f"Invalid first argument to `register()`. "
-                    f"{cls!r} is not a class."
-                )
             ann = getattr(cls, '__annotations__', {})
             if not ann:
                 raise TypeError(
@@ -869,12 +858,11 @@
             # only import typing if annotation parsing is necessary
             from typing import get_type_hints
             argname, cls = next(iter(get_type_hints(func).items()))
-            if not _is_valid_dispatch_type(cls):
+            if not isinstance(cls, type):
                 raise TypeError(
                     f"Invalid annotation for {argname!r}. "
                     f"{cls!r} is not a class."
                 )
-
         registry[cls] = func
         if cache_token is None and hasattr(cls, '__abstractmethods__'):
             cache_token = get_cache_token()
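
The total_ordering helpers above go back to instance-bound calls such as
self.__lt__(other); the decorator's public behavior is the same either way.
A minimal sketch of what it fills in:

    from functools import total_ordering

    @total_ordering
    class Version:
        def __init__(self, n):
            self.n = n
        def __eq__(self, other):
            return self.n == other.n
        def __lt__(self, other):
            return self.n < other.n

    assert Version(1) <= Version(2)   # derived from __lt__ and __eq__
    assert Version(3) >= Version(3)
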
diff --git a/common/py3-stdlib/getpass.py b/common/py3-stdlib/getpass.py
index 6970d8a..6911f41 100644
--- a/common/py3-stdlib/getpass.py
+++ b/common/py3-stdlib/getpass.py
@@ -95,7 +95,7 @@
 
 
 def win_getpass(prompt='Password: ', stream=None):
-    """Prompt for password with echo off, using Windows getwch()."""
+    """Prompt for password with echo off, using Windows getch()."""
     if sys.stdin is not sys.__stdin__:
         return fallback_getpass(prompt, stream)
 
diff --git a/common/py3-stdlib/glob.py b/common/py3-stdlib/glob.py
index 9fc08f4..0dd2f8b 100644
--- a/common/py3-stdlib/glob.py
+++ b/common/py3-stdlib/glob.py
@@ -1,16 +1,13 @@
 """Filename globbing utility."""
 
-import contextlib
 import os
 import re
 import fnmatch
-import itertools
-import stat
 import sys
 
 __all__ = ["glob", "iglob", "escape"]
 
-def glob(pathname, *, root_dir=None, dir_fd=None, recursive=False):
+def glob(pathname, *, recursive=False):
     """Return a list of paths matching a pathname pattern.
 
     The pattern may contain simple shell-style wildcards a la
@@ -21,9 +18,9 @@
     If recursive is true, the pattern '**' will match any files and
     zero or more directories and subdirectories.
     """
-    return list(iglob(pathname, root_dir=root_dir, dir_fd=dir_fd, recursive=recursive))
+    return list(iglob(pathname, recursive=recursive))
 
-def iglob(pathname, *, root_dir=None, dir_fd=None, recursive=False):
+def iglob(pathname, *, recursive=False):
     """Return an iterator which yields the paths matching a pathname pattern.
 
     The pattern may contain simple shell-style wildcards a la
@@ -35,44 +32,35 @@
     zero or more directories and subdirectories.
     """
     sys.audit("glob.glob", pathname, recursive)
-    sys.audit("glob.glob/2", pathname, recursive, root_dir, dir_fd)
-    if root_dir is not None:
-        root_dir = os.fspath(root_dir)
-    else:
-        root_dir = pathname[:0]
-    it = _iglob(pathname, root_dir, dir_fd, recursive, False)
-    if not pathname or recursive and _isrecursive(pathname[:2]):
-        try:
-            s = next(it)  # skip empty string
-            if s:
-                it = itertools.chain((s,), it)
-        except StopIteration:
-            pass
+    it = _iglob(pathname, recursive, False)
+    if recursive and _isrecursive(pathname):
+        s = next(it)  # skip empty string
+        assert not s
     return it
 
-def _iglob(pathname, root_dir, dir_fd, recursive, dironly):
+def _iglob(pathname, recursive, dironly):
     dirname, basename = os.path.split(pathname)
     if not has_magic(pathname):
         assert not dironly
         if basename:
-            if _lexists(_join(root_dir, pathname), dir_fd):
+            if os.path.lexists(pathname):
                 yield pathname
         else:
             # Patterns ending with a slash should match only directories
-            if _isdir(_join(root_dir, dirname), dir_fd):
+            if os.path.isdir(dirname):
                 yield pathname
         return
     if not dirname:
         if recursive and _isrecursive(basename):
-            yield from _glob2(root_dir, basename, dir_fd, dironly)
+            yield from _glob2(dirname, basename, dironly)
         else:
-            yield from _glob1(root_dir, basename, dir_fd, dironly)
+            yield from _glob1(dirname, basename, dironly)
         return
     # `os.path.split()` returns the argument itself as a dirname if it is a
     # drive or UNC path.  Prevent an infinite recursion if a drive or UNC path
     # contains magic characters (i.e. r'\\?\C:').
     if dirname != pathname and has_magic(dirname):
-        dirs = _iglob(dirname, root_dir, dir_fd, recursive, True)
+        dirs = _iglob(dirname, recursive, True)
     else:
         dirs = [dirname]
     if has_magic(basename):
@@ -83,125 +71,76 @@
     else:
         glob_in_dir = _glob0
     for dirname in dirs:
-        for name in glob_in_dir(_join(root_dir, dirname), basename, dir_fd, dironly):
+        for name in glob_in_dir(dirname, basename, dironly):
             yield os.path.join(dirname, name)
 
 # These 2 helper functions non-recursively glob inside a literal directory.
 # They return a list of basenames.  _glob1 accepts a pattern while _glob0
 # takes a literal basename (so it only has to check for its existence).
 
-def _glob1(dirname, pattern, dir_fd, dironly):
-    names = _listdir(dirname, dir_fd, dironly)
+def _glob1(dirname, pattern, dironly):
+    names = list(_iterdir(dirname, dironly))
     if not _ishidden(pattern):
         names = (x for x in names if not _ishidden(x))
     return fnmatch.filter(names, pattern)
 
-def _glob0(dirname, basename, dir_fd, dironly):
-    if basename:
-        if _lexists(_join(dirname, basename), dir_fd):
-            return [basename]
-    else:
+def _glob0(dirname, basename, dironly):
+    if not basename:
         # `os.path.split()` returns an empty basename for paths ending with a
         # directory separator.  'q*x/' should match only directories.
-        if _isdir(dirname, dir_fd):
+        if os.path.isdir(dirname):
+            return [basename]
+    else:
+        if os.path.lexists(os.path.join(dirname, basename)):
             return [basename]
     return []
 
 # Following functions are not public but can be used by third-party code.
 
 def glob0(dirname, pattern):
-    return _glob0(dirname, pattern, None, False)
+    return _glob0(dirname, pattern, False)
 
 def glob1(dirname, pattern):
-    return _glob1(dirname, pattern, None, False)
+    return _glob1(dirname, pattern, False)
 
 # This helper function recursively yields relative pathnames inside a literal
 # directory.
 
-def _glob2(dirname, pattern, dir_fd, dironly):
+def _glob2(dirname, pattern, dironly):
     assert _isrecursive(pattern)
     yield pattern[:0]
-    yield from _rlistdir(dirname, dir_fd, dironly)
+    yield from _rlistdir(dirname, dironly)
 
 # If dironly is false, yields all file names inside a directory.
 # If dironly is true, yields only directory names.
-def _iterdir(dirname, dir_fd, dironly):
-    try:
-        fd = None
-        fsencode = None
-        if dir_fd is not None:
-            if dirname:
-                fd = arg = os.open(dirname, _dir_open_flags, dir_fd=dir_fd)
-            else:
-                arg = dir_fd
-            if isinstance(dirname, bytes):
-                fsencode = os.fsencode
-        elif dirname:
-            arg = dirname
-        elif isinstance(dirname, bytes):
-            arg = bytes(os.curdir, 'ASCII')
+def _iterdir(dirname, dironly):
+    if not dirname:
+        if isinstance(dirname, bytes):
+            dirname = bytes(os.curdir, 'ASCII')
         else:
-            arg = os.curdir
-        try:
-            with os.scandir(arg) as it:
-                for entry in it:
-                    try:
-                        if not dironly or entry.is_dir():
-                            if fsencode is not None:
-                                yield fsencode(entry.name)
-                            else:
-                                yield entry.name
-                    except OSError:
-                        pass
-        finally:
-            if fd is not None:
-                os.close(fd)
+            dirname = os.curdir
+    try:
+        with os.scandir(dirname) as it:
+            for entry in it:
+                try:
+                    if not dironly or entry.is_dir():
+                        yield entry.name
+                except OSError:
+                    pass
     except OSError:
         return
 
-def _listdir(dirname, dir_fd, dironly):
-    with contextlib.closing(_iterdir(dirname, dir_fd, dironly)) as it:
-        return list(it)
-
 # Recursively yields relative pathnames inside a literal directory.
-def _rlistdir(dirname, dir_fd, dironly):
-    names = _listdir(dirname, dir_fd, dironly)
+def _rlistdir(dirname, dironly):
+    names = list(_iterdir(dirname, dironly))
     for x in names:
         if not _ishidden(x):
             yield x
-            path = _join(dirname, x) if dirname else x
-            for y in _rlistdir(path, dir_fd, dironly):
-                yield _join(x, y)
+            path = os.path.join(dirname, x) if dirname else x
+            for y in _rlistdir(path, dironly):
+                yield os.path.join(x, y)
 
 
-def _lexists(pathname, dir_fd):
-    # Same as os.path.lexists(), but with dir_fd
-    if dir_fd is None:
-        return os.path.lexists(pathname)
-    try:
-        os.lstat(pathname, dir_fd=dir_fd)
-    except (OSError, ValueError):
-        return False
-    else:
-        return True
-
-def _isdir(pathname, dir_fd):
-    # Same as os.path.isdir(), but with dir_fd
-    if dir_fd is None:
-        return os.path.isdir(pathname)
-    try:
-        st = os.stat(pathname, dir_fd=dir_fd)
-    except (OSError, ValueError):
-        return False
-    else:
-        return stat.S_ISDIR(st.st_mode)
-
-def _join(dirname, basename):
-    # It is common if dirname or basename is empty
-    if not dirname or not basename:
-        return dirname or basename
-    return os.path.join(dirname, basename)
-
 magic_check = re.compile('([*?[])')
 magic_check_bytes = re.compile(b'([*?[])')
 
@@ -232,6 +171,3 @@
     else:
         pathname = magic_check.sub(r'[\1]', pathname)
     return drive + pathname
-
-
-_dir_open_flags = os.O_RDONLY | getattr(os, 'O_DIRECTORY', 0)
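
After this revert glob()/iglob() accept only the recursive flag (root_dir and
dir_fd are gone), so recursive matching works the pre-3.10 way. A sketch over
a hypothetical src tree:

    import glob

    # '**' spans directories only when recursive=True.
    for path in glob.iglob('src/**/*.py', recursive=True):
        print(path)
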
diff --git a/common/py3-stdlib/graphlib.py b/common/py3-stdlib/graphlib.py
index 1c5d9a4..d0e7a48 100644
--- a/common/py3-stdlib/graphlib.py
+++ b/common/py3-stdlib/graphlib.py
@@ -17,7 +17,7 @@
         self.npredecessors = 0
 
         # List of successor nodes. The list can contain duplicated elements as
-        # long as they're all reflected in the successor's npredecessors attribute.
+        # long as they're all reflected in the successor's npredecessors attribute.
         self.successors = []
 
 
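
The touched comment documents TopologicalSorter's successor bookkeeping; for
context, a quick end-to-end use of the class:

    from graphlib import TopologicalSorter

    ts = TopologicalSorter({'lib': {'core'}, 'app': {'lib'}})
    assert list(ts.static_order()) == ['core', 'lib', 'app']
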
diff --git a/common/py3-stdlib/gzip.py b/common/py3-stdlib/gzip.py
index 475ec32..e422773 100644
--- a/common/py3-stdlib/gzip.py
+++ b/common/py3-stdlib/gzip.py
@@ -62,7 +62,6 @@
         raise TypeError("filename must be a str or bytes object, or a file")
 
     if "t" in mode:
-        encoding = io.text_encoding(encoding)
         return io.TextIOWrapper(binary_file, encoding, errors, newline)
     else:
         return binary_file
@@ -278,7 +277,7 @@
         if self.fileobj is None:
             raise ValueError("write() on closed GzipFile object")
 
-        if isinstance(data, (bytes, bytearray)):
+        if isinstance(data, bytes):
             length = len(data)
         else:
             # accept any data that supports the buffer protocol
@@ -517,7 +516,7 @@
 
     def _read_eof(self):
         # We've read to the end of the file
-        # We check that the computed CRC and size of the
+        # We check that the computed CRC and size of the
         # uncompressed data matches the stored values.  Note that the size
         # stored is the true file size mod 2**32.
         crc32, isize = struct.unpack("<II", self._read_exact(8))
@@ -584,7 +583,8 @@
                 g = sys.stdout.buffer
             else:
                 if arg[-3:] != ".gz":
-                    sys.exit(f"filename doesn't end in .gz: {arg!r}")
+                    print("filename doesn't end in .gz:", repr(arg))
+                    continue
                 f = open(arg, "rb")
                 g = builtins.open(arg[:-3], "wb")
         else:
@@ -596,7 +596,7 @@
                 f = builtins.open(arg, "rb")
                 g = open(arg + ".gz", "wb")
         while True:
-            chunk = f.read(io.DEFAULT_BUFFER_SIZE)
+            chunk = f.read(1024)
             if not chunk:
                 break
             g.write(chunk)
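
With the io.text_encoding() call reverted out, text mode no longer resolves a
locale-aware default up front; passing encoding explicitly behaves the same
on both sides of the revert. A sketch with a placeholder path:

    import gzip

    with gzip.open('notes.txt.gz', 'wt', encoding='utf-8') as f:
        f.write('hello\n')
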
diff --git a/common/py3-stdlib/hashlib.py b/common/py3-stdlib/hashlib.py
index 21a73f3..58c340d 100644
--- a/common/py3-stdlib/hashlib.py
+++ b/common/py3-stdlib/hashlib.py
@@ -173,7 +173,6 @@
     algorithms_available = algorithms_available.union(
             _hashlib.openssl_md_meth_names)
 except ImportError:
-    _hashlib = None
     new = __py_new
     __get_hash = __get_builtin_constructor
 
@@ -181,7 +180,6 @@
     # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
     from _hashlib import pbkdf2_hmac
 except ImportError:
-    from warnings import warn as _warn
     _trans_5C = bytes((x ^ 0x5C) for x in range(256))
     _trans_36 = bytes((x ^ 0x36) for x in range(256))
 
@@ -192,11 +190,6 @@
         as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
         for long passwords.
         """
-        _warn(
-            "Python implementation of pbkdf2_hmac() is deprecated.",
-            category=DeprecationWarning,
-            stacklevel=2
-        )
         if not isinstance(hash_name, str):
             raise TypeError(hash_name)
 
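
The pbkdf2_hmac() call signature is unchanged by this hunk; only the
DeprecationWarning on the pure-Python fallback is reverted out. Typical use:

    import hashlib

    dk = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100_000)
    print(dk.hex())
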
diff --git a/common/py3-stdlib/hmac.py b/common/py3-stdlib/hmac.py
index 8b4f920..180bc37 100644
--- a/common/py3-stdlib/hmac.py
+++ b/common/py3-stdlib/hmac.py
@@ -8,12 +8,11 @@
     import _hashlib as _hashopenssl
 except ImportError:
     _hashopenssl = None
-    _functype = None
+    _openssl_md_meths = None
     from _operator import _compare_digest as compare_digest
 else:
+    _openssl_md_meths = frozenset(_hashopenssl.openssl_md_meth_names)
     compare_digest = _hashopenssl.compare_digest
-    _functype = type(_hashopenssl.openssl_sha256)  # builtin type
-
 import hashlib as _hashlib
 
 trans_5C = bytes((x ^ 0x5C) for x in range(256))
@@ -24,6 +23,7 @@
 digest_size = None
 
 
+
 class HMAC:
     """RFC 2104 HMAC class.  Also complies with RFC 4231.
 
@@ -32,7 +32,7 @@
     blocksize = 64  # 512-bit HMAC; can be changed in subclasses.
 
     __slots__ = (
-        "_hmac", "_inner", "_outer", "block_size", "digest_size"
+        "_digest_cons", "_inner", "_outer", "block_size", "digest_size"
     )
 
     def __init__(self, key, msg=None, digestmod=''):
@@ -55,30 +55,15 @@
         if not digestmod:
             raise TypeError("Missing required parameter 'digestmod'.")
 
-        if _hashopenssl and isinstance(digestmod, (str, _functype)):
-            try:
-                self._init_hmac(key, msg, digestmod)
-            except _hashopenssl.UnsupportedDigestmodError:
-                self._init_old(key, msg, digestmod)
-        else:
-            self._init_old(key, msg, digestmod)
-
-    def _init_hmac(self, key, msg, digestmod):
-        self._hmac = _hashopenssl.hmac_new(key, msg, digestmod=digestmod)
-        self.digest_size = self._hmac.digest_size
-        self.block_size = self._hmac.block_size
-
-    def _init_old(self, key, msg, digestmod):
         if callable(digestmod):
-            digest_cons = digestmod
+            self._digest_cons = digestmod
         elif isinstance(digestmod, str):
-            digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
+            self._digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
         else:
-            digest_cons = lambda d=b'': digestmod.new(d)
+            self._digest_cons = lambda d=b'': digestmod.new(d)
 
-        self._hmac = None
-        self._outer = digest_cons()
-        self._inner = digest_cons()
+        self._outer = self._digest_cons()
+        self._inner = self._digest_cons()
         self.digest_size = self._inner.digest_size
 
         if hasattr(self._inner, 'block_size'):
@@ -94,13 +79,13 @@
                            RuntimeWarning, 2)
             blocksize = self.blocksize
 
-        if len(key) > blocksize:
-            key = digest_cons(key).digest()
-
         # self.blocksize is the default blocksize. self.block_size is
         # effective block size as well as the public API attribute.
         self.block_size = blocksize
 
+        if len(key) > blocksize:
+            key = self._digest_cons(key).digest()
+
         key = key.ljust(blocksize, b'\0')
         self._outer.update(key.translate(trans_5C))
         self._inner.update(key.translate(trans_36))
@@ -109,15 +94,23 @@
 
     @property
     def name(self):
-        if self._hmac:
-            return self._hmac.name
-        else:
-            return f"hmac-{self._inner.name}"
+        return "hmac-" + self._inner.name
+
+    @property
+    def digest_cons(self):
+        return self._digest_cons
+
+    @property
+    def inner(self):
+        return self._inner
+
+    @property
+    def outer(self):
+        return self._outer
 
     def update(self, msg):
         """Feed data from msg into this hashing object."""
-        inst = self._hmac or self._inner
-        inst.update(msg)
+        self._inner.update(msg)
 
     def copy(self):
         """Return a separate copy of this hashing object.
@@ -126,14 +119,10 @@
         """
         # Call __new__ directly to avoid the expensive __init__.
         other = self.__class__.__new__(self.__class__)
+        other._digest_cons = self._digest_cons
         other.digest_size = self.digest_size
-        if self._hmac:
-            other._hmac = self._hmac.copy()
-            other._inner = other._outer = None
-        else:
-            other._hmac = None
-            other._inner = self._inner.copy()
-            other._outer = self._outer.copy()
+        other._inner = self._inner.copy()
+        other._outer = self._outer.copy()
         return other
 
     def _current(self):
@@ -141,12 +130,9 @@
 
         To be used only internally with digest() and hexdigest().
         """
-        if self._hmac:
-            return self._hmac
-        else:
-            h = self._outer.copy()
-            h.update(self._inner.digest())
-            return h
+        h = self._outer.copy()
+        h.update(self._inner.digest())
+        return h
 
     def digest(self):
         """Return the hash value of this hashing object.
@@ -193,11 +179,9 @@
             A hashlib constructor returning a new hash object. *OR*
             A module supporting PEP 247.
     """
-    if _hashopenssl is not None and isinstance(digest, (str, _functype)):
-        try:
-            return _hashopenssl.hmac_digest(key, msg, digest)
-        except _hashopenssl.UnsupportedDigestmodError:
-            pass
+    if (_hashopenssl is not None and
+            isinstance(digest, str) and digest in _openssl_md_meths):
+        return _hashopenssl.hmac_digest(key, msg, digest)
 
     if callable(digest):
         digest_cons = digest
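
The restored class always computes HMAC with the explicit inner/outer pads
(trans_36/trans_5C above) instead of delegating to OpenSSL's hmac_new(). A
self-check of that construction against the public API, assuming a key no
longer than the block size (so no pre-hashing is needed):

    import hashlib, hmac

    key, msg = b'key', b'message'
    block = hashlib.sha256().block_size            # 64
    k = key.ljust(block, b'\0')
    inner = hashlib.sha256(bytes(b ^ 0x36 for b in k) + msg)
    outer = hashlib.sha256(bytes(b ^ 0x5C for b in k) + inner.digest())
    assert outer.hexdigest() == hmac.new(key, msg, 'sha256').hexdigest()
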
diff --git a/common/py3-stdlib/html/parser.py b/common/py3-stdlib/html/parser.py
index 58f6bb3..6083077 100644
--- a/common/py3-stdlib/html/parser.py
+++ b/common/py3-stdlib/html/parser.py
@@ -46,7 +46,7 @@
           |"[^"]*"                   # LIT-enclosed value
           |(?!['"])[^>\s]*           # bare value
          )
-        \s*                          # possibly followed by a space
+         (?:\s*,)*                   # possibly followed by a comma
        )?(?:\s|/(?!>))*
      )*
    )?
@@ -405,7 +405,7 @@
             tagname = namematch.group(1).lower()
             # consume and ignore other stuff between the name and the >
             # Note: this is not 100% correct, since we might have things like
-            # </tag attr=">">, but looking for > after the name should cover
+            # </tag attr=">">, but looking for > after the name should cover
             # most of the cases and is much simpler
             gtpos = rawdata.find('>', namematch.end())
             self.handle_endtag(tagname)
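
The attrfind_tolerant change above concerns junk such as commas between
attributes; input of that shape, for illustration (the parsed attrs differ
between the two regex versions):

    from html.parser import HTMLParser

    class Collector(HTMLParser):
        def handle_starttag(self, tag, attrs):
            print(tag, attrs)

    Collector().feed('<img src="a.png", alt="x">')
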
diff --git a/common/py3-stdlib/http/__init__.py b/common/py3-stdlib/http/__init__.py
index bf8d7d6..37be765 100644
--- a/common/py3-stdlib/http/__init__.py
+++ b/common/py3-stdlib/http/__init__.py
@@ -2,7 +2,6 @@
 
 __all__ = ['HTTPStatus']
 
-
 class HTTPStatus(IntEnum):
     """HTTP status codes and reason phrases
 
diff --git a/common/py3-stdlib/http/client.py b/common/py3-stdlib/http/client.py
index a6ab135..c2ad047 100644
--- a/common/py3-stdlib/http/client.py
+++ b/common/py3-stdlib/http/client.py
@@ -70,12 +70,10 @@
 
 import email.parser
 import email.message
-import errno
 import http
 import io
 import re
 import socket
-import sys
 import collections.abc
 from urllib.parse import urlsplit
 
@@ -203,11 +201,15 @@
                 lst.append(line)
         return lst
 
-def _read_headers(fp):
-    """Reads potential header lines into a list from a file pointer.
+def parse_headers(fp, _class=HTTPMessage):
+    """Parses only RFC2822 headers from a file pointer.
 
-    Length of line is limited by _MAXLINE, and number of
-    headers is limited by _MAXHEADERS.
+    email Parser wants to see strings rather than bytes.
+    But a TextIOWrapper around self.rfile would buffer too many bytes
+    from the stream, bytes which we later need to read as bytes.
+    So we read the correct bytes here, as bytes, for email Parser
+    to parse.
+
     """
     headers = []
     while True:
@@ -219,19 +221,6 @@
             raise HTTPException("got more than %d headers" % _MAXHEADERS)
         if line in (b'\r\n', b'\n', b''):
             break
-    return headers
-
-def parse_headers(fp, _class=HTTPMessage):
-    """Parses only RFC2822 headers from a file pointer.
-
-    email Parser wants to see strings rather than bytes.
-    But a TextIOWrapper around self.rfile would buffer too many bytes
-    from the stream, bytes which we later need to read as bytes.
-    So we read the correct bytes here, as bytes, for email Parser
-    to parse.
-
-    """
-    headers = _read_headers(fp)
     hstring = b''.join(headers).decode('iso-8859-1')
     return email.parser.Parser(_class=_class).parsestr(hstring)
 
@@ -319,10 +308,15 @@
             if status != CONTINUE:
                 break
             # skip the header from the 100 response
-            skipped_headers = _read_headers(self.fp)
-            if self.debuglevel > 0:
-                print("headers:", skipped_headers)
-            del skipped_headers
+            while True:
+                skip = self.fp.readline(_MAXLINE + 1)
+                if len(skip) > _MAXLINE:
+                    raise LineTooLong("header line")
+                skip = skip.strip()
+                if not skip:
+                    break
+                if self.debuglevel > 0:
+                    print("header:", skip)
 
         self.code = self.status = status
         self.reason = reason.strip()
@@ -355,6 +349,9 @@
         # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
         self.length = None
         length = self.headers.get("content-length")
+
+        # are we using the chunked-style of transfer encoding?
+        tr_enc = self.headers.get("transfer-encoding")
         if length and not self.chunked:
             try:
                 self.length = int(length)
@@ -455,25 +452,18 @@
             self._close_conn()
             return b""
 
-        if self.chunked:
-            return self._read_chunked(amt)
-
         if amt is not None:
-            if self.length is not None and amt > self.length:
-                # clip the read to the "end of response"
-                amt = self.length
-            s = self.fp.read(amt)
-            if not s and amt:
-                # Ideally, we would raise IncompleteRead if the content-length
-                # wasn't satisfied, but it might break compatibility.
-                self._close_conn()
-            elif self.length is not None:
-                self.length -= len(s)
-                if not self.length:
-                    self._close_conn()
-            return s
+            # Amount is given, implement using readinto
+            b = bytearray(amt)
+            n = self.readinto(b)
+            return memoryview(b)[:n].tobytes()
         else:
             # Amount is not given (unbounded read) so we must check self.length
+            # and self.chunked
+
+            if self.chunked:
+                return self._readall_chunked()
+
             if self.length is None:
                 s = self.fp.read()
             else:
@@ -574,7 +564,7 @@
             self.chunk_left = chunk_left
         return chunk_left
 
-    def _read_chunked(self, amt=None):
+    def _readall_chunked(self):
         assert self.chunked != _UNKNOWN
         value = []
         try:
@@ -582,15 +572,7 @@
                 chunk_left = self._get_chunk_left()
                 if chunk_left is None:
                     break
-
-                if amt is not None and amt <= chunk_left:
-                    value.append(self._safe_read(amt))
-                    self.chunk_left = chunk_left - amt
-                    break
-
                 value.append(self._safe_read(chunk_left))
-                if amt is not None:
-                    amt -= chunk_left
                 self.chunk_left = 0
             return b''.join(value)
         except IncompleteRead:
@@ -864,7 +846,7 @@
         the endpoint passed to `set_tunnel`. This done by sending an HTTP
         CONNECT request to the proxy server when the connection is established.
 
-        This method must be called before the HTTP connection has been
+        This method must be called before the HTTP connection has been
         established.
 
         The headers argument should be a mapping of extra HTTP headers to send
@@ -904,24 +886,23 @@
         self.debuglevel = level
 
     def _tunnel(self):
-        connect = b"CONNECT %s:%d HTTP/1.0\r\n" % (
-            self._tunnel_host.encode("ascii"), self._tunnel_port)
-        headers = [connect]
+        connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
+            self._tunnel_port)
+        connect_bytes = connect_str.encode("ascii")
+        self.send(connect_bytes)
         for header, value in self._tunnel_headers.items():
-            headers.append(f"{header}: {value}\r\n".encode("latin-1"))
-        headers.append(b"\r\n")
-        # Making a single send() call instead of one per line encourages
-        # the host OS to use a more optimal packet size instead of
-        # potentially emitting a series of small packets.
-        self.send(b"".join(headers))
-        del headers
+            header_str = "%s: %s\r\n" % (header, value)
+            header_bytes = header_str.encode("latin-1")
+            self.send(header_bytes)
+        self.send(b'\r\n')
 
         response = self.response_class(self.sock, method=self._method)
         (version, code, message) = response._read_status()
 
         if code != http.HTTPStatus.OK:
             self.close()
-            raise OSError(f"Tunnel connection failed: {code} {message.strip()}")
+            raise OSError("Tunnel connection failed: %d %s" % (code,
+                                                               message.strip()))
         while True:
             line = response.fp.readline(_MAXLINE + 1)
             if len(line) > _MAXLINE:
@@ -937,15 +918,9 @@
 
     def connect(self):
         """Connect to the host and port specified in __init__."""
-        sys.audit("http.client.connect", self, self.host, self.port)
         self.sock = self._create_connection(
             (self.host,self.port), self.timeout, self.source_address)
-        # Might fail in OSs that don't implement TCP_NODELAY
-        try:
-             self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-        except OSError as e:
-            if e.errno != errno.ENOPROTOOPT:
-                raise
+        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
 
         if self._tunnel_host:
             self._tunnel()
@@ -990,10 +965,8 @@
                     break
                 if encode:
                     datablock = datablock.encode("iso-8859-1")
-                sys.audit("http.client.send", self, datablock)
                 self.sock.sendall(datablock)
             return
-        sys.audit("http.client.send", self, data)
         try:
             self.sock.sendall(data)
         except TypeError:
@@ -1419,9 +1392,6 @@
             self.cert_file = cert_file
             if context is None:
                 context = ssl._create_default_https_context()
-                # send ALPN extension to indicate HTTP/1.1 protocol
-                if self._http_vsn == 11:
-                    context.set_alpn_protocols(['http/1.1'])
                 # enable PHA for TLS 1.3 connections if available
                 if context.post_handshake_auth is not None:
                     context.post_handshake_auth = True
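
For context on the read() hunk above: the restored implementation routes a
bounded read(amt) through readinto() instead of calling self.fp.read()
directly. A minimal standalone sketch of that pattern, assuming any
file-like object that supports readinto(); the helper name is illustrative,
not stdlib:

    import io

    def read_at_most(fp, amt):
        # Pre-size a buffer, let readinto() fill what it can, then trim
        # to the number of bytes actually received.
        b = bytearray(amt)
        n = fp.readinto(b)
        return memoryview(b)[:n].tobytes()

    print(read_at_most(io.BytesIO(b"hello"), 3))  # b'hel'
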
diff --git a/common/py3-stdlib/http/cookiejar.py b/common/py3-stdlib/http/cookiejar.py
index eaa76c2..47ed5c3 100644
--- a/common/py3-stdlib/http/cookiejar.py
+++ b/common/py3-stdlib/http/cookiejar.py
@@ -50,18 +50,10 @@
         logger = logging.getLogger("http.cookiejar")
     return logger.debug(*args)
 
-HTTPONLY_ATTR = "HTTPOnly"
-HTTPONLY_PREFIX = "#HttpOnly_"
+
 DEFAULT_HTTP_PORT = str(http.client.HTTP_PORT)
-NETSCAPE_MAGIC_RGX = re.compile("#( Netscape)? HTTP Cookie File")
 MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
                          "instance initialised with one)")
-NETSCAPE_HEADER_TEXT =  """\
-# Netscape HTTP Cookie File
-# http://curl.haxx.se/rfc/cookie_spec.html
-# This is a generated file!  Do not edit.
-
-"""
 
 def _warn_unhandled_exception():
     # There are a few catch-all except: statements in this module, for
@@ -2012,11 +2004,19 @@
     header by default (Mozilla can cope with that).
 
     """
+    magic_re = re.compile("#( Netscape)? HTTP Cookie File")
+    header = """\
+# Netscape HTTP Cookie File
+# http://curl.haxx.se/rfc/cookie_spec.html
+# This is a generated file!  Do not edit.
+
+"""
 
     def _really_load(self, f, filename, ignore_discard, ignore_expires):
         now = time.time()
 
-        if not NETSCAPE_MAGIC_RGX.match(f.readline()):
+        magic = f.readline()
+        if not self.magic_re.search(magic):
             raise LoadError(
                 "%r does not look like a Netscape format cookies file" %
                 filename)
@@ -2024,17 +2024,8 @@
         try:
             while 1:
                 line = f.readline()
-                rest = {}
-
                 if line == "": break
 
-                # httponly is a cookie flag as defined in rfc6265
-                # when encoded in a netscape cookie file,
-                # the line is prepended with "#HttpOnly_"
-                if line.startswith(HTTPONLY_PREFIX):
-                    rest[HTTPONLY_ATTR] = ""
-                    line = line[len(HTTPONLY_PREFIX):]
-
                 # last field may be absent, so keep any trailing tab
                 if line.endswith("\n"): line = line[:-1]
 
@@ -2072,7 +2063,7 @@
                            discard,
                            None,
                            None,
-                           rest)
+                           {})
                 if not ignore_discard and c.discard:
                     continue
                 if not ignore_expires and c.is_expired(now):
@@ -2092,17 +2083,16 @@
             else: raise ValueError(MISSING_FILENAME_TEXT)
 
         with open(filename, "w") as f:
-            f.write(NETSCAPE_HEADER_TEXT)
+            f.write(self.header)
             now = time.time()
             for cookie in self:
-                domain = cookie.domain
                 if not ignore_discard and cookie.discard:
                     continue
                 if not ignore_expires and cookie.is_expired(now):
                     continue
                 if cookie.secure: secure = "TRUE"
                 else: secure = "FALSE"
-                if domain.startswith("."): initial_dot = "TRUE"
+                if cookie.domain.startswith("."): initial_dot = "TRUE"
                 else: initial_dot = "FALSE"
                 if cookie.expires is not None:
                     expires = str(cookie.expires)
@@ -2117,9 +2107,7 @@
                 else:
                     name = cookie.name
                     value = cookie.value
-                if cookie.has_nonstandard_attr(HTTPONLY_ATTR):
-                    domain = HTTPONLY_PREFIX + domain
                 f.write(
-                    "\t".join([domain, initial_dot, cookie.path,
+                    "\t".join([cookie.domain, initial_dot, cookie.path,
                                secure, expires, name, value])+
                     "\n")
diff --git a/common/py3-stdlib/http/server.py b/common/py3-stdlib/http/server.py
index 58abadf..def05f4 100644
--- a/common/py3-stdlib/http/server.py
+++ b/common/py3-stdlib/http/server.py
@@ -103,6 +103,8 @@
 import sys
 import time
 import urllib.parse
+import contextlib
+from functools import partial
 
 from http import HTTPStatus
 
@@ -412,7 +414,7 @@
             method = getattr(self, mname)
             method()
             self.wfile.flush() #actually send the response if not already done.
-        except TimeoutError as e:
+        except socket.timeout as e:
             #a read or a write timed out.  Discard this connection
             self.log_error("Request timed out: %r", e)
             self.close_connection = True
@@ -687,7 +689,6 @@
                              parts[3], parts[4])
                 new_url = urllib.parse.urlunsplit(new_parts)
                 self.send_header("Location", new_url)
-                self.send_header("Content-Length", "0")
                 self.end_headers()
                 return None
             for index in "index.html", "index.htm":
@@ -1091,7 +1092,8 @@
         env['PATH_INFO'] = uqrest
         env['PATH_TRANSLATED'] = self.translate_path(uqrest)
         env['SCRIPT_NAME'] = scriptname
-        env['QUERY_STRING'] = query
+        if query:
+            env['QUERY_STRING'] = query
         env['REMOTE_ADDR'] = self.client_address[0]
         authorization = self.headers.get("authorization")
         if authorization:
@@ -1237,6 +1239,7 @@
 
     """
     ServerClass.address_family, addr = _get_best_family(bind, port)
+
     HandlerClass.protocol_version = protocol
     with ServerClass(addr, HandlerClass) as httpd:
         host, port = httpd.socket.getsockname()[:2]
@@ -1253,29 +1256,29 @@
 
 if __name__ == '__main__':
     import argparse
-    import contextlib
 
     parser = argparse.ArgumentParser()
     parser.add_argument('--cgi', action='store_true',
-                        help='run as CGI server')
+                        help='Run as CGI server')
     parser.add_argument('--bind', '-b', metavar='ADDRESS',
-                        help='specify alternate bind address '
-                             '(default: all interfaces)')
+                        help='Specify alternate bind address '
+                             '[default: all interfaces]')
     parser.add_argument('--directory', '-d', default=os.getcwd(),
-                        help='specify alternate directory '
-                             '(default: current directory)')
-    parser.add_argument('port', action='store', default=8000, type=int,
+                        help='Specify alternate directory '
+                             '[default: current directory]')
+    parser.add_argument('port', action='store',
+                        default=8000, type=int,
                         nargs='?',
-                        help='specify alternate port (default: 8000)')
+                        help='Specify alternate port [default: 8000]')
     args = parser.parse_args()
     if args.cgi:
         handler_class = CGIHTTPRequestHandler
     else:
-        handler_class = SimpleHTTPRequestHandler
+        handler_class = partial(SimpleHTTPRequestHandler,
+                                directory=args.directory)
 
     # ensure dual-stack is not disabled; ref #38907
     class DualStackServer(ThreadingHTTPServer):
-
         def server_bind(self):
             # suppress exception when protocol is IPv4
             with contextlib.suppress(Exception):
@@ -1283,10 +1286,6 @@
                     socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
             return super().server_bind()
 
-        def finish_request(self, request, client_address):
-            self.RequestHandlerClass(request, client_address, self,
-                                     directory=args.directory)
-
     test(
         HandlerClass=handler_class,
         ServerClass=DualStackServer,
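
The server changes above swap the finish_request() override for a
functools.partial that pre-binds the --directory argument onto the handler
class. A caller-side sketch of that binding; the address and directory are
illustrative:

    from functools import partial
    from http.server import HTTPServer, SimpleHTTPRequestHandler

    # Every handler instance the server creates now serves from /tmp.
    handler = partial(SimpleHTTPRequestHandler, directory="/tmp")
    # HTTPServer(("127.0.0.1", 8000), handler).serve_forever()
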
diff --git a/common/py3-stdlib/imaplib.py b/common/py3-stdlib/imaplib.py
index 7318439..d9720f2 100644
--- a/common/py3-stdlib/imaplib.py
+++ b/common/py3-stdlib/imaplib.py
@@ -1251,12 +1251,13 @@
             sys.stderr.write('  %s.%02d %s\n' % (tm, (secs*100)%100, s))
             sys.stderr.flush()
 
-        def _dump_ur(self, untagged_resp_dict):
-            if not untagged_resp_dict:
-                return
-            items = (f'{key}: {value!r}'
-                    for key, value in untagged_resp_dict.items())
-            self._mesg('untagged responses dump:' + '\n\t\t'.join(items))
+        def _dump_ur(self, dict):
+            # Dump untagged responses (in `dict').
+            l = dict.items()
+            if not l: return
+            t = '\n\t\t'
+            l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
+            self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
 
         def _log(self, line):
             # Keep log of last `_cmd_log_len' interactions for debugging.
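
For reference, the restored map/lambda formatting in _dump_ur renders an
untagged-response dict like the following (sample data, not captured IMAP
output):

    d = {'EXISTS': ['3'], 'FLAGS': ['(\\Seen \\Deleted)']}
    t = '\n\t\t'
    l = map(lambda x: '%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''),
            d.items())
    print('untagged responses dump:%s%s' % (t, t.join(l)))
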
diff --git a/common/py3-stdlib/imp.py b/common/py3-stdlib/imp.py
index e02aaef..31f8c76 100644
--- a/common/py3-stdlib/imp.py
+++ b/common/py3-stdlib/imp.py
@@ -28,8 +28,7 @@
 import types
 import warnings
 
-warnings.warn("the imp module is deprecated in favour of importlib and slated "
-              "for removal in Python 3.12; "
+warnings.warn("the imp module is deprecated in favour of importlib; "
               "see the module's documentation for alternative uses",
               DeprecationWarning, stacklevel=2)
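
Either wording of the warning above fires once, at first import of imp in
a process. A sketch of surfacing it deterministically, e.g. in a test:

    import sys
    import warnings

    sys.modules.pop("imp", None)  # force a fresh import
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        import imp  # noqa: F401  -- triggers the DeprecationWarning
    print([str(w.message) for w in caught])
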
 
diff --git a/common/py3-stdlib/importlib/__init__.py b/common/py3-stdlib/importlib/__init__.py
index ce61883..0c73c50 100644
--- a/common/py3-stdlib/importlib/__init__.py
+++ b/common/py3-stdlib/importlib/__init__.py
@@ -34,7 +34,7 @@
     import _frozen_importlib_external as _bootstrap_external
 except ImportError:
     from . import _bootstrap_external
-    _bootstrap_external._set_bootstrap_module(_bootstrap)
+    _bootstrap_external._setup(_bootstrap)
     _bootstrap._bootstrap_external = _bootstrap_external
 else:
     _bootstrap_external.__name__ = 'importlib._bootstrap_external'
@@ -54,6 +54,7 @@
 # Fully bootstrapped at this point, import whatever you like, circular
 # dependencies and startup overhead minimisation permitting :)
 
+import types
 import warnings
 
 
@@ -78,8 +79,8 @@
     This function is deprecated in favor of importlib.util.find_spec().
 
     """
-    warnings.warn('Deprecated since Python 3.4 and slated for removal in '
-                  'Python 3.12; use importlib.util.find_spec() instead',
+    warnings.warn('Deprecated since Python 3.4. '
+                  'Use importlib.util.find_spec() instead.',
                   DeprecationWarning, stacklevel=2)
     try:
         loader = sys.modules[name].__loader__
@@ -135,13 +136,12 @@
     The module must have been successfully imported before.
 
     """
+    if not module or not isinstance(module, types.ModuleType):
+        raise TypeError("reload() argument must be a module")
     try:
         name = module.__spec__.name
     except AttributeError:
-        try:
-            name = module.__name__
-        except AttributeError:
-            raise TypeError("reload() argument must be a module")
+        name = module.__name__
 
     if sys.modules.get(name) is not module:
         msg = "module {} not in sys.modules"
diff --git a/common/py3-stdlib/importlib/_abc.py b/common/py3-stdlib/importlib/_abc.py
deleted file mode 100644
index f80348f..0000000
--- a/common/py3-stdlib/importlib/_abc.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""Subset of importlib.abc used to reduce importlib.util imports."""
-from . import _bootstrap
-import abc
-import warnings
-
-
-class Loader(metaclass=abc.ABCMeta):
-
-    """Abstract base class for import loaders."""
-
-    def create_module(self, spec):
-        """Return a module to initialize and into which to load.
-
-        This method should raise ImportError if anything prevents it
-        from creating a new module.  It may return None to indicate
-        that the spec should create the new module.
-        """
-        # By default, defer to default semantics for the new module.
-        return None
-
-    # We don't define exec_module() here since that would break
-    # hasattr checks we do to support backward compatibility.
-
-    def load_module(self, fullname):
-        """Return the loaded module.
-
-        The module must be added to sys.modules and have import-related
-        attributes set properly.  The fullname is a str.
-
-        ImportError is raised on failure.
-
-        This method is deprecated in favor of loader.exec_module(). If
-        exec_module() exists then it is used to provide a backwards-compatible
-        functionality for this method.
-
-        """
-        if not hasattr(self, 'exec_module'):
-            raise ImportError
-        # Warning implemented in _load_module_shim().
-        return _bootstrap._load_module_shim(self, fullname)
-
-    def module_repr(self, module):
-        """Return a module's repr.
-
-        Used by the module type when the method does not raise
-        NotImplementedError.
-
-        This method is deprecated.
-
-        """
-        warnings.warn("importlib.abc.Loader.module_repr() is deprecated and "
-                      "slated for removal in Python 3.12", DeprecationWarning)
-        # The exception will cause ModuleType.__repr__ to ignore this method.
-        raise NotImplementedError
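
The Loader ABC deleted from importlib/_abc.py here is re-inlined into
importlib/abc.py further down in this diff. As a reminder of the contract
(create_module() may return None to defer, exec_module() does the work),
a toy loader built on the public APIs; the module name and source string
are made up:

    import importlib.abc
    import importlib.util

    class StringLoader(importlib.abc.Loader):
        """Executes a fixed source string as the module body."""
        def __init__(self, source):
            self.source = source

        def create_module(self, spec):
            return None  # defer to default module creation

        def exec_module(self, module):
            exec(self.source, module.__dict__)

    spec = importlib.util.spec_from_loader("toy_mod", StringLoader("X = 42"))
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    print(mod.X)  # 42
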
diff --git a/common/py3-stdlib/importlib/_adapters.py b/common/py3-stdlib/importlib/_adapters.py
deleted file mode 100644
index e72edd1..0000000
--- a/common/py3-stdlib/importlib/_adapters.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from contextlib import suppress
-
-from . import abc
-
-
-class SpecLoaderAdapter:
-    """
-    Adapt a package spec to adapt the underlying loader.
-    """
-
-    def __init__(self, spec, adapter=lambda spec: spec.loader):
-        self.spec = spec
-        self.loader = adapter(spec)
-
-    def __getattr__(self, name):
-        return getattr(self.spec, name)
-
-
-class TraversableResourcesLoader:
-    """
-    Adapt a loader to provide TraversableResources.
-    """
-
-    def __init__(self, spec):
-        self.spec = spec
-
-    def get_resource_reader(self, name):
-        return DegenerateFiles(self.spec)._native()
-
-
-class DegenerateFiles:
-    """
-    Adapter for an existing or non-existant resource reader
-    to provide a degenerate .files().
-    """
-
-    class Path(abc.Traversable):
-        def iterdir(self):
-            return iter(())
-
-        def is_dir(self):
-            return False
-
-        is_file = exists = is_dir  # type: ignore
-
-        def joinpath(self, other):
-            return DegenerateFiles.Path()
-
-        @property
-        def name(self):
-            return ''
-
-        def open(self, mode='rb', *args, **kwargs):
-            raise ValueError()
-
-    def __init__(self, spec):
-        self.spec = spec
-
-    @property
-    def _reader(self):
-        with suppress(AttributeError):
-            return self.spec.loader.get_resource_reader(self.spec.name)
-
-    def _native(self):
-        """
-        Return the native reader if it supports files().
-        """
-        reader = self._reader
-        return reader if hasattr(reader, 'files') else self
-
-    def __getattr__(self, attr):
-        return getattr(self._reader, attr)
-
-    def files(self):
-        return DegenerateFiles.Path()
-
-
-def wrap_spec(package):
-    """
-    Construct a package spec with traversable compatibility
-    on the spec/loader/reader.
-    """
-    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
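
The SpecLoaderAdapter removed above is ordinary attribute delegation:
override one attribute and forward everything else via __getattr__. A
stripped-down sketch of the idea with made-up class names:

    class Adapter:
        def __init__(self, wrapped, loader):
            self._wrapped = wrapped
            self.loader = loader  # the one overridden attribute

        def __getattr__(self, name):
            # Only called for attributes not found on Adapter itself.
            return getattr(self._wrapped, name)

    class Spec:
        name = "pkg"
        loader = "original"

    a = Adapter(Spec(), "replacement")
    print(a.name, a.loader)  # pkg replacement
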
diff --git a/common/py3-stdlib/importlib/_bootstrap.py b/common/py3-stdlib/importlib/_bootstrap.py
index 527bc9c..e00b27e 100644
--- a/common/py3-stdlib/importlib/_bootstrap.py
+++ b/common/py3-stdlib/importlib/_bootstrap.py
@@ -20,23 +20,10 @@
 # reference any injected objects! This includes not only global code but also
 # anything specified at the class level.
 
-def _object_name(obj):
-    try:
-        return obj.__qualname__
-    except AttributeError:
-        return type(obj).__qualname__
-
 # Bootstrap-related code ######################################################
 
-# Modules injected manually by _setup()
-_thread = None
-_warnings = None
-_weakref = None
-
-# Import done by _install_external_importers()
 _bootstrap_external = None
 
-
 def _wrap(new, old):
     """Simple substitute for functools.update_wrapper."""
     for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
@@ -275,12 +262,9 @@
 def _load_module_shim(self, fullname):
     """Load the specified module into sys.modules and return it.
 
-    This method is deprecated.  Use loader.exec_module() instead.
+    This method is deprecated.  Use loader.exec_module instead.
 
     """
-    msg = ("the load_module() method is deprecated and slated for removal in "
-          "Python 3.12; use exec_module() instead")
-    _warnings.warn(msg, DeprecationWarning)
     spec = spec_from_loader(fullname, self)
     if fullname in sys.modules:
         module = sys.modules[fullname]
@@ -292,16 +276,26 @@
 # Module specifications #######################################################
 
 def _module_repr(module):
-    """The implementation of ModuleType.__repr__()."""
+    # The implementation of ModuleType.__repr__().
     loader = getattr(module, '__loader__', None)
-    if spec := getattr(module, "__spec__", None):
-        return _module_repr_from_spec(spec)
-    elif hasattr(loader, 'module_repr'):
+    if hasattr(loader, 'module_repr'):
+        # As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
+        # drop their implementations for module_repr, we can add a
+        # deprecation warning here.
         try:
             return loader.module_repr(module)
         except Exception:
             pass
-    # Fall through to a catch-all which always succeeds.
+    try:
+        spec = module.__spec__
+    except AttributeError:
+        pass
+    else:
+        if spec is not None:
+            return _module_repr_from_spec(spec)
+
+    # We could use module.__class__.__name__ instead of 'module' in the
+    # various repr permutations.
     try:
         name = module.__name__
     except AttributeError:
@@ -611,9 +605,9 @@
             else:
                 _init_module_attrs(spec, module, override=True)
                 if not hasattr(spec.loader, 'exec_module'):
-                    msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
-                           "falling back to load_module()")
-                    _warnings.warn(msg, ImportWarning)
+                    # (issue19713) Once BuiltinImporter and ExtensionFileLoader
+                    # have exec_module() implemented, we can add a deprecation
+                    # warning here.
                     spec.loader.load_module(name)
                 else:
                     spec.loader.exec_module(module)
@@ -626,8 +620,9 @@
 
 
 def _load_backward_compatible(spec):
-    # It is assumed that all callers have been warned about using load_module()
-    # appropriately before calling this function.
+    # (issue19713) Once BuiltinImporter and ExtensionFileLoader
+    # have exec_module() implemented, we can add a deprecation
+    # warning here.
     try:
         spec.loader.load_module(spec.name)
     except:
@@ -666,9 +661,6 @@
     if spec.loader is not None:
         # Not a namespace package.
         if not hasattr(spec.loader, 'exec_module'):
-            msg = (f"{_object_name(spec.loader)}.exec_module() not found; "
-                    "falling back to load_module()")
-            _warnings.warn(msg, ImportWarning)
             return _load_backward_compatible(spec)
 
     module = module_from_spec(spec)
@@ -739,8 +731,6 @@
         The method is deprecated.  The import machinery does the job itself.
 
         """
-        _warnings.warn("BuiltinImporter.module_repr() is deprecated and "
-                       "slated for removal in Python 3.12", DeprecationWarning)
         return f'<module {module.__name__!r} ({BuiltinImporter._ORIGIN})>'
 
     @classmethod
@@ -761,22 +751,19 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
-        _warnings.warn("BuiltinImporter.find_module() is deprecated and "
-                       "slated for removal in Python 3.12; use find_spec() instead",
-                       DeprecationWarning)
         spec = cls.find_spec(fullname, path)
         return spec.loader if spec is not None else None
 
-    @staticmethod
-    def create_module(spec):
+    @classmethod
+    def create_module(cls, spec):
         """Create a built-in module"""
         if spec.name not in sys.builtin_module_names:
             raise ImportError('{!r} is not a built-in module'.format(spec.name),
                               name=spec.name)
         return _call_with_frames_removed(_imp.create_builtin, spec)
 
-    @staticmethod
-    def exec_module(module):
+    @classmethod
+    def exec_module(cls, module):
         """Exec a built-in module"""
         _call_with_frames_removed(_imp.exec_builtin, module)
 
@@ -819,8 +806,6 @@
         The method is deprecated.  The import machinery does the job itself.
 
         """
-        _warnings.warn("FrozenImporter.module_repr() is deprecated and "
-                       "slated for removal in Python 3.12", DeprecationWarning)
         return '<module {!r} ({})>'.format(m.__name__, FrozenImporter._ORIGIN)
 
     @classmethod
@@ -837,13 +822,10 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
-        _warnings.warn("FrozenImporter.find_module() is deprecated and "
-                       "slated for removal in Python 3.12; use find_spec() instead",
-                       DeprecationWarning)
         return cls if _imp.is_frozen(fullname) else None
 
-    @staticmethod
-    def create_module(spec):
+    @classmethod
+    def create_module(cls, spec):
         """Use default semantics for module creation."""
 
     @staticmethod
@@ -862,7 +844,6 @@
         This method is deprecated.  Use exec_module() instead.
 
         """
-        # Warning about deprecation implemented in _load_module_shim().
         return _load_module_shim(cls, fullname)
 
     @classmethod
@@ -909,9 +890,8 @@
 
 
 def _find_spec_legacy(finder, name, path):
-    msg = (f"{_object_name(finder)}.find_spec() not found; "
-                           "falling back to find_module()")
-    _warnings.warn(msg, ImportWarning)
+    # This would be a good place for a DeprecationWarning if
+    # we ended up going that route.
     loader = finder.find_module(name, path)
     if loader is None:
         return None
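
_find_spec_legacy() above bridges old-style finders into the spec world.
From the caller's side, the spec-based lookup it feeds looks like this;
the module names are illustrative:

    import importlib.util

    spec = importlib.util.find_spec("json")
    print(spec.name, spec.origin)  # name plus the file it loads from

    # A missing top-level module yields None rather than raising.
    print(importlib.util.find_spec("no_such_module_xyz"))
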
diff --git a/common/py3-stdlib/importlib/_bootstrap_external.py b/common/py3-stdlib/importlib/_bootstrap_external.py
index 49bcaea..25a3f8c 100644
--- a/common/py3-stdlib/importlib/_bootstrap_external.py
+++ b/common/py3-stdlib/importlib/_bootstrap_external.py
@@ -19,37 +19,6 @@
 # reference any injected objects! This includes not only global code but also
 # anything specified at the class level.
 
-# Module injected manually by _set_bootstrap_module()
-_bootstrap = None
-
-# Import builtin modules
-import _imp
-import _io
-import sys
-import _warnings
-import marshal
-
-
-_MS_WINDOWS = (sys.platform == 'win32')
-if _MS_WINDOWS:
-    import nt as _os
-    import winreg
-else:
-    import posix as _os
-
-
-if _MS_WINDOWS:
-    path_separators = ['\\', '/']
-else:
-    path_separators = ['/']
-# Assumption made in _path_join()
-assert all(len(sep) == 1 for sep in path_separators)
-path_sep = path_separators[0]
-path_sep_tuple = tuple(path_separators)
-path_separators = ''.join(path_separators)
-_pathseps_with_colon = {f':{s}' for s in path_separators}
-
-
 # Bootstrap-related code ######################################################
 _CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
 _CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin'
@@ -73,8 +42,6 @@
             return False
     return _relax_case
 
-_relax_case = _make_relax_case()
-
 
 def _pack_uint32(x):
     """Convert a 32-bit integer to little-endian."""
@@ -92,49 +59,22 @@
     return int.from_bytes(data, 'little')
 
 
-if _MS_WINDOWS:
-    def _path_join(*path_parts):
-        """Replacement for os.path.join()."""
-        if not path_parts:
-            return ""
-        if len(path_parts) == 1:
-            return path_parts[0]
-        root = ""
-        path = []
-        for new_root, tail in map(_os._path_splitroot, path_parts):
-            if new_root.startswith(path_sep_tuple) or new_root.endswith(path_sep_tuple):
-                root = new_root.rstrip(path_separators) or root
-                path = [path_sep + tail]
-            elif new_root.endswith(':'):
-                if root.casefold() != new_root.casefold():
-                    # Drive relative paths have to be resolved by the OS, so we reset the
-                    # tail but do not add a path_sep prefix.
-                    root = new_root
-                    path = [tail]
-                else:
-                    path.append(tail)
-            else:
-                root = new_root or root
-                path.append(tail)
-        path = [p.rstrip(path_separators) for p in path if p]
-        if len(path) == 1 and not path[0]:
-            # Avoid losing the root's trailing separator when joining with nothing
-            return root + path_sep
-        return root + path_sep.join(path)
-
-else:
-    def _path_join(*path_parts):
-        """Replacement for os.path.join()."""
-        return path_sep.join([part.rstrip(path_separators)
-                              for part in path_parts if part])
+def _path_join(*path_parts):
+    """Replacement for os.path.join()."""
+    return path_sep.join([part.rstrip(path_separators)
+                          for part in path_parts if part])
 
 
 def _path_split(path):
     """Replacement for os.path.split()."""
-    i = max(path.rfind(p) for p in path_separators)
-    if i < 0:
-        return '', path
-    return path[:i], path[i + 1:]
+    if len(path_separators) == 1:
+        front, _, tail = path.rpartition(path_sep)
+        return front, tail
+    for x in reversed(path):
+        if x in path_separators:
+            front, tail = path.rsplit(x, maxsplit=1)
+            return front, tail
+    return '', path
 
 
 def _path_stat(path):
@@ -168,18 +108,13 @@
     return _path_is_mode_type(path, 0o040000)
 
 
-if _MS_WINDOWS:
-    def _path_isabs(path):
-        """Replacement for os.path.isabs."""
-        if not path:
-            return False
-        root = _os._path_splitroot(path)[0].replace('/', '\\')
-        return len(root) > 1 and (root.startswith('\\\\') or root.endswith('\\'))
+def _path_isabs(path):
+    """Replacement for os.path.isabs.
 
-else:
-    def _path_isabs(path):
-        """Replacement for os.path.isabs."""
-        return path.startswith(path_separators)
+    Considers a Windows drive-relative path (no drive, but starts with slash) to
+    still be "absolute".
+    """
+    return path.startswith(path_separators) or path[1:3] in _pathseps_with_colon
 
 
 def _write_atomic(path, data, mode=0o666):
@@ -342,16 +277,6 @@
 #     Python 3.9a2  3423 (add IS_OP, CONTAINS_OP and JUMP_IF_NOT_EXC_MATCH bytecodes #39156)
 #     Python 3.9a2  3424 (simplify bytecodes for *value unpacking)
 #     Python 3.9a2  3425 (simplify bytecodes for **value unpacking)
-#     Python 3.10a1 3430 (Make 'annotations' future by default)
-#     Python 3.10a1 3431 (New line number table format -- PEP 626)
-#     Python 3.10a2 3432 (Function annotation for MAKE_FUNCTION is changed from dict to tuple bpo-42202)
-#     Python 3.10a2 3433 (RERAISE restores f_lasti if oparg != 0)
-#     Python 3.10a6 3434 (PEP 634: Structural Pattern Matching)
-#     Python 3.10a7 3435 Use instruction offsets (as opposed to byte offsets).
-#     Python 3.10b1 3436 (Add GEN_START bytecode #43683)
-#     Python 3.10b1 3437 (Undo making 'annotations' future by default - We like to dance among core devs!)
-#     Python 3.10b1 3438 Safer line number table handling.
-#     Python 3.10b1 3439 (Add ROT_N)
 
 #
 # MAGIC must change whenever the bytecode emitted by the compiler may no
@@ -361,17 +286,13 @@
 # Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
 # in PC/launcher.c must also be updated.
 
-MAGIC_NUMBER = (3439).to_bytes(2, 'little') + b'\r\n'
+MAGIC_NUMBER = (3425).to_bytes(2, 'little') + b'\r\n'
 _RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little')  # For import.c
 
 _PYCACHE = '__pycache__'
 _OPT = 'opt-'
 
-SOURCE_SUFFIXES = ['.py']
-if _MS_WINDOWS:
-    SOURCE_SUFFIXES.append('.pyw')
-
-EXTENSION_SUFFIXES = _imp.extension_suffixes()
+SOURCE_SUFFIXES = ['.py']  # _setup() adds .pyw as needed.
 
 BYTECODE_SUFFIXES = ['.pyc']
 # Deprecated.
@@ -546,18 +467,15 @@
             raise ImportError('loader for %s cannot handle %s' %
                                 (self.name, name), name=name)
         return method(self, name, *args, **kwargs)
-
-    # FIXME: @_check_name is used to define class methods before the
-    # _bootstrap module is set by _set_bootstrap_module().
-    if _bootstrap is not None:
+    try:
         _wrap = _bootstrap._wrap
-    else:
+    except NameError:
+        # XXX yuck
         def _wrap(new, old):
             for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
                 if hasattr(old, replace):
                     setattr(new, replace, getattr(old, replace))
             new.__dict__.update(old.__dict__)
-
     _wrap(_check_name_wrapper, method)
     return _check_name_wrapper
 
@@ -569,9 +487,6 @@
     This method is deprecated in favor of finder.find_spec().
 
     """
-    _warnings.warn("find_module() is deprecated and "
-                   "slated for removal in Python 3.12; use find_spec() instead",
-                   DeprecationWarning)
     # Call find_loader(). If it returns a string (indicating this
     # is a namespace package portion), generate a warning and
     # return None.
@@ -743,11 +658,6 @@
                 pass
     else:
         location = _os.fspath(location)
-        if not _path_isabs(location):
-            try:
-                location = _path_join(_os.getcwd(), location)
-            except OSError:
-                pass
 
     # If the location is on the filesystem, but doesn't actually exist,
     # we could return None here, indicating that the location is not
@@ -801,10 +711,10 @@
     REGISTRY_KEY_DEBUG = (
         'Software\\Python\\PythonCore\\{sys_version}'
         '\\Modules\\{fullname}\\Debug')
-    DEBUG_BUILD = (_MS_WINDOWS and '_d.pyd' in EXTENSION_SUFFIXES)
+    DEBUG_BUILD = False  # Changed in _setup()
 
-    @staticmethod
-    def _open_registry(key):
+    @classmethod
+    def _open_registry(cls, key):
         try:
             return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key)
         except OSError:
@@ -845,12 +755,9 @@
     def find_module(cls, fullname, path=None):
         """Find module named in the registry.
 
-        This method is deprecated.  Use find_spec() instead.
+        This method is deprecated.  Use exec_module() instead.
 
         """
-        _warnings.warn("WindowsRegistryFinder.find_module() is deprecated and "
-                       "slated for removal in Python 3.12; use find_spec() instead",
-                       DeprecationWarning)
         spec = cls.find_spec(fullname, path)
         if spec is not None:
             return spec.loader
@@ -883,8 +790,7 @@
         _bootstrap._call_with_frames_removed(exec, code, module.__dict__)
 
     def load_module(self, fullname):
-        """This method is deprecated."""
-        # Warning implemented in _load_module_shim().
+        """This module is deprecated."""
         return _bootstrap._load_module_shim(self, fullname)
 
 
@@ -1059,7 +965,7 @@
         """
         # The only reason for this method is for the name check.
         # Issue #14857: Avoid the zero-argument form of super so the implementation
-        # of that form can be updated without breaking the frozen module.
+        # of that form can be updated without breaking the frozen module.
         return super(FileLoader, self).load_module(fullname)
 
     @_check_name
@@ -1076,10 +982,32 @@
             with _io.FileIO(path, 'r') as file:
                 return file.read()
 
+    # ResourceReader ABC API.
+
     @_check_name
     def get_resource_reader(self, module):
-        from importlib.readers import FileReader
-        return FileReader(self)
+        if self.is_package(module):
+            return self
+        return None
+
+    def open_resource(self, resource):
+        path = _path_join(_path_split(self.path)[0], resource)
+        return _io.FileIO(path, 'r')
+
+    def resource_path(self, resource):
+        if not self.is_resource(resource):
+            raise FileNotFoundError
+        path = _path_join(_path_split(self.path)[0], resource)
+        return path
+
+    def is_resource(self, name):
+        if path_sep in name:
+            return False
+        path = _path_join(_path_split(self.path)[0], name)
+        return _path_isfile(path)
+
+    def contents(self):
+        return iter(_os.listdir(_path_split(self.path)[0]))
 
 
 class SourceFileLoader(FileLoader, SourceLoader):
@@ -1152,6 +1080,10 @@
         return None
 
 
+# Filled in by _setup().
+EXTENSION_SUFFIXES = []
+
+
 class ExtensionFileLoader(FileLoader, _LoaderBasics):
 
     """Loader for extension modules.
@@ -1212,15 +1144,10 @@
     using path_finder.  For top-level modules, the parent module's path
     is sys.path."""
 
-    # When invalidate_caches() is called, this epoch is incremented
-    # https://bugs.python.org/issue45703
-    _epoch = 0
-
     def __init__(self, name, path, path_finder):
         self._name = name
         self._path = path
         self._last_parent_path = tuple(self._get_parent_path())
-        self._last_epoch = self._epoch
         self._path_finder = path_finder
 
     def _find_parent_path_names(self):
@@ -1240,7 +1167,7 @@
     def _recalculate(self):
         # If the parent's path has changed, recalculate _path
         parent_path = tuple(self._get_parent_path()) # Make a copy
-        if parent_path != self._last_parent_path or self._epoch != self._last_epoch:
+        if parent_path != self._last_parent_path:
             spec = self._path_finder(self._name, parent_path)
             # Note that no changes are made if a loader is returned, but we
             #  do remember the new parent path
@@ -1248,7 +1175,6 @@
                 if spec.submodule_search_locations:
                     self._path = spec.submodule_search_locations
             self._last_parent_path = parent_path     # Save the copy
-            self._last_epoch = self._epoch
         return self._path
 
     def __iter__(self):
@@ -1278,15 +1204,13 @@
     def __init__(self, name, path, path_finder):
         self._path = _NamespacePath(name, path, path_finder)
 
-    @staticmethod
-    def module_repr(module):
+    @classmethod
+    def module_repr(cls, module):
         """Return repr for the module.
 
         The method is deprecated.  The import machinery does the job itself.
 
         """
-        _warnings.warn("_NamespaceLoader.module_repr() is deprecated and "
-                       "slated for removal in Python 3.12", DeprecationWarning)
         return '<module {!r} (namespace)>'.format(module.__name__)
 
     def is_package(self, fullname):
@@ -1313,13 +1237,8 @@
         # The import system never calls this method.
         _bootstrap._verbose_message('namespace module loaded with path {!r}',
                                     self._path)
-        # Warning implemented in _load_module_shim().
         return _bootstrap._load_module_shim(self, fullname)
 
-    def get_resource_reader(self, module):
-        from importlib.readers import NamespaceReader
-        return NamespaceReader(self._path)
-
 
 # Finders #####################################################################
 
@@ -1327,8 +1246,8 @@
 
     """Meta path finder for sys.path and package __path__ attributes."""
 
-    @staticmethod
-    def invalidate_caches():
+    @classmethod
+    def invalidate_caches(cls):
         """Call the invalidate_caches() method on all path entry finders
         stored in sys.path_importer_caches (where implemented)."""
         for name, finder in list(sys.path_importer_cache.items()):
@@ -1336,12 +1255,9 @@
                 del sys.path_importer_cache[name]
             elif hasattr(finder, 'invalidate_caches'):
                 finder.invalidate_caches()
-        # Also invalidate the caches of _NamespacePaths
-        # https://bugs.python.org/issue45703
-        _NamespacePath._epoch += 1
 
-    @staticmethod
-    def _path_hooks(path):
+    @classmethod
+    def _path_hooks(cls, path):
         """Search sys.path_hooks for a finder for 'path'."""
         if sys.path_hooks is not None and not sys.path_hooks:
             _warnings.warn('sys.path_hooks is empty', ImportWarning)
@@ -1380,14 +1296,8 @@
         # This would be a good place for a DeprecationWarning if
         # we ended up going that route.
         if hasattr(finder, 'find_loader'):
-            msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; "
-                    "falling back to find_loader()")
-            _warnings.warn(msg, ImportWarning)
             loader, portions = finder.find_loader(fullname)
         else:
-            msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; "
-                    "falling back to find_module()")
-            _warnings.warn(msg, ImportWarning)
             loader = finder.find_module(fullname)
             portions = []
         if loader is not None:
@@ -1460,16 +1370,13 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
-        _warnings.warn("PathFinder.find_module() is deprecated and "
-                       "slated for removal in Python 3.12; use find_spec() instead",
-                       DeprecationWarning)
         spec = cls.find_spec(fullname, path)
         if spec is None:
             return None
         return spec.loader
 
-    @staticmethod
-    def find_distributions(*args, **kwargs):
+    @classmethod
+    def find_distributions(cls, *args, **kwargs):
         """
         Find distributions.
 
@@ -1501,8 +1408,6 @@
         self._loaders = loaders
         # Base (directory) path
         self.path = path or '.'
-        if not _path_isabs(self.path):
-            self.path = _path_join(_os.getcwd(), self.path)
         self._path_mtime = -1
         self._path_cache = set()
         self._relaxed_path_cache = set()
@@ -1520,9 +1425,6 @@
         This method is deprecated.  Use find_spec() instead.
 
         """
-        _warnings.warn("FileFinder.find_loader() is deprecated and "
-                       "slated for removal in Python 3.12; use find_spec() instead",
-                       DeprecationWarning)
         spec = self.find_spec(fullname)
         if spec is None:
             return None, []
@@ -1568,10 +1470,7 @@
                 is_namespace = _path_isdir(base_path)
         # Check for a file w/ a proper suffix exists.
         for suffix, loader_class in self._loaders:
-            try:
-                full_path = _path_join(self.path, tail_module + suffix)
-            except ValueError:
-                return None
+            full_path = _path_join(self.path, tail_module + suffix)
             _bootstrap._verbose_message('trying {}', full_path, verbosity=2)
             if cache_module + suffix in cache:
                 if _path_isfile(full_path):
@@ -1673,14 +1572,66 @@
     return [extensions, source, bytecode]
 
 
-def _set_bootstrap_module(_bootstrap_module):
-    global _bootstrap
+def _setup(_bootstrap_module):
+    """Setup the path-based importers for importlib by importing needed
+    built-in modules and injecting them into the global namespace.
+
+    Other components are extracted from the core bootstrap module.
+
+    """
+    global sys, _imp, _bootstrap
     _bootstrap = _bootstrap_module
+    sys = _bootstrap.sys
+    _imp = _bootstrap._imp
+
+    self_module = sys.modules[__name__]
+
+    # Directly load the os module (needed during bootstrap).
+    os_details = ('posix', ['/']), ('nt', ['\\', '/'])
+    for builtin_os, path_separators in os_details:
+        # Assumption made in _path_join()
+        assert all(len(sep) == 1 for sep in path_separators)
+        path_sep = path_separators[0]
+        if builtin_os in sys.modules:
+            os_module = sys.modules[builtin_os]
+            break
+        else:
+            try:
+                os_module = _bootstrap._builtin_from_name(builtin_os)
+                break
+            except ImportError:
+                continue
+    else:
+        raise ImportError('importlib requires posix or nt')
+
+    setattr(self_module, '_os', os_module)
+    setattr(self_module, 'path_sep', path_sep)
+    setattr(self_module, 'path_separators', ''.join(path_separators))
+    setattr(self_module, '_pathseps_with_colon', {f':{s}' for s in path_separators})
+
+    # Directly load built-in modules needed during bootstrap.
+    builtin_names = ['_io', '_warnings', 'marshal']
+    if builtin_os == 'nt':
+        builtin_names.append('winreg')
+    for builtin_name in builtin_names:
+        if builtin_name not in sys.modules:
+            builtin_module = _bootstrap._builtin_from_name(builtin_name)
+        else:
+            builtin_module = sys.modules[builtin_name]
+        setattr(self_module, builtin_name, builtin_module)
+
+    # Constants
+    setattr(self_module, '_relax_case', _make_relax_case())
+    EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
+    if builtin_os == 'nt':
+        SOURCE_SUFFIXES.append('.pyw')
+        if '_d.pyd' in EXTENSION_SUFFIXES:
+            WindowsRegistryFinder.DEBUG_BUILD = True
 
 
 def _install(_bootstrap_module):
     """Install the path-based import components."""
-    _set_bootstrap_module(_bootstrap_module)
+    _setup(_bootstrap_module)
     supported_loaders = _get_supported_file_loaders()
     sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
     sys.meta_path.append(PathFinder)
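
The MAGIC_NUMBER reverted above (3439 back to 3425) is the 4-byte stamp at
the start of every .pyc file, and it gates whether cached bytecode is
reused. A sketch of checking a cache file against the running interpreter;
the path is whatever .pyc you have at hand:

    import importlib.util

    print(importlib.util.MAGIC_NUMBER)  # b'a\r\r\n' for magic 3425 (3.9)

    def pyc_matches(path):
        # The first four bytes of a .pyc are the magic number.
        with open(path, "rb") as f:
            return f.read(4) == importlib.util.MAGIC_NUMBER
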
diff --git a/common/py3-stdlib/importlib/_common.py b/common/py3-stdlib/importlib/_common.py
index 549fee3..c1204f0 100644
--- a/common/py3-stdlib/importlib/_common.py
+++ b/common/py3-stdlib/importlib/_common.py
@@ -1,72 +1,9 @@
 import os
 import pathlib
+import zipfile
 import tempfile
 import functools
 import contextlib
-import types
-import importlib
-
-from typing import Union, Any, Optional
-from .abc import ResourceReader, Traversable
-
-from ._adapters import wrap_spec
-
-Package = Union[types.ModuleType, str]
-
-
-def files(package):
-    # type: (Package) -> Traversable
-    """
-    Get a Traversable resource from a package
-    """
-    return from_package(get_package(package))
-
-
-def normalize_path(path):
-    # type: (Any) -> str
-    """Normalize a path by ensuring it is a string.
-
-    If the resulting string contains path separators, an exception is raised.
-    """
-    str_path = str(path)
-    parent, file_name = os.path.split(str_path)
-    if parent:
-        raise ValueError(f'{path!r} must be only a file name')
-    return file_name
-
-
-def get_resource_reader(package):
-    # type: (types.ModuleType) -> Optional[ResourceReader]
-    """
-    Return the package's loader if it's a ResourceReader.
-    """
-    # We can't use
-    # a issubclass() check here because apparently abc.'s __subclasscheck__()
-    # hook wants to create a weak reference to the object, but
-    # zipimport.zipimporter does not support weak references, resulting in a
-    # TypeError.  That seems terrible.
-    spec = package.__spec__
-    reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
-    if reader is None:
-        return None
-    return reader(spec.name)  # type: ignore
-
-
-def resolve(cand):
-    # type: (Package) -> types.ModuleType
-    return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
-
-
-def get_package(package):
-    # type: (Package) -> types.ModuleType
-    """Take a package name or module object and return the module.
-
-    Raise an exception if the resolved module is not a package.
-    """
-    resolved = resolve(package)
-    if wrap_spec(resolved).submodule_search_locations is None:
-        raise TypeError(f'{package!r} is not a package')
-    return resolved
 
 
 def from_package(package):
@@ -74,9 +11,18 @@
     Return a Traversable object for the given package.
 
     """
-    spec = wrap_spec(package)
-    reader = spec.loader.get_resource_reader(spec.name)
-    return reader.files()
+    return fallback_resources(package.__spec__)
+
+
+def fallback_resources(spec):
+    package_directory = pathlib.Path(spec.origin).parent
+    try:
+        archive_path = spec.loader.archive
+        rel_path = package_directory.relative_to(archive_path)
+        return zipfile.Path(archive_path, str(rel_path) + '/')
+    except Exception:
+        pass
+    return package_directory
 
 
 @contextlib.contextmanager
@@ -88,7 +34,6 @@
     try:
         os.write(fd, reader())
         os.close(fd)
-        del reader
         yield pathlib.Path(raw_path)
     finally:
         try:
@@ -98,12 +43,14 @@
 
 
 @functools.singledispatch
+@contextlib.contextmanager
 def as_file(path):
     """
     Given a Traversable object, return that object as a
     path on the local file system in a context manager.
     """
-    return _tempfile(path.read_bytes, suffix=path.name)
+    with _tempfile(path.read_bytes, suffix=path.name) as local:
+        yield local
 
 
 @as_file.register(pathlib.Path)
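
The singledispatch as_file() above is what the public importlib.resources
helpers sit on: it materializes a Traversable into a concrete filesystem
path for the duration of the with-block, spilling to a temp file when
needed. Typical caller-side use on 3.9+, with an arbitrary stdlib package
as the example:

    from importlib.resources import as_file, files

    ref = files("json") / "__init__.py"  # any importable package works
    with as_file(ref) as path:
        print(path)  # a concrete filesystem path, valid inside the block
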
diff --git a/common/py3-stdlib/importlib/abc.py b/common/py3-stdlib/importlib/abc.py
index 0b4a3f8..b8a9bb1 100644
--- a/common/py3-stdlib/importlib/abc.py
+++ b/common/py3-stdlib/importlib/abc.py
@@ -1,4 +1,5 @@
 """Abstract base classes related to import."""
+from . import _bootstrap
 from . import _bootstrap_external
 from . import machinery
 try:
@@ -11,10 +12,8 @@
     import _frozen_importlib_external
 except ImportError:
     _frozen_importlib_external = _bootstrap_external
-from ._abc import Loader
 import abc
 import warnings
-from typing import BinaryIO, Iterable, Text
 from typing import Protocol, runtime_checkable
 
 
@@ -41,27 +40,15 @@
     Deprecated since Python 3.3
     """
 
-    def __init__(self):
-        warnings.warn("the Finder ABC is deprecated and "
-                       "slated for removal in Python 3.12; use MetaPathFinder "
-                       "or PathEntryFinder instead",
-                       DeprecationWarning)
-
     @abc.abstractmethod
     def find_module(self, fullname, path=None):
         """An abstract method that should find a module.
         The fullname is a str and the optional path is a str or None.
         Returns a Loader object or None.
         """
-        warnings.warn("importlib.abc.Finder along with its find_module() "
-                      "method are deprecated and "
-                       "slated for removal in Python 3.12; use "
-                       "MetaPathFinder.find_spec() or "
-                       "PathEntryFinder.find_spec() instead",
-                       DeprecationWarning)
 
 
-class MetaPathFinder(metaclass=abc.ABCMeta):
+class MetaPathFinder(Finder):
 
     """Abstract base class for import finders on sys.meta_path."""
 
@@ -80,8 +67,8 @@
 
         """
         warnings.warn("MetaPathFinder.find_module() is deprecated since Python "
-                      "3.4 in favor of MetaPathFinder.find_spec() and is "
-                      "slated for removal in Python 3.12",
+                      "3.4 in favor of MetaPathFinder.find_spec() "
+                      "(available since 3.4)",
                       DeprecationWarning,
                       stacklevel=2)
         if not hasattr(self, 'find_spec'):
@@ -98,7 +85,7 @@
           machinery.PathFinder, machinery.WindowsRegistryFinder)
 
 
-class PathEntryFinder(metaclass=abc.ABCMeta):
+class PathEntryFinder(Finder):
 
     """Abstract base class for path entry finders used by PathFinder."""
 
@@ -147,6 +134,53 @@
 _register(PathEntryFinder, machinery.FileFinder)
 
 
+class Loader(metaclass=abc.ABCMeta):
+
+    """Abstract base class for import loaders."""
+
+    def create_module(self, spec):
+        """Return a module to initialize and into which to load.
+
+        This method should raise ImportError if anything prevents it
+        from creating a new module.  It may return None to indicate
+        that the spec should create the new module.
+        """
+        # By default, defer to default semantics for the new module.
+        return None
+
+    # We don't define exec_module() here since that would break
+    # hasattr checks we do to support backward compatibility.
+
+    def load_module(self, fullname):
+        """Return the loaded module.
+
+        The module must be added to sys.modules and have import-related
+        attributes set properly.  The fullname is a str.
+
+        ImportError is raised on failure.
+
+        This method is deprecated in favor of loader.exec_module(). If
+        exec_module() exists then it is used to provide a backwards-compatible
+        functionality for this method.
+
+        """
+        if not hasattr(self, 'exec_module'):
+            raise ImportError
+        return _bootstrap._load_module_shim(self, fullname)
+
+    def module_repr(self, module):
+        """Return a module's repr.
+
+        Used by the module type when the method does not raise
+        NotImplementedError.
+
+        This method is deprecated.
+
+        """
+        # The exception will cause ModuleType.__repr__ to ignore this method.
+        raise NotImplementedError
+
+
 class ResourceLoader(Loader):
 
     """Abstract base class for loaders which can return data from their
@@ -310,45 +344,49 @@
 
 
 class ResourceReader(metaclass=abc.ABCMeta):
-    """Abstract base class for loaders to provide resource reading support."""
+
+    """Abstract base class to provide resource-reading support.
+
+    Loaders that support resource reading are expected to implement
+    the ``get_resource_reader(fullname)`` method and have it either return None
+    or an object compatible with this ABC.
+    """
 
     @abc.abstractmethod
-    def open_resource(self, resource: Text) -> BinaryIO:
+    def open_resource(self, resource):
         """Return an opened, file-like object for binary reading.
 
-        The 'resource' argument is expected to represent only a file name.
+        The 'resource' argument is expected to represent only a file name
+        and thus not contain any subdirectory components.
+
         If the resource cannot be found, FileNotFoundError is raised.
         """
-        # This deliberately raises FileNotFoundError instead of
-        # NotImplementedError so that if this method is accidentally called,
-        # it'll still do the right thing.
         raise FileNotFoundError
 
     @abc.abstractmethod
-    def resource_path(self, resource: Text) -> Text:
+    def resource_path(self, resource):
         """Return the file system path to the specified resource.
 
-        The 'resource' argument is expected to represent only a file name.
+        The 'resource' argument is expected to represent only a file name
+        and thus not contain any subdirectory components.
+
         If the resource does not exist on the file system, raise
         FileNotFoundError.
         """
-        # This deliberately raises FileNotFoundError instead of
-        # NotImplementedError so that if this method is accidentally called,
-        # it'll still do the right thing.
         raise FileNotFoundError
 
     @abc.abstractmethod
-    def is_resource(self, path: Text) -> bool:
-        """Return True if the named 'path' is a resource.
-
-        Files are resources, directories are not.
-        """
+    def is_resource(self, name):
+        """Return True if the named 'name' is consider a resource."""
         raise FileNotFoundError
 
     @abc.abstractmethod
-    def contents(self) -> Iterable[str]:
-        """Return an iterable of entries in `package`."""
-        raise FileNotFoundError
+    def contents(self):
+        """Return an iterable of strings over the contents of the package."""
+        return []
+
+
+_register(ResourceReader, machinery.SourceFileLoader)
 
 
 @runtime_checkable
@@ -364,28 +402,26 @@
         Yield Traversable objects in self
         """
 
+    @abc.abstractmethod
     def read_bytes(self):
         """
         Read contents of self as bytes
         """
-        with self.open('rb') as strm:
-            return strm.read()
-
-    def read_text(self, encoding=None):
-        """
-        Read contents of self as text
-        """
-        with self.open(encoding=encoding) as strm:
-            return strm.read()
 
     @abc.abstractmethod
-    def is_dir(self) -> bool:
+    def read_text(self, encoding=None):
+        """
+        Read contents of self as text
+        """
+
+    @abc.abstractmethod
+    def is_dir(self):
         """
         Return True if self is a dir
         """
 
     @abc.abstractmethod
-    def is_file(self) -> bool:
+    def is_file(self):
         """
         Return True if self is a file
         """
@@ -396,11 +432,11 @@
         Return Traversable child in self
         """
 
+    @abc.abstractmethod
     def __truediv__(self, child):
         """
         Return Traversable child in self
         """
-        return self.joinpath(child)
 
     @abc.abstractmethod
     def open(self, mode='r', *args, **kwargs):
@@ -413,18 +449,14 @@
         """
 
     @abc.abstractproperty
-    def name(self) -> str:
+    def name(self):
+        # type: () -> str
         """
         The base name of this object without any parent references.
         """
 
 
 class TraversableResources(ResourceReader):
-    """
-    The required interface for providing traversable
-    resources.
-    """
-
     @abc.abstractmethod
     def files(self):
         """Return a Traversable object for the loaded package."""
@@ -436,7 +468,7 @@
         raise FileNotFoundError(resource)
 
     def is_resource(self, path):
-        return self.files().joinpath(path).is_file()
+        return self.files().joinpath(path).is_file()
 
     def contents(self):
         return (item.name for item in self.files().iterdir())
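
Since pathlib.Path already provides every method Traversable names (iterdir, read_bytes, read_text, is_dir, is_file, joinpath, __truediv__, open, name), a TraversableResources for on-disk packages can be as small as the sketch below; DirResources is a hypothetical name:

import pathlib
from importlib.abc import TraversableResources

class DirResources(TraversableResources):
    """Illustrative TraversableResources over an on-disk directory."""

    def __init__(self, root):
        self.root = root

    def files(self):
        # pathlib.Path structurally satisfies the Traversable interface,
        # so the inherited open_resource/is_resource/contents just work.
        return pathlib.Path(self.root)
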
diff --git a/common/py3-stdlib/importlib/machinery.py b/common/py3-stdlib/importlib/machinery.py
index 9a7757f..1b2b5c9 100644
--- a/common/py3-stdlib/importlib/machinery.py
+++ b/common/py3-stdlib/importlib/machinery.py
@@ -1,5 +1,7 @@
 """The machinery of importlib: finders, loaders, hooks, etc."""
 
+import _imp
+
 from ._bootstrap import ModuleSpec
 from ._bootstrap import BuiltinImporter
 from ._bootstrap import FrozenImporter
diff --git a/common/py3-stdlib/importlib/metadata.py b/common/py3-stdlib/importlib/metadata.py
new file mode 100644
index 0000000..ffa0cba
--- /dev/null
+++ b/common/py3-stdlib/importlib/metadata.py
@@ -0,0 +1,586 @@
+import io
+import os
+import re
+import abc
+import csv
+import sys
+import email
+import pathlib
+import zipfile
+import operator
+import functools
+import itertools
+import posixpath
+import collections
+
+from configparser import ConfigParser
+from contextlib import suppress
+from importlib import import_module
+from importlib.abc import MetaPathFinder
+from itertools import starmap
+
+
+__all__ = [
+    'Distribution',
+    'DistributionFinder',
+    'PackageNotFoundError',
+    'distribution',
+    'distributions',
+    'entry_points',
+    'files',
+    'metadata',
+    'requires',
+    'version',
+    ]
+
+
+class PackageNotFoundError(ModuleNotFoundError):
+    """The package was not found."""
+
+
+class EntryPoint(
+        collections.namedtuple('EntryPointBase', 'name value group')):
+    """An entry point as defined by Python packaging conventions.
+
+    See `the packaging docs on entry points
+    <https://packaging.python.org/specifications/entry-points/>`_
+    for more information.
+    """
+
+    pattern = re.compile(
+        r'(?P<module>[\w.]+)\s*'
+        r'(:\s*(?P<attr>[\w.]+))?\s*'
+        r'(?P<extras>\[.*\])?\s*$'
+        )
+    """
+    A regular expression describing the syntax for an entry point,
+    which might look like:
+
+        - module
+        - package.module
+        - package.module:attribute
+        - package.module:object.attribute
+        - package.module:attr [extra1, extra2]
+
+    Other combinations are possible as well.
+
+    The expression is lenient about whitespace around the ':',
+    following the attr, and following any extras.
+    """
+
+    def load(self):
+        """Load the entry point from its definition. If only a module
+        is indicated by the value, return that module. Otherwise,
+        return the named object.
+        """
+        match = self.pattern.match(self.value)
+        module = import_module(match.group('module'))
+        attrs = filter(None, (match.group('attr') or '').split('.'))
+        return functools.reduce(getattr, attrs, module)
+
+    @property
+    def module(self):
+        match = self.pattern.match(self.value)
+        return match.group('module')
+
+    @property
+    def attr(self):
+        match = self.pattern.match(self.value)
+        return match.group('attr')
+
+    @property
+    def extras(self):
+        match = self.pattern.match(self.value)
+        return re.findall(r'\w+', match.group('extras') or '')
+
+    @classmethod
+    def _from_config(cls, config):
+        return [
+            cls(name, value, group)
+            for group in config.sections()
+            for name, value in config.items(group)
+            ]
+
+    @classmethod
+    def _from_text(cls, text):
+        config = ConfigParser(delimiters='=')
+        # case sensitive: https://stackoverflow.com/q/1611799/812183
+        config.optionxform = str
+        try:
+            config.read_string(text)
+        except AttributeError:  # pragma: nocover
+            # Python 2 has no read_string
+            config.readfp(io.StringIO(text))
+        return EntryPoint._from_config(config)
+
+    def __iter__(self):
+        """
+        Supply iter so one may construct dicts of EntryPoints easily.
+        """
+        return iter((self.name, self))
+
+    def __reduce__(self):
+        return (
+            self.__class__,
+            (self.name, self.value, self.group),
+            )
+
+
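
As a rough illustration of the pattern above, an EntryPoint can be constructed by hand and loaded; json.tool:main is used only because it ships with the stdlib, and since EntryPoint is not listed in __all__ this is a sketch rather than supported API:

from importlib.metadata import EntryPoint

ep = EntryPoint(name='jt', value='json.tool:main', group='console_scripts')
print(ep.module)       # 'json.tool'
print(ep.attr)         # 'main'
tool_main = ep.load()  # imports json.tool and returns its main function
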
+class PackagePath(pathlib.PurePosixPath):
+    """A reference to a path in a package"""
+
+    def read_text(self, encoding='utf-8'):
+        with self.locate().open(encoding=encoding) as stream:
+            return stream.read()
+
+    def read_binary(self):
+        with self.locate().open('rb') as stream:
+            return stream.read()
+
+    def locate(self):
+        """Return a path-like object for this path"""
+        return self.dist.locate_file(self)
+
+
+class FileHash:
+    def __init__(self, spec):
+        self.mode, _, self.value = spec.partition('=')
+
+    def __repr__(self):
+        return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
+
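
FileHash simply splits the 'mode=value' hash spec found in RECORD rows; a quick sketch, with a made-up digest:

from importlib.metadata import FileHash

fh = FileHash('sha256=47DEQpj8HBSa')  # truncated, illustrative digest
print(fh.mode)   # 'sha256'
print(fh.value)  # '47DEQpj8HBSa'
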
+
+class Distribution:
+    """A Python distribution package."""
+
+    @abc.abstractmethod
+    def read_text(self, filename):
+        """Attempt to load metadata file given by the name.
+
+        :param filename: The name of the file in the distribution info.
+        :return: The text if found, otherwise None.
+        """
+
+    @abc.abstractmethod
+    def locate_file(self, path):
+        """
+        Given a path to a file in this distribution, return a path
+        to it.
+        """
+
+    @classmethod
+    def from_name(cls, name):
+        """Return the Distribution for the given package name.
+
+        :param name: The name of the distribution package to search for.
+        :return: The Distribution instance (or subclass thereof) for the named
+            package, if found.
+        :raises PackageNotFoundError: When the named package's distribution
+            metadata cannot be found.
+        """
+        for resolver in cls._discover_resolvers():
+            dists = resolver(DistributionFinder.Context(name=name))
+            dist = next(iter(dists), None)
+            if dist is not None:
+                return dist
+        else:
+            raise PackageNotFoundError(name)
+
+    @classmethod
+    def discover(cls, **kwargs):
+        """Return an iterable of Distribution objects for all packages.
+
+        Pass a ``context`` or pass keyword arguments for constructing
+        a context.
+
+        :context: A ``DistributionFinder.Context`` object.
+        :return: Iterable of Distribution objects for all packages.
+        """
+        context = kwargs.pop('context', None)
+        if context and kwargs:
+            raise ValueError("cannot accept context and kwargs")
+        context = context or DistributionFinder.Context(**kwargs)
+        return itertools.chain.from_iterable(
+            resolver(context)
+            for resolver in cls._discover_resolvers()
+            )
+
+    @staticmethod
+    def at(path):
+        """Return a Distribution for the indicated metadata path
+
+        :param path: a string or path-like object
+        :return: a concrete Distribution instance for the path
+        """
+        return PathDistribution(pathlib.Path(path))
+
+    @staticmethod
+    def _discover_resolvers():
+        """Search the meta_path for resolvers."""
+        declared = (
+            getattr(finder, 'find_distributions', None)
+            for finder in sys.meta_path
+            )
+        return filter(None, declared)
+
+    @classmethod
+    def _local(cls, root='.'):
+        from pep517 import build, meta
+        system = build.compat_system(root)
+        builder = functools.partial(
+            meta.build,
+            source_dir=root,
+            system=system,
+            )
+        return PathDistribution(zipfile.Path(meta.build_as_zip(builder)))
+
+    @property
+    def metadata(self):
+        """Return the parsed metadata for this Distribution.
+
+        The returned object will have keys that name the various bits of
+        metadata.  See PEP 566 for details.
+        """
+        text = (
+            self.read_text('METADATA')
+            or self.read_text('PKG-INFO')
+            # This last clause is here to support old egg-info files.  Its
+            # effect is to just end up using the PathDistribution's self._path
+            # (which points to the egg-info file) attribute unchanged.
+            or self.read_text('')
+            )
+        return email.message_from_string(text)
+
+    @property
+    def version(self):
+        """Return the 'Version' metadata for the distribution package."""
+        return self.metadata['Version']
+
+    @property
+    def entry_points(self):
+        return EntryPoint._from_text(self.read_text('entry_points.txt'))
+
+    @property
+    def files(self):
+        """Files in this distribution.
+
+        :return: List of PackagePath for this distribution or None
+
+        Result is `None` if the metadata file that enumerates files
+        (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
+        missing.
+        Result may be empty if the metadata exists but is empty.
+        """
+        file_lines = self._read_files_distinfo() or self._read_files_egginfo()
+
+        def make_file(name, hash=None, size_str=None):
+            result = PackagePath(name)
+            result.hash = FileHash(hash) if hash else None
+            result.size = int(size_str) if size_str else None
+            result.dist = self
+            return result
+
+        return file_lines and list(starmap(make_file, csv.reader(file_lines)))
+
+    def _read_files_distinfo(self):
+        """
+        Read the lines of RECORD
+        """
+        text = self.read_text('RECORD')
+        return text and text.splitlines()
+
+    def _read_files_egginfo(self):
+        """
+        SOURCES.txt might contain literal commas, so wrap each line
+        in quotes.
+        """
+        text = self.read_text('SOURCES.txt')
+        return text and map('"{}"'.format, text.splitlines())
+
+    @property
+    def requires(self):
+        """Generated requirements specified for this Distribution"""
+        reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
+        return reqs and list(reqs)
+
+    def _read_dist_info_reqs(self):
+        return self.metadata.get_all('Requires-Dist')
+
+    def _read_egg_info_reqs(self):
+        source = self.read_text('requires.txt')
+        return source and self._deps_from_requires_text(source)
+
+    @classmethod
+    def _deps_from_requires_text(cls, source):
+        section_pairs = cls._read_sections(source.splitlines())
+        sections = {
+            section: list(map(operator.itemgetter('line'), results))
+            for section, results in
+            itertools.groupby(section_pairs, operator.itemgetter('section'))
+            }
+        return cls._convert_egg_info_reqs_to_simple_reqs(sections)
+
+    @staticmethod
+    def _read_sections(lines):
+        section = None
+        for line in filter(None, lines):
+            section_match = re.match(r'\[(.*)\]$', line)
+            if section_match:
+                section = section_match.group(1)
+                continue
+            yield locals()  # a dict carrying the current 'section' and 'line'
+
+    @staticmethod
+    def _convert_egg_info_reqs_to_simple_reqs(sections):
+        """
+        Historically, setuptools would solicit and store 'extra'
+        requirements, including those with environment markers,
+        in separate sections. More modern tools expect each
+        dependency to be defined separately, with any relevant
+        extras and environment markers attached directly to that
+        requirement. This method converts the former to the
+        latter. See _test_deps_from_requires_text for an example.
+        """
+        def make_condition(name):
+            return name and 'extra == "{name}"'.format(name=name)
+
+        def parse_condition(section):
+            section = section or ''
+            extra, sep, markers = section.partition(':')
+            if extra and markers:
+                markers = '({markers})'.format(markers=markers)
+            conditions = list(filter(None, [markers, make_condition(extra)]))
+            return '; ' + ' and '.join(conditions) if conditions else ''
+
+        for section, deps in sections.items():
+            for dep in deps:
+                yield dep + parse_condition(section)
+
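
A worked example of this conversion, using a made-up requires.txt body; _deps_from_requires_text is an internal classmethod, so this is illustrative only:

from importlib.metadata import Distribution

source = '''
pytest
[testing]
coverage
[testing:python_version < "3.8"]
importlib_resources
'''
print(list(Distribution._deps_from_requires_text(source)))
# ['pytest',
#  'coverage; extra == "testing"',
#  'importlib_resources; (python_version < "3.8") and extra == "testing"']
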
+
+class DistributionFinder(MetaPathFinder):
+    """
+    A MetaPathFinder capable of discovering installed distributions.
+    """
+
+    class Context:
+        """
+        Keyword arguments presented by the caller to
+        ``distributions()`` or ``Distribution.discover()``
+        to narrow the scope of a search for distributions
+        in all DistributionFinders.
+
+        Each DistributionFinder may expect any parameters
+        and should attempt to honor the canonical
+        parameters defined below when appropriate.
+        """
+
+        name = None
+        """
+        Specific name for which a distribution finder should match.
+        A name of ``None`` matches all distributions.
+        """
+
+        def __init__(self, **kwargs):
+            vars(self).update(kwargs)
+
+        @property
+        def path(self):
+            """
+            The path that a distribution finder should search.
+
+            Typically refers to Python package paths and defaults
+            to ``sys.path``.
+            """
+            return vars(self).get('path', sys.path)
+
+    @abc.abstractmethod
+    def find_distributions(self, context=Context()):
+        """
+        Find distributions.
+
+        Return an iterable of all Distribution instances capable of
+        loading the metadata for packages matching the ``context``,
+        a DistributionFinder.Context instance.
+        """
+
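
A hypothetical minimal finder honoring the canonical name parameter might look like the sketch below; the dist-info path is made up, and a real finder would also be appended to sys.meta_path so _discover_resolvers can see it:

from importlib.metadata import Distribution, DistributionFinder

class SingleDistFinder(DistributionFinder):
    """Illustrative finder exposing one distribution from a fixed path."""

    def __init__(self, info_dir):
        self._info_dir = info_dir

    def find_distributions(self, context=DistributionFinder.Context()):
        # Honor the canonical 'name' parameter; None matches everything.
        if context.name in (None, 'demo'):
            yield Distribution.at(self._info_dir)

finder = SingleDistFinder('/tmp/demo-1.0.dist-info')  # hypothetical path
hits = list(finder.find_distributions(DistributionFinder.Context(name='demo')))
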
+
+class FastPath:
+    """
+    Micro-optimized class for searching a path for
+    children.
+    """
+
+    def __init__(self, root):
+        self.root = root
+        self.base = os.path.basename(self.root).lower()
+
+    def joinpath(self, child):
+        return pathlib.Path(self.root, child)
+
+    def children(self):
+        with suppress(Exception):
+            return os.listdir(self.root or '.')
+        with suppress(Exception):
+            return self.zip_children()
+        return []
+
+    def zip_children(self):
+        zip_path = zipfile.Path(self.root)
+        names = zip_path.root.namelist()
+        self.joinpath = zip_path.joinpath  # later joins resolve inside the zip
+
+        return dict.fromkeys(
+            child.split(posixpath.sep, 1)[0]
+            for child in names
+            )
+
+    def is_egg(self, search):
+        base = self.base
+        return (
+            base == search.versionless_egg_name
+            or base.startswith(search.prefix)
+            and base.endswith('.egg'))
+
+    def search(self, name):
+        for child in self.children():
+            n_low = child.lower()
+            if (n_low in name.exact_matches
+                    or n_low.startswith(name.prefix)
+                    and n_low.endswith(name.suffixes)
+                    # legacy case:
+                    or self.is_egg(name) and n_low == 'egg-info'):
+                yield self.joinpath(child)
+
+
+class Prepared:
+    """
+    A prepared search for metadata on a possibly-named package.
+    """
+    normalized = ''
+    prefix = ''
+    suffixes = '.dist-info', '.egg-info'
+    exact_matches = [''][:0]  # evaluates to [], an empty list of str
+    versionless_egg_name = ''
+
+    def __init__(self, name):
+        self.name = name
+        if name is None:
+            return
+        self.normalized = name.lower().replace('-', '_')
+        self.prefix = self.normalized + '-'
+        self.exact_matches = [
+            self.normalized + suffix for suffix in self.suffixes]
+        self.versionless_egg_name = self.normalized + '.egg'
+
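
For instance, the search terms derived for a typical project name; Prepared is internal and shown here only for illustration:

from importlib.metadata import Prepared

p = Prepared('My-Package')
print(p.normalized)            # 'my_package'
print(p.prefix)                # 'my_package-'
print(p.exact_matches)         # ['my_package.dist-info', 'my_package.egg-info']
print(p.versionless_egg_name)  # 'my_package.egg'
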
+
+class MetadataPathFinder(DistributionFinder):
+    @classmethod
+    def find_distributions(cls, context=DistributionFinder.Context()):
+        """
+        Find distributions.
+
+        Return an iterable of all Distribution instances capable of
+        loading the metadata for packages matching ``context.name``
+        (or all names if ``None`` indicated) along the paths in the list
+        of directories ``context.path``.
+        """
+        found = cls._search_paths(context.name, context.path)
+        return map(PathDistribution, found)
+
+    @classmethod
+    def _search_paths(cls, name, paths):
+        """Find metadata directories in paths heuristically."""
+        return itertools.chain.from_iterable(
+            path.search(Prepared(name))
+            for path in map(FastPath, paths)
+            )
+
+
+class PathDistribution(Distribution):
+    def __init__(self, path):
+        """Construct a distribution from a path to the metadata directory.
+
+        :param path: A pathlib.Path or similar object supporting
+                     .joinpath(), __div__, .parent, and .read_text().
+        """
+        self._path = path
+
+    def read_text(self, filename):
+        with suppress(FileNotFoundError, IsADirectoryError, KeyError,
+                      NotADirectoryError, PermissionError):
+            return self._path.joinpath(filename).read_text(encoding='utf-8')
+    read_text.__doc__ = Distribution.read_text.__doc__
+
+    def locate_file(self, path):
+        return self._path.parent / path
+
+
+def distribution(distribution_name):
+    """Get the ``Distribution`` instance for the named package.
+
+    :param distribution_name: The name of the distribution package as a string.
+    :return: A ``Distribution`` instance (or subclass thereof).
+    """
+    return Distribution.from_name(distribution_name)
+
+
+def distributions(**kwargs):
+    """Get all ``Distribution`` instances in the current environment.
+
+    :return: An iterable of ``Distribution`` instances.
+    """
+    return Distribution.discover(**kwargs)
+
+
+def metadata(distribution_name):
+    """Get the metadata for the named package.
+
+    :param distribution_name: The name of the distribution package to query.
+    :return: An email.Message containing the parsed metadata.
+    """
+    return Distribution.from_name(distribution_name).metadata
+
+
+def version(distribution_name):
+    """Get the version string for the named package.
+
+    :param distribution_name: The name of the distribution package to query.
+    :return: The version string for the package as defined in the package's
+        "Version" metadata key.
+    """
+    return distribution(distribution_name).version
+
+
+def entry_points():
+    """Return EntryPoint objects for all installed packages.
+
+    :return: EntryPoint objects for all installed packages.
+    """
+    eps = itertools.chain.from_iterable(
+        dist.entry_points for dist in distributions())
+    by_group = operator.attrgetter('group')
+    ordered = sorted(eps, key=by_group)
+    grouped = itertools.groupby(ordered, by_group)
+    return {
+        group: tuple(eps)
+        for group, eps in grouped
+        }
+
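
In this version entry_points() returns a plain dict mapping group names to tuples, so consumers index by group; a brief sketch:

from importlib.metadata import entry_points

eps = entry_points()
for ep in eps.get('console_scripts', ()):  # empty tuple if the group is absent
    print(ep.name, '->', ep.value)
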
+
+def files(distribution_name):
+    """Return a list of files for the named package.
+
+    :param distribution_name: The name of the distribution package to query.
+    :return: List of files composing the distribution.
+    """
+    return distribution(distribution_name).files
+
+
+def requires(distribution_name):
+    """
+    Return a list of requirements for the named package.
+
+    :return: An iterator of requirements, suitable for
+        packaging.requirement.Requirement.
+    """
+    return distribution(distribution_name).requires
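
Putting the module-level helpers together; output depends on the environment, and setuptools is only an example of a commonly installed distribution:

from importlib import metadata

try:
    print(metadata.version('setuptools'))
    print(metadata.requires('setuptools'))            # may be None
    print(len(metadata.files('setuptools') or []))
    print(metadata.metadata('setuptools')['Summary'])
except metadata.PackageNotFoundError:
    print('setuptools is not installed here')
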
diff --git a/common/py3-stdlib/importlib/metadata/__init__.py b/common/py3-stdlib/importlib/metadata/__init__.py
deleted file mode 100644
index b3063cd..0000000
--- a/common/py3-stdlib/importlib/metadata/__init__.py
+++ /dev/null
@@ -1,1045 +0,0 @@
-import os
-import re
-import abc
-import csv
-import sys
-import email
-import pathlib
-import zipfile
-import operator
-import textwrap
-import warnings
-import functools
-import itertools
-import posixpath
-import collections
-
-from . import _adapters, _meta
-from ._meta import PackageMetadata
-from ._collections import FreezableDefaultDict, Pair
-from ._functools import method_cache
-from ._itertools import unique_everseen
-from ._meta import PackageMetadata, SimplePath
-
-from contextlib import suppress
-from importlib import import_module
-from importlib.abc import MetaPathFinder
-from itertools import starmap
-from typing import List, Mapping, Optional, Union
-
-
-__all__ = [
-    'Distribution',
-    'DistributionFinder',
-    'PackageMetadata',
-    'PackageNotFoundError',
-    'distribution',
-    'distributions',
-    'entry_points',
-    'files',
-    'metadata',
-    'packages_distributions',
-    'requires',
-    'version',
-]
-
-
-class PackageNotFoundError(ModuleNotFoundError):
-    """The package was not found."""
-
-    def __str__(self):
-        return f"No package metadata was found for {self.name}"
-
-    @property
-    def name(self):
-        (name,) = self.args
-        return name
-
-
-class Sectioned:
-    """
-    A simple entry point config parser for performance
-
-    >>> for item in Sectioned.read(Sectioned._sample):
-    ...     print(item)
-    Pair(name='sec1', value='# comments ignored')
-    Pair(name='sec1', value='a = 1')
-    Pair(name='sec1', value='b = 2')
-    Pair(name='sec2', value='a = 2')
-
-    >>> res = Sectioned.section_pairs(Sectioned._sample)
-    >>> item = next(res)
-    >>> item.name
-    'sec1'
-    >>> item.value
-    Pair(name='a', value='1')
-    >>> item = next(res)
-    >>> item.value
-    Pair(name='b', value='2')
-    >>> item = next(res)
-    >>> item.name
-    'sec2'
-    >>> item.value
-    Pair(name='a', value='2')
-    >>> list(res)
-    []
-    """
-
-    _sample = textwrap.dedent(
-        """
-        [sec1]
-        # comments ignored
-        a = 1
-        b = 2
-
-        [sec2]
-        a = 2
-        """
-    ).lstrip()
-
-    @classmethod
-    def section_pairs(cls, text):
-        return (
-            section._replace(value=Pair.parse(section.value))
-            for section in cls.read(text, filter_=cls.valid)
-            if section.name is not None
-        )
-
-    @staticmethod
-    def read(text, filter_=None):
-        lines = filter(filter_, map(str.strip, text.splitlines()))
-        name = None
-        for value in lines:
-            section_match = value.startswith('[') and value.endswith(']')
-            if section_match:
-                name = value.strip('[]')
-                continue
-            yield Pair(name, value)
-
-    @staticmethod
-    def valid(line):
-        return line and not line.startswith('#')
-
-
-class EntryPoint(
-        collections.namedtuple('EntryPointBase', 'name value group')):
-    """An entry point as defined by Python packaging conventions.
-
-    See `the packaging docs on entry points
-    <https://packaging.python.org/specifications/entry-points/>`_
-    for more information.
-
-    >>> ep = EntryPoint(
-    ...     name=None, group=None, value='package.module:attr [extra1, extra2]')
-    >>> ep.module
-    'package.module'
-    >>> ep.attr
-    'attr'
-    >>> ep.extras
-    ['extra1', 'extra2']
-    """
-
-    pattern = re.compile(
-        r'(?P<module>[\w.]+)\s*'
-        r'(:\s*(?P<attr>[\w.]+)\s*)?'
-        r'((?P<extras>\[.*\])\s*)?$'
-    )
-    """
-    A regular expression describing the syntax for an entry point,
-    which might look like:
-
-        - module
-        - package.module
-        - package.module:attribute
-        - package.module:object.attribute
-        - package.module:attr [extra1, extra2]
-
-    Other combinations are possible as well.
-
-    The expression is lenient about whitespace around the ':',
-    following the attr, and following any extras.
-    """
-
-    dist: Optional['Distribution'] = None
-
-    def load(self):
-        """Load the entry point from its definition. If only a module
-        is indicated by the value, return that module. Otherwise,
-        return the named object.
-        """
-        match = self.pattern.match(self.value)
-        module = import_module(match.group('module'))
-        attrs = filter(None, (match.group('attr') or '').split('.'))
-        return functools.reduce(getattr, attrs, module)
-
-    @property
-    def module(self):
-        match = self.pattern.match(self.value)
-        return match.group('module')
-
-    @property
-    def attr(self):
-        match = self.pattern.match(self.value)
-        return match.group('attr')
-
-    @property
-    def extras(self):
-        match = self.pattern.match(self.value)
-        return re.findall(r'\w+', match.group('extras') or '')
-
-    def _for(self, dist):
-        self.dist = dist
-        return self
-
-    def __iter__(self):
-        """
-        Supply iter so one may construct dicts of EntryPoints by name.
-        """
-        msg = (
-            "Construction of dict of EntryPoints is deprecated in "
-            "favor of EntryPoints."
-        )
-        warnings.warn(msg, DeprecationWarning)
-        return iter((self.name, self))
-
-    def __reduce__(self):
-        return (
-            self.__class__,
-            (self.name, self.value, self.group),
-        )
-
-    def matches(self, **params):
-        """
-        EntryPoint matches the given parameters.
-
-        >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
-        >>> ep.matches(group='foo')
-        True
-        >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
-        True
-        >>> ep.matches(group='foo', name='other')
-        False
-        >>> ep.matches()
-        True
-        >>> ep.matches(extras=['extra1', 'extra2'])
-        True
-        >>> ep.matches(module='bing')
-        True
-        >>> ep.matches(attr='bong')
-        True
-        """
-        attrs = (getattr(self, param) for param in params)
-        return all(map(operator.eq, params.values(), attrs))
-
-
-class DeprecatedList(list):
-    """
-    Allow an otherwise immutable object to implement mutability
-    for compatibility.
-
-    >>> recwarn = getfixture('recwarn')
-    >>> dl = DeprecatedList(range(3))
-    >>> dl[0] = 1
-    >>> dl.append(3)
-    >>> del dl[3]
-    >>> dl.reverse()
-    >>> dl.sort()
-    >>> dl.extend([4])
-    >>> dl.pop(-1)
-    4
-    >>> dl.remove(1)
-    >>> dl += [5]
-    >>> dl + [6]
-    [1, 2, 5, 6]
-    >>> dl + (6,)
-    [1, 2, 5, 6]
-    >>> dl.insert(0, 0)
-    >>> dl
-    [0, 1, 2, 5]
-    >>> dl == [0, 1, 2, 5]
-    True
-    >>> dl == (0, 1, 2, 5)
-    True
-    >>> len(recwarn)
-    1
-    """
-
-    __slots__ = ()
-
-    _warn = functools.partial(
-        warnings.warn,
-        "EntryPoints list interface is deprecated. Cast to list if needed.",
-        DeprecationWarning,
-        stacklevel=2,
-    )
-
-    def __setitem__(self, *args, **kwargs):
-        self._warn()
-        return super().__setitem__(*args, **kwargs)
-
-    def __delitem__(self, *args, **kwargs):
-        self._warn()
-        return super().__delitem__(*args, **kwargs)
-
-    def append(self, *args, **kwargs):
-        self._warn()
-        return super().append(*args, **kwargs)
-
-    def reverse(self, *args, **kwargs):
-        self._warn()
-        return super().reverse(*args, **kwargs)
-
-    def extend(self, *args, **kwargs):
-        self._warn()
-        return super().extend(*args, **kwargs)
-
-    def pop(self, *args, **kwargs):
-        self._warn()
-        return super().pop(*args, **kwargs)
-
-    def remove(self, *args, **kwargs):
-        self._warn()
-        return super().remove(*args, **kwargs)
-
-    def __iadd__(self, *args, **kwargs):
-        self._warn()
-        return super().__iadd__(*args, **kwargs)
-
-    def __add__(self, other):
-        if not isinstance(other, tuple):
-            self._warn()
-            other = tuple(other)
-        return self.__class__(tuple(self) + other)
-
-    def insert(self, *args, **kwargs):
-        self._warn()
-        return super().insert(*args, **kwargs)
-
-    def sort(self, *args, **kwargs):
-        self._warn()
-        return super().sort(*args, **kwargs)
-
-    def __eq__(self, other):
-        if not isinstance(other, tuple):
-            self._warn()
-            other = tuple(other)
-
-        return tuple(self).__eq__(other)
-
-
-class EntryPoints(DeprecatedList):
-    """
-    An immutable collection of selectable EntryPoint objects.
-    """
-
-    __slots__ = ()
-
-    def __getitem__(self, name):  # -> EntryPoint:
-        """
-        Get the EntryPoint in self matching name.
-        """
-        if isinstance(name, int):
-            warnings.warn(
-                "Accessing entry points by index is deprecated. "
-                "Cast to tuple if needed.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
-            return super().__getitem__(name)
-        try:
-            return next(iter(self.select(name=name)))
-        except StopIteration:
-            raise KeyError(name)
-
-    def select(self, **params):
-        """
-        Select entry points from self that match the
-        given parameters (typically group and/or name).
-        """
-        return EntryPoints(ep for ep in self if ep.matches(**params))
-
-    @property
-    def names(self):
-        """
-        Return the set of all names of all entry points.
-        """
-        return set(ep.name for ep in self)
-
-    @property
-    def groups(self):
-        """
-        Return the set of all groups of all entry points.
-
-        For coverage while SelectableGroups is present.
-        >>> EntryPoints().groups
-        set()
-        """
-        return set(ep.group for ep in self)
-
-    @classmethod
-    def _from_text_for(cls, text, dist):
-        return cls(ep._for(dist) for ep in cls._from_text(text))
-
-    @classmethod
-    def _from_text(cls, text):
-        return itertools.starmap(EntryPoint, cls._parse_groups(text or ''))
-
-    @staticmethod
-    def _parse_groups(text):
-        return (
-            (item.value.name, item.value.value, item.name)
-            for item in Sectioned.section_pairs(text)
-        )
-
-
-class Deprecated:
-    """
-    Compatibility add-in for mapping to indicate that
-    mapping behavior is deprecated.
-
-    >>> recwarn = getfixture('recwarn')
-    >>> class DeprecatedDict(Deprecated, dict): pass
-    >>> dd = DeprecatedDict(foo='bar')
-    >>> dd.get('baz', None)
-    >>> dd['foo']
-    'bar'
-    >>> list(dd)
-    ['foo']
-    >>> list(dd.keys())
-    ['foo']
-    >>> 'foo' in dd
-    True
-    >>> list(dd.values())
-    ['bar']
-    >>> len(recwarn)
-    1
-    """
-
-    _warn = functools.partial(
-        warnings.warn,
-        "SelectableGroups dict interface is deprecated. Use select.",
-        DeprecationWarning,
-        stacklevel=2,
-    )
-
-    def __getitem__(self, name):
-        self._warn()
-        return super().__getitem__(name)
-
-    def get(self, name, default=None):
-        self._warn()
-        return super().get(name, default)
-
-    def __iter__(self):
-        self._warn()
-        return super().__iter__()
-
-    def __contains__(self, *args):
-        self._warn()
-        return super().__contains__(*args)
-
-    def keys(self):
-        self._warn()
-        return super().keys()
-
-    def values(self):
-        self._warn()
-        return super().values()
-
-
-class SelectableGroups(Deprecated, dict):
-    """
-    A backward- and forward-compatible result from
-    entry_points that fully implements the dict interface.
-    """
-
-    @classmethod
-    def load(cls, eps):
-        by_group = operator.attrgetter('group')
-        ordered = sorted(eps, key=by_group)
-        grouped = itertools.groupby(ordered, by_group)
-        return cls((group, EntryPoints(eps)) for group, eps in grouped)
-
-    @property
-    def _all(self):
-        """
-        Reconstruct a list of all entrypoints from the groups.
-        """
-        groups = super(Deprecated, self).values()
-        return EntryPoints(itertools.chain.from_iterable(groups))
-
-    @property
-    def groups(self):
-        return self._all.groups
-
-    @property
-    def names(self):
-        """
-        for coverage:
-        >>> SelectableGroups().names
-        set()
-        """
-        return self._all.names
-
-    def select(self, **params):
-        if not params:
-            return self
-        return self._all.select(**params)
-
-
-class PackagePath(pathlib.PurePosixPath):
-    """A reference to a path in a package"""
-
-    def read_text(self, encoding='utf-8'):
-        with self.locate().open(encoding=encoding) as stream:
-            return stream.read()
-
-    def read_binary(self):
-        with self.locate().open('rb') as stream:
-            return stream.read()
-
-    def locate(self):
-        """Return a path-like object for this path"""
-        return self.dist.locate_file(self)
-
-
-class FileHash:
-    def __init__(self, spec):
-        self.mode, _, self.value = spec.partition('=')
-
-    def __repr__(self):
-        return f'<FileHash mode: {self.mode} value: {self.value}>'
-
-
-class Distribution:
-    """A Python distribution package."""
-
-    @abc.abstractmethod
-    def read_text(self, filename):
-        """Attempt to load metadata file given by the name.
-
-        :param filename: The name of the file in the distribution info.
-        :return: The text if found, otherwise None.
-        """
-
-    @abc.abstractmethod
-    def locate_file(self, path):
-        """
-        Given a path to a file in this distribution, return a path
-        to it.
-        """
-
-    @classmethod
-    def from_name(cls, name):
-        """Return the Distribution for the given package name.
-
-        :param name: The name of the distribution package to search for.
-        :return: The Distribution instance (or subclass thereof) for the named
-            package, if found.
-        :raises PackageNotFoundError: When the named package's distribution
-            metadata cannot be found.
-        """
-        for resolver in cls._discover_resolvers():
-            dists = resolver(DistributionFinder.Context(name=name))
-            dist = next(iter(dists), None)
-            if dist is not None:
-                return dist
-        else:
-            raise PackageNotFoundError(name)
-
-    @classmethod
-    def discover(cls, **kwargs):
-        """Return an iterable of Distribution objects for all packages.
-
-        Pass a ``context`` or pass keyword arguments for constructing
-        a context.
-
-        :context: A ``DistributionFinder.Context`` object.
-        :return: Iterable of Distribution objects for all packages.
-        """
-        context = kwargs.pop('context', None)
-        if context and kwargs:
-            raise ValueError("cannot accept context and kwargs")
-        context = context or DistributionFinder.Context(**kwargs)
-        return itertools.chain.from_iterable(
-            resolver(context) for resolver in cls._discover_resolvers()
-        )
-
-    @staticmethod
-    def at(path):
-        """Return a Distribution for the indicated metadata path
-
-        :param path: a string or path-like object
-        :return: a concrete Distribution instance for the path
-        """
-        return PathDistribution(pathlib.Path(path))
-
-    @staticmethod
-    def _discover_resolvers():
-        """Search the meta_path for resolvers."""
-        declared = (
-            getattr(finder, 'find_distributions', None) for finder in sys.meta_path
-        )
-        return filter(None, declared)
-
-    @classmethod
-    def _local(cls, root='.'):
-        from pep517 import build, meta
-
-        system = build.compat_system(root)
-        builder = functools.partial(
-            meta.build,
-            source_dir=root,
-            system=system,
-        )
-        return PathDistribution(zipfile.Path(meta.build_as_zip(builder)))
-
-    @property
-    def metadata(self) -> _meta.PackageMetadata:
-        """Return the parsed metadata for this Distribution.
-
-        The returned object will have keys that name the various bits of
-        metadata.  See PEP 566 for details.
-        """
-        text = (
-            self.read_text('METADATA')
-            or self.read_text('PKG-INFO')
-            # This last clause is here to support old egg-info files.  Its
-            # effect is to just end up using the PathDistribution's self._path
-            # (which points to the egg-info file) attribute unchanged.
-            or self.read_text('')
-        )
-        return _adapters.Message(email.message_from_string(text))
-
-    @property
-    def name(self):
-        """Return the 'Name' metadata for the distribution package."""
-        return self.metadata['Name']
-
-    @property
-    def _normalized_name(self):
-        """Return a normalized version of the name."""
-        return Prepared.normalize(self.name)
-
-    @property
-    def version(self):
-        """Return the 'Version' metadata for the distribution package."""
-        return self.metadata['Version']
-
-    @property
-    def entry_points(self):
-        return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
-
-    @property
-    def files(self):
-        """Files in this distribution.
-
-        :return: List of PackagePath for this distribution or None
-
-        Result is `None` if the metadata file that enumerates files
-        (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
-        missing.
-        Result may be empty if the metadata exists but is empty.
-        """
-        file_lines = self._read_files_distinfo() or self._read_files_egginfo()
-
-        def make_file(name, hash=None, size_str=None):
-            result = PackagePath(name)
-            result.hash = FileHash(hash) if hash else None
-            result.size = int(size_str) if size_str else None
-            result.dist = self
-            return result
-
-        return file_lines and list(starmap(make_file, csv.reader(file_lines)))
-
-    def _read_files_distinfo(self):
-        """
-        Read the lines of RECORD
-        """
-        text = self.read_text('RECORD')
-        return text and text.splitlines()
-
-    def _read_files_egginfo(self):
-        """
-        SOURCES.txt might contain literal commas, so wrap each line
-        in quotes.
-        """
-        text = self.read_text('SOURCES.txt')
-        return text and map('"{}"'.format, text.splitlines())
-
-    @property
-    def requires(self):
-        """Generated requirements specified for this Distribution"""
-        reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
-        return reqs and list(reqs)
-
-    def _read_dist_info_reqs(self):
-        return self.metadata.get_all('Requires-Dist')
-
-    def _read_egg_info_reqs(self):
-        source = self.read_text('requires.txt')
-        return None if source is None else self._deps_from_requires_text(source)
-
-    @classmethod
-    def _deps_from_requires_text(cls, source):
-        return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
-
-    @staticmethod
-    def _convert_egg_info_reqs_to_simple_reqs(sections):
-        """
-        Historically, setuptools would solicit and store 'extra'
-        requirements, including those with environment markers,
-        in separate sections. More modern tools expect each
-        dependency to be defined separately, with any relevant
-        extras and environment markers attached directly to that
-        requirement. This method converts the former to the
-        latter. See _test_deps_from_requires_text for an example.
-        """
-
-        def make_condition(name):
-            return name and f'extra == "{name}"'
-
-        def quoted_marker(section):
-            section = section or ''
-            extra, sep, markers = section.partition(':')
-            if extra and markers:
-                markers = f'({markers})'
-            conditions = list(filter(None, [markers, make_condition(extra)]))
-            return '; ' + ' and '.join(conditions) if conditions else ''
-
-        def url_req_space(req):
-            """
-            PEP 508 requires a space between the url_spec and the quoted_marker.
-            Ref python/importlib_metadata#357.
-            """
-            # '@' is uniquely indicative of a url_req.
-            return ' ' * ('@' in req)
-
-        for section in sections:
-            space = url_req_space(section.value)
-            yield section.value + space + quoted_marker(section.name)
-
-
-class DistributionFinder(MetaPathFinder):
-    """
-    A MetaPathFinder capable of discovering installed distributions.
-    """
-
-    class Context:
-        """
-        Keyword arguments presented by the caller to
-        ``distributions()`` or ``Distribution.discover()``
-        to narrow the scope of a search for distributions
-        in all DistributionFinders.
-
-        Each DistributionFinder may expect any parameters
-        and should attempt to honor the canonical
-        parameters defined below when appropriate.
-        """
-
-        name = None
-        """
-        Specific name for which a distribution finder should match.
-        A name of ``None`` matches all distributions.
-        """
-
-        def __init__(self, **kwargs):
-            vars(self).update(kwargs)
-
-        @property
-        def path(self):
-            """
-            The sequence of directory paths that a distribution finder
-            should search.
-
-            Typically refers to Python installed package paths such as
-            "site-packages" directories and defaults to ``sys.path``.
-            """
-            return vars(self).get('path', sys.path)
-
-    @abc.abstractmethod
-    def find_distributions(self, context=Context()):
-        """
-        Find distributions.
-
-        Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching the ``context``,
-        a DistributionFinder.Context instance.
-        """
-
-
-class FastPath:
-    """
-    Micro-optimized class for searching a path for
-    children.
-    """
-
-    @functools.lru_cache()  # type: ignore
-    def __new__(cls, root):
-        return super().__new__(cls)
-
-    def __init__(self, root):
-        self.root = root
-
-    def joinpath(self, child):
-        return pathlib.Path(self.root, child)
-
-    def children(self):
-        with suppress(Exception):
-            return os.listdir(self.root or '.')
-        with suppress(Exception):
-            return self.zip_children()
-        return []
-
-    def zip_children(self):
-        zip_path = zipfile.Path(self.root)
-        names = zip_path.root.namelist()
-        self.joinpath = zip_path.joinpath
-
-        return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
-
-    def search(self, name):
-        return self.lookup(self.mtime).search(name)
-
-    @property
-    def mtime(self):
-        with suppress(OSError):
-            return os.stat(self.root).st_mtime
-        self.lookup.cache_clear()
-
-    @method_cache
-    def lookup(self, mtime):
-        return Lookup(self)
-
-
-class Lookup:
-    def __init__(self, path: FastPath):
-        base = os.path.basename(path.root).lower()
-        base_is_egg = base.endswith(".egg")
-        self.infos = FreezableDefaultDict(list)
-        self.eggs = FreezableDefaultDict(list)
-
-        for child in path.children():
-            low = child.lower()
-            if low.endswith((".dist-info", ".egg-info")):
-                # rpartition is faster than splitext and suitable for this purpose.
-                name = low.rpartition(".")[0].partition("-")[0]
-                normalized = Prepared.normalize(name)
-                self.infos[normalized].append(path.joinpath(child))
-            elif base_is_egg and low == "egg-info":
-                name = base.rpartition(".")[0].partition("-")[0]
-                legacy_normalized = Prepared.legacy_normalize(name)
-                self.eggs[legacy_normalized].append(path.joinpath(child))
-
-        self.infos.freeze()
-        self.eggs.freeze()
-
-    def search(self, prepared):
-        infos = (
-            self.infos[prepared.normalized]
-            if prepared
-            else itertools.chain.from_iterable(self.infos.values())
-        )
-        eggs = (
-            self.eggs[prepared.legacy_normalized]
-            if prepared
-            else itertools.chain.from_iterable(self.eggs.values())
-        )
-        return itertools.chain(infos, eggs)
-
-
-class Prepared:
-    """
-    A prepared search for metadata on a possibly-named package.
-    """
-
-    normalized = None
-    legacy_normalized = None
-
-    def __init__(self, name):
-        self.name = name
-        if name is None:
-            return
-        self.normalized = self.normalize(name)
-        self.legacy_normalized = self.legacy_normalize(name)
-
-    @staticmethod
-    def normalize(name):
-        """
-        PEP 503 normalization plus dashes as underscores.
-        """
-        return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
-
-    @staticmethod
-    def legacy_normalize(name):
-        """
-        Normalize the package name as found in the convention in
-        older packaging tools versions and specs.
-        """
-        return name.lower().replace('-', '_')
-
-    def __bool__(self):
-        return bool(self.name)
-
-
-class MetadataPathFinder(DistributionFinder):
-    @classmethod
-    def find_distributions(cls, context=DistributionFinder.Context()):
-        """
-        Find distributions.
-
-        Return an iterable of all Distribution instances capable of
-        loading the metadata for packages matching ``context.name``
-        (or all names if ``None`` indicated) along the paths in the list
-        of directories ``context.path``.
-        """
-        found = cls._search_paths(context.name, context.path)
-        return map(PathDistribution, found)
-
-    @classmethod
-    def _search_paths(cls, name, paths):
-        """Find metadata directories in paths heuristically."""
-        prepared = Prepared(name)
-        return itertools.chain.from_iterable(
-            path.search(prepared) for path in map(FastPath, paths)
-        )
-
-    @classmethod
-    def invalidate_caches(cls):
-        FastPath.__new__.cache_clear()
-
-
-class PathDistribution(Distribution):
-    def __init__(self, path: SimplePath):
-        """Construct a distribution.
-
-        :param path: SimplePath indicating the metadata directory.
-        """
-        self._path = path
-
-    def read_text(self, filename):
-        with suppress(
-            FileNotFoundError,
-            IsADirectoryError,
-            KeyError,
-            NotADirectoryError,
-            PermissionError,
-        ):
-            return self._path.joinpath(filename).read_text(encoding='utf-8')
-
-    read_text.__doc__ = Distribution.read_text.__doc__
-
-    def locate_file(self, path):
-        return self._path.parent / path
-
-    @property
-    def _normalized_name(self):
-        """
-        Performance optimization: where possible, resolve the
-        normalized name from the file system path.
-        """
-        stem = os.path.basename(str(self._path))
-        return self._name_from_stem(stem) or super()._normalized_name
-
-    def _name_from_stem(self, stem):
-        name, ext = os.path.splitext(stem)
-        if ext not in ('.dist-info', '.egg-info'):
-            return
-        name, sep, rest = stem.partition('-')
-        return name
-
-
-def distribution(distribution_name):
-    """Get the ``Distribution`` instance for the named package.
-
-    :param distribution_name: The name of the distribution package as a string.
-    :return: A ``Distribution`` instance (or subclass thereof).
-    """
-    return Distribution.from_name(distribution_name)
-
-
-def distributions(**kwargs):
-    """Get all ``Distribution`` instances in the current environment.
-
-    :return: An iterable of ``Distribution`` instances.
-    """
-    return Distribution.discover(**kwargs)
-
-
-def metadata(distribution_name) -> _meta.PackageMetadata:
-    """Get the metadata for the named package.
-
-    :param distribution_name: The name of the distribution package to query.
-    :return: A PackageMetadata containing the parsed metadata.
-    """
-    return Distribution.from_name(distribution_name).metadata
-
-
-def version(distribution_name):
-    """Get the version string for the named package.
-
-    :param distribution_name: The name of the distribution package to query.
-    :return: The version string for the package as defined in the package's
-        "Version" metadata key.
-    """
-    return distribution(distribution_name).version
-
-
-def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
-    """Return EntryPoint objects for all installed packages.
-
-    Pass selection parameters (group or name) to filter the
-    result to entry points matching those properties (see
-    EntryPoints.select()).
-
-    For compatibility, returns ``SelectableGroups`` object unless
-    selection parameters are supplied. In the future, this function
-    will return ``EntryPoints`` instead of ``SelectableGroups``
-    even when no selection parameters are supplied.
-
-    For maximum future compatibility, pass selection parameters
-    or invoke ``.select`` with parameters on the result.
-
-    :return: EntryPoints or SelectableGroups for all installed packages.
-    """
-    norm_name = operator.attrgetter('_normalized_name')
-    unique = functools.partial(unique_everseen, key=norm_name)
-    eps = itertools.chain.from_iterable(
-        dist.entry_points for dist in unique(distributions())
-    )
-    return SelectableGroups.load(eps).select(**params)
-
-
-def files(distribution_name):
-    """Return a list of files for the named package.
-
-    :param distribution_name: The name of the distribution package to query.
-    :return: List of files composing the distribution.
-    """
-    return distribution(distribution_name).files
-
-
-def requires(distribution_name):
-    """
-    Return a list of requirements for the named package.
-
-    :return: An iterator of requirements, suitable for
-        packaging.requirement.Requirement.
-    """
-    return distribution(distribution_name).requires
-
-
-def packages_distributions() -> Mapping[str, List[str]]:
-    """
-    Return a mapping of top-level packages to their
-    distributions.
-
-    >>> import collections.abc
-    >>> pkgs = packages_distributions()
-    >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
-    True
-    """
-    pkg_to_dist = collections.defaultdict(list)
-    for dist in distributions():
-        for pkg in (dist.read_text('top_level.txt') or '').split():
-            pkg_to_dist[pkg].append(dist.metadata['Name'])
-    return dict(pkg_to_dist)
diff --git a/common/py3-stdlib/importlib/metadata/_adapters.py b/common/py3-stdlib/importlib/metadata/_adapters.py
deleted file mode 100644
index aa460d3..0000000
--- a/common/py3-stdlib/importlib/metadata/_adapters.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import re
-import textwrap
-import email.message
-
-from ._text import FoldedCase
-
-
-class Message(email.message.Message):
-    multiple_use_keys = set(
-        map(
-            FoldedCase,
-            [
-                'Classifier',
-                'Obsoletes-Dist',
-                'Platform',
-                'Project-URL',
-                'Provides-Dist',
-                'Provides-Extra',
-                'Requires-Dist',
-                'Requires-External',
-                'Supported-Platform',
-                'Dynamic',
-            ],
-        )
-    )
-    """
-    Keys that may be indicated multiple times per PEP 566.
-    """
-
-    def __new__(cls, orig: email.message.Message):
-        res = super().__new__(cls)
-        vars(res).update(vars(orig))
-        return res
-
-    def __init__(self, *args, **kwargs):
-        self._headers = self._repair_headers()
-
-    # suppress spurious error from mypy
-    def __iter__(self):
-        return super().__iter__()
-
-    def _repair_headers(self):
-        def redent(value):
-            "Correct for RFC822 indentation"
-            if not value or '\n' not in value:
-                return value
-            return textwrap.dedent(' ' * 8 + value)
-
-        headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
-        if self._payload:
-            headers.append(('Description', self.get_payload()))
-        return headers
-
-    @property
-    def json(self):
-        """
-        Convert PackageMetadata to a JSON-compatible format
-        per PEP 0566.
-        """
-
-        def transform(key):
-            value = self.get_all(key) if key in self.multiple_use_keys else self[key]
-            if key == 'Keywords':
-                value = re.split(r'\s+', value)
-            tk = key.lower().replace('-', '_')
-            return tk, value
-
-        return dict(map(transform, map(FoldedCase, self)))
diff --git a/common/py3-stdlib/importlib/metadata/_collections.py b/common/py3-stdlib/importlib/metadata/_collections.py
deleted file mode 100644
index cf0954e..0000000
--- a/common/py3-stdlib/importlib/metadata/_collections.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import collections
-
-
-# from jaraco.collections 3.3
-class FreezableDefaultDict(collections.defaultdict):
-    """
-    Often it is desirable to prevent the mutation of
-    a default dict after its initial construction, such
-    as to prevent mutation during iteration.
-
-    >>> dd = FreezableDefaultDict(list)
-    >>> dd[0].append('1')
-    >>> dd.freeze()
-    >>> dd[1]
-    []
-    >>> len(dd)
-    1
-    """
-
-    def __missing__(self, key):
-        return getattr(self, '_frozen', super().__missing__)(key)
-
-    def freeze(self):
-        self._frozen = lambda key: self.default_factory()
-
-
-class Pair(collections.namedtuple('Pair', 'name value')):
-    @classmethod
-    def parse(cls, text):
-        return cls(*map(str.strip, text.split("=", 1)))
diff --git a/common/py3-stdlib/importlib/metadata/_functools.py b/common/py3-stdlib/importlib/metadata/_functools.py
deleted file mode 100644
index 73f50d0..0000000
--- a/common/py3-stdlib/importlib/metadata/_functools.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import types
-import functools
-
-
-# from jaraco.functools 3.3
-def method_cache(method, cache_wrapper=None):
-    """
-    Wrap lru_cache to support storing the cache data in the object instances.
-
-    Abstracts the common paradigm where the method explicitly saves an
-    underscore-prefixed protected property on first call and returns that
-    subsequently.
-
-    >>> class MyClass:
-    ...     calls = 0
-    ...
-    ...     @method_cache
-    ...     def method(self, value):
-    ...         self.calls += 1
-    ...         return value
-
-    >>> a = MyClass()
-    >>> a.method(3)
-    3
-    >>> for x in range(75):
-    ...     res = a.method(x)
-    >>> a.calls
-    75
-
-    Note that the apparent behavior will be exactly like that of lru_cache
-    except that the cache is stored on each instance, so values in one
-    instance will not flush values from another, and when an instance is
-    deleted, so are the cached values for that instance.
-
-    >>> b = MyClass()
-    >>> for x in range(35):
-    ...     res = b.method(x)
-    >>> b.calls
-    35
-    >>> a.method(0)
-    0
-    >>> a.calls
-    75
-
-    Note that if method had been decorated with ``functools.lru_cache()``,
-    a.calls would have been 76 (due to the cached value of 0 having been
-    flushed by the 'b' instance).
-
-    Clear the cache with ``.cache_clear()``
-
-    >>> a.method.cache_clear()
-
-    Same for a method that hasn't yet been called.
-
-    >>> c = MyClass()
-    >>> c.method.cache_clear()
-
-    Another cache wrapper may be supplied:
-
-    >>> cache = functools.lru_cache(maxsize=2)
-    >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
-    >>> a = MyClass()
-    >>> a.method2()
-    3
-
-    Caution - do not subsequently wrap the method with another decorator, such
-    as ``@property``, which changes the semantics of the function.
-
-    See also
-    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
-    for another implementation and additional justification.
-    """
-    cache_wrapper = cache_wrapper or functools.lru_cache()
-
-    def wrapper(self, *args, **kwargs):
-        # it's the first call, replace the method with a cached, bound method
-        bound_method = types.MethodType(method, self)
-        cached_method = cache_wrapper(bound_method)
-        setattr(self, method.__name__, cached_method)
-        return cached_method(*args, **kwargs)
-
-    # Support cache clear even before cache has been created.
-    wrapper.cache_clear = lambda: None
-
-    return wrapper
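
A condensed, runnable restatement of the trick the removed `method_cache` relies on: the first call binds the plain function to the instance, wraps that bound method with `lru_cache`, and shadows the class attribute through the instance `__dict__`, so each instance gets an independent cache:

    import functools
    import types

    def method_cache(method):
        def wrapper(self, *args, **kwargs):
            bound = types.MethodType(method, self)
            cached = functools.lru_cache()(bound)
            setattr(self, method.__name__, cached)   # shadow the class attr
            return cached(*args, **kwargs)
        wrapper.cache_clear = lambda: None           # pre-first-call no-op
        return wrapper

    class Squares:
        calls = 0

        @method_cache
        def square(self, n):
            self.calls += 1
            return n * n

    a, b = Squares(), Squares()
    a.square(3); a.square(3); b.square(3)
    assert (a.calls, b.calls) == (1, 1)              # one miss per instance
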
diff --git a/common/py3-stdlib/importlib/metadata/_itertools.py b/common/py3-stdlib/importlib/metadata/_itertools.py
deleted file mode 100644
index dd45f2f..0000000
--- a/common/py3-stdlib/importlib/metadata/_itertools.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from itertools import filterfalse
-
-
-def unique_everseen(iterable, key=None):
-    "List unique elements, preserving order. Remember all elements ever seen."
-    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
-    # unique_everseen('ABBCcAD', str.lower) --> A B C D
-    seen = set()
-    seen_add = seen.add
-    if key is None:
-        for element in filterfalse(seen.__contains__, iterable):
-            seen_add(element)
-            yield element
-    else:
-        for element in iterable:
-            k = key(element)
-            if k not in seen:
-                seen_add(k)
-                yield element
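
This is the classic itertools-recipes `unique_everseen`. A slightly simplified single-loop variant reproduces the documented behavior from the comments above:

    def unique_everseen(iterable, key=None):
        seen = set()                      # remembers every element ever seen
        for element in iterable:
            k = element if key is None else key(element)
            if k not in seen:
                seen.add(k)
                yield element

    assert list(unique_everseen('AAAABBBCCDAABBB')) == ['A', 'B', 'C', 'D']
    assert list(unique_everseen('ABBCcAD', str.lower)) == ['A', 'B', 'C', 'D']
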
diff --git a/common/py3-stdlib/importlib/metadata/_meta.py b/common/py3-stdlib/importlib/metadata/_meta.py
deleted file mode 100644
index 1a6edbf..0000000
--- a/common/py3-stdlib/importlib/metadata/_meta.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from typing import Any, Dict, Iterator, List, Protocol, TypeVar, Union
-
-
-_T = TypeVar("_T")
-
-
-class PackageMetadata(Protocol):
-    def __len__(self) -> int:
-        ...  # pragma: no cover
-
-    def __contains__(self, item: str) -> bool:
-        ...  # pragma: no cover
-
-    def __getitem__(self, key: str) -> str:
-        ...  # pragma: no cover
-
-    def __iter__(self) -> Iterator[str]:
-        ...  # pragma: no cover
-
-    def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
-        """
-        Return all values associated with a possibly multi-valued key.
-        """
-
-    @property
-    def json(self) -> Dict[str, Union[str, List[str]]]:
-        """
-        A JSON-compatible form of the metadata.
-        """
-
-
-class SimplePath(Protocol):
-    """
-    A minimal subset of pathlib.Path required by PathDistribution.
-    """
-
-    def joinpath(self) -> 'SimplePath':
-        ...  # pragma: no cover
-
-    def __div__(self) -> 'SimplePath':
-        ...  # pragma: no cover
-
-    def parent(self) -> 'SimplePath':
-        ...  # pragma: no cover
-
-    def read_text(self) -> str:
-        ...  # pragma: no cover
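
`PackageMetadata` is a `typing.Protocol`, so conformance is structural rather than nominal: any object with the right shape satisfies it, without registration or inheritance. A sketch with a hypothetical dict-backed implementation:

    from typing import Iterator, Protocol

    class PackageMetadata(Protocol):
        def __len__(self) -> int: ...
        def __contains__(self, item: str) -> bool: ...
        def __getitem__(self, key: str) -> str: ...
        def __iter__(self) -> Iterator[str]: ...

    class DictMetadata:                   # hypothetical minimal implementation
        def __init__(self, data):
            self._data = data
        def __len__(self):
            return len(self._data)
        def __contains__(self, item):
            return item in self._data
        def __getitem__(self, key):
            return self._data[key]
        def __iter__(self):
            return iter(self._data)

    md: PackageMetadata = DictMetadata({'Name': 'demo', 'Version': '1.0'})
    assert md['Name'] == 'demo'
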
diff --git a/common/py3-stdlib/importlib/metadata/_text.py b/common/py3-stdlib/importlib/metadata/_text.py
deleted file mode 100644
index 766979d..0000000
--- a/common/py3-stdlib/importlib/metadata/_text.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import re
-
-from ._functools import method_cache
-
-
-# from jaraco.text 3.5
-class FoldedCase(str):
-    """
-    A case insensitive string class; behaves just like str
-    except compares equal when the only variation is case.
-
-    >>> s = FoldedCase('hello world')
-
-    >>> s == 'Hello World'
-    True
-
-    >>> 'Hello World' == s
-    True
-
-    >>> s != 'Hello World'
-    False
-
-    >>> s.index('O')
-    4
-
-    >>> s.split('O')
-    ['hell', ' w', 'rld']
-
-    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
-    ['alpha', 'Beta', 'GAMMA']
-
-    Sequence membership is straightforward.
-
-    >>> "Hello World" in [s]
-    True
-    >>> s in ["Hello World"]
-    True
-
-    You may test for set inclusion, but candidate and elements
-    must both be folded.
-
-    >>> FoldedCase("Hello World") in {s}
-    True
-    >>> s in {FoldedCase("Hello World")}
-    True
-
-    String inclusion works as long as the FoldedCase object
-    is on the right.
-
-    >>> "hello" in FoldedCase("Hello World")
-    True
-
-    But not if the FoldedCase object is on the left:
-
-    >>> FoldedCase('hello') in 'Hello World'
-    False
-
-    In that case, use in_:
-
-    >>> FoldedCase('hello').in_('Hello World')
-    True
-
-    >>> FoldedCase('hello') > FoldedCase('Hello')
-    False
-    """
-
-    def __lt__(self, other):
-        return self.lower() < other.lower()
-
-    def __gt__(self, other):
-        return self.lower() > other.lower()
-
-    def __eq__(self, other):
-        return self.lower() == other.lower()
-
-    def __ne__(self, other):
-        return self.lower() != other.lower()
-
-    def __hash__(self):
-        return hash(self.lower())
-
-    def __contains__(self, other):
-        return super(FoldedCase, self).lower().__contains__(other.lower())
-
-    def in_(self, other):
-        "Does self appear in other?"
-        return self in FoldedCase(other)
-
-    # cache lower since it's likely to be called frequently.
-    @method_cache
-    def lower(self):
-        return super(FoldedCase, self).lower()
-
-    def index(self, sub):
-        return self.lower().index(sub.lower())
-
-    def split(self, splitter=' ', maxsplit=0):
-        pattern = re.compile(re.escape(splitter), re.I)
-        return pattern.split(self, maxsplit)
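
A trimmed restatement of the removed `FoldedCase`, enough to show the case-insensitive equality, hashing, and ordering that metadata key handling leans on:

    class FoldedCase(str):
        def __eq__(self, other):
            return self.lower() == other.lower()
        def __ne__(self, other):
            return self.lower() != other.lower()
        def __lt__(self, other):
            return self.lower() < other.lower()
        def __gt__(self, other):
            return self.lower() > other.lower()
        def __hash__(self):
            return hash(self.lower())

    assert FoldedCase('Description') == 'DESCRIPTION'
    assert FoldedCase('Description') in {FoldedCase('DESCRIPTION')}
    assert sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) == \
           ['alpha', 'Beta', 'GAMMA']
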
diff --git a/common/py3-stdlib/importlib/readers.py b/common/py3-stdlib/importlib/readers.py
deleted file mode 100644
index 41089c0..0000000
--- a/common/py3-stdlib/importlib/readers.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import collections
-import zipfile
-import pathlib
-from . import abc
-
-
-def remove_duplicates(items):
-    return iter(collections.OrderedDict.fromkeys(items))
-
-
-class FileReader(abc.TraversableResources):
-    def __init__(self, loader):
-        self.path = pathlib.Path(loader.path).parent
-
-    def resource_path(self, resource):
-        """
-        Return the file system path to prevent
-        `resources.path()` from creating a temporary
-        copy.
-        """
-        return str(self.path.joinpath(resource))
-
-    def files(self):
-        return self.path
-
-
-class ZipReader(abc.TraversableResources):
-    def __init__(self, loader, module):
-        _, _, name = module.rpartition('.')
-        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
-        self.archive = loader.archive
-
-    def open_resource(self, resource):
-        try:
-            return super().open_resource(resource)
-        except KeyError as exc:
-            raise FileNotFoundError(exc.args[0])
-
-    def is_resource(self, path):
-        # workaround for `zipfile.Path.is_file` returning true
-        # for non-existent paths.
-        target = self.files().joinpath(path)
-        return target.is_file() and target.exists()
-
-    def files(self):
-        return zipfile.Path(self.archive, self.prefix)
-
-
-class MultiplexedPath(abc.Traversable):
-    """
-    Given a series of Traversable objects, implement a merged
-    version of the interface across all objects. Useful for
-    namespace packages which may be multihomed at a single
-    name.
-    """
-
-    def __init__(self, *paths):
-        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
-        if not self._paths:
-            message = 'MultiplexedPath must contain at least one path'
-            raise FileNotFoundError(message)
-        if not all(path.is_dir() for path in self._paths):
-            raise NotADirectoryError('MultiplexedPath only supports directories')
-
-    def iterdir(self):
-        visited = []
-        for path in self._paths:
-            for file in path.iterdir():
-                if file.name in visited:
-                    continue
-                visited.append(file.name)
-                yield file
-
-    def read_bytes(self):
-        raise FileNotFoundError(f'{self} is not a file')
-
-    def read_text(self, *args, **kwargs):
-        raise FileNotFoundError(f'{self} is not a file')
-
-    def is_dir(self):
-        return True
-
-    def is_file(self):
-        return False
-
-    def joinpath(self, child):
-        # first try to find child in current paths
-        for file in self.iterdir():
-            if file.name == child:
-                return file
-        # if it does not exist, construct it with the first path
-        return self._paths[0] / child
-
-    __truediv__ = joinpath
-
-    def open(self, *args, **kwargs):
-        raise FileNotFoundError(f'{self} is not a file')
-
-    @property
-    def name(self):
-        return self._paths[0].name
-
-    def __repr__(self):
-        paths = ', '.join(f"'{path}'" for path in self._paths)
-        return f'MultiplexedPath({paths})'
-
-
-class NamespaceReader(abc.TraversableResources):
-    def __init__(self, namespace_path):
-        if 'NamespacePath' not in str(namespace_path):
-            raise ValueError('Invalid path')
-        self.path = MultiplexedPath(*list(namespace_path))
-
-    def resource_path(self, resource):
-        """
-        Return the file system path to prevent
-        `resources.path()` from creating a temporary
-        copy.
-        """
-        return str(self.path.joinpath(resource))
-
-    def files(self):
-        return self.path
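
The interesting behavior in the removed `MultiplexedPath` is its merge rule: iterate every underlying directory, but let the first occurrence of a name win. A self-contained sketch of that rule using throwaway temp directories, with no importlib machinery involved:

    import pathlib
    import tempfile

    with tempfile.TemporaryDirectory() as a, tempfile.TemporaryDirectory() as b:
        pathlib.Path(a, 'one.txt').write_text('1')
        pathlib.Path(b, 'two.txt').write_text('2')
        pathlib.Path(b, 'one.txt').write_text('shadowed')  # duplicate name

        seen, merged = set(), []
        for root in (pathlib.Path(a), pathlib.Path(b)):
            for item in root.iterdir():       # mirrors MultiplexedPath.iterdir
                if item.name not in seen:
                    seen.add(item.name)
                    merged.append(item.name)
        assert sorted(merged) == ['one.txt', 'two.txt']
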
diff --git a/common/py3-stdlib/importlib/resources.py b/common/py3-stdlib/importlib/resources.py
index 8a98663..b803a01 100644
--- a/common/py3-stdlib/importlib/resources.py
+++ b/common/py3-stdlib/importlib/resources.py
@@ -1,26 +1,22 @@
 import os
-import io
 
+from . import abc as resources_abc
 from . import _common
-from ._common import as_file, files
-from .abc import ResourceReader
-from contextlib import suppress
+from ._common import as_file
+from contextlib import contextmanager, suppress
+from importlib import import_module
 from importlib.abc import ResourceLoader
-from importlib.machinery import ModuleSpec
 from io import BytesIO, TextIOWrapper
 from pathlib import Path
 from types import ModuleType
-from typing import ContextManager, Iterable, Union
+from typing import ContextManager, Iterable, Optional, Union
 from typing import cast
 from typing.io import BinaryIO, TextIO
-from collections.abc import Sequence
-from functools import singledispatch
 
 
 __all__ = [
     'Package',
     'Resource',
-    'ResourceReader',
     'as_file',
     'contents',
     'files',
@@ -30,57 +26,99 @@
     'path',
     'read_binary',
     'read_text',
-]
+    ]
 
 
 Package = Union[str, ModuleType]
 Resource = Union[str, os.PathLike]
 
 
+def _resolve(name) -> ModuleType:
+    """If name is a string, resolve to a module."""
+    if hasattr(name, '__spec__'):
+        return name
+    return import_module(name)
+
+
+def _get_package(package) -> ModuleType:
+    """Take a package name or module object and return the module.
+
+    If a name, the module is imported.  If the resolved module
+    object is not a package, raise an exception.
+    """
+    module = _resolve(package)
+    if module.__spec__.submodule_search_locations is None:
+        raise TypeError('{!r} is not a package'.format(package))
+    return module
+
+
+def _normalize_path(path) -> str:
+    """Normalize a path by ensuring it is a string.
+
+    If the resulting string contains path separators, an exception is raised.
+    """
+    parent, file_name = os.path.split(path)
+    if parent:
+        raise ValueError('{!r} must be only a file name'.format(path))
+    return file_name
+
+
+def _get_resource_reader(
+        package: ModuleType) -> Optional[resources_abc.ResourceReader]:
+    # Return the package's loader if it's a ResourceReader.  We can't use
+    # a issubclass() check here because apparently abc.'s __subclasscheck__()
+    # hook wants to create a weak reference to the object, but
+    # zipimport.zipimporter does not support weak references, resulting in a
+    # TypeError.  That seems terrible.
+    spec = package.__spec__
+    if hasattr(spec.loader, 'get_resource_reader'):
+        return cast(resources_abc.ResourceReader,
+                    spec.loader.get_resource_reader(spec.name))
+    return None
+
+
+def _check_location(package):
+    if package.__spec__.origin is None or not package.__spec__.has_location:
+        raise FileNotFoundError(f'Package has no location {package!r}')
+
+
 def open_binary(package: Package, resource: Resource) -> BinaryIO:
     """Return a file-like object opened for binary reading of the resource."""
-    resource = _common.normalize_path(resource)
-    package = _common.get_package(package)
-    reader = _common.get_resource_reader(package)
+    resource = _normalize_path(resource)
+    package = _get_package(package)
+    reader = _get_resource_reader(package)
     if reader is not None:
         return reader.open_resource(resource)
-    spec = cast(ModuleSpec, package.__spec__)
-    # Using pathlib doesn't work well here due to the lack of 'strict'
-    # argument for pathlib.Path.resolve() prior to Python 3.6.
-    if spec.submodule_search_locations is not None:
-        paths = spec.submodule_search_locations
-    elif spec.origin is not None:
-        paths = [os.path.dirname(os.path.abspath(spec.origin))]
-
-    for package_path in paths:
-        full_path = os.path.join(package_path, resource)
-        try:
-            return open(full_path, mode='rb')
-        except OSError:
-            # Just assume the loader is a resource loader; all the relevant
-            # importlib.machinery loaders are and an AttributeError for
-            # get_data() will make it clear what is needed from the loader.
-            loader = cast(ResourceLoader, spec.loader)
-            data = None
-            if hasattr(spec.loader, 'get_data'):
-                with suppress(OSError):
-                    data = loader.get_data(full_path)
-            if data is not None:
-                return BytesIO(data)
-
-    raise FileNotFoundError(f'{resource!r} resource not found in {spec.name!r}')
+    absolute_package_path = os.path.abspath(
+        package.__spec__.origin or 'non-existent file')
+    package_path = os.path.dirname(absolute_package_path)
+    full_path = os.path.join(package_path, resource)
+    try:
+        return open(full_path, mode='rb')
+    except OSError:
+        # Just assume the loader is a resource loader; all the relevant
+        # importlib.machinery loaders are and an AttributeError for
+        # get_data() will make it clear what is needed from the loader.
+        loader = cast(ResourceLoader, package.__spec__.loader)
+        data = None
+        if hasattr(package.__spec__.loader, 'get_data'):
+            with suppress(OSError):
+                data = loader.get_data(full_path)
+        if data is None:
+            package_name = package.__spec__.name
+            message = '{!r} resource not found in {!r}'.format(
+                resource, package_name)
+            raise FileNotFoundError(message)
+        return BytesIO(data)
 
 
-def open_text(
-    package: Package,
-    resource: Resource,
-    encoding: str = 'utf-8',
-    errors: str = 'strict',
-) -> TextIO:
+def open_text(package: Package,
+              resource: Resource,
+              encoding: str = 'utf-8',
+              errors: str = 'strict') -> TextIO:
     """Return a file-like object opened for text reading of the resource."""
     return TextIOWrapper(
-        open_binary(package, resource), encoding=encoding, errors=errors
-    )
+        open_binary(package, resource), encoding=encoding, errors=errors)
 
 
 def read_binary(package: Package, resource: Resource) -> bytes:
@@ -89,12 +127,10 @@
         return fp.read()
 
 
-def read_text(
-    package: Package,
-    resource: Resource,
-    encoding: str = 'utf-8',
-    errors: str = 'strict',
-) -> str:
+def read_text(package: Package,
+              resource: Resource,
+              encoding: str = 'utf-8',
+              errors: str = 'strict') -> str:
     """Return the decoded string of the resource.
 
     The decoding-related arguments have the same semantics as those of
@@ -104,10 +140,16 @@
         return fp.read()
 
 
+def files(package: Package) -> resources_abc.Traversable:
+    """
+    Get a Traversable resource from a package
+    """
+    return _common.from_package(_get_package(package))
+
+
 def path(
-    package: Package,
-    resource: Resource,
-) -> 'ContextManager[Path]':
+        package: Package, resource: Resource,
+        ) -> 'ContextManager[Path]':
     """A context manager providing a file path object to the resource.
 
     If the resource does not already exist on its own on the file system,
@@ -116,30 +158,23 @@
     raised if the file was deleted prior to the context manager
     exiting).
     """
-    reader = _common.get_resource_reader(_common.get_package(package))
+    reader = _get_resource_reader(_get_package(package))
     return (
-        _path_from_reader(reader, _common.normalize_path(resource))
-        if reader
-        else _common.as_file(
-            _common.files(package).joinpath(_common.normalize_path(resource))
+        _path_from_reader(reader, resource)
+        if reader else
+        _common.as_file(files(package).joinpath(_normalize_path(resource)))
         )
-    )
 
 
+@contextmanager
 def _path_from_reader(reader, resource):
-    return _path_from_resource_path(reader, resource) or _path_from_open_resource(
-        reader, resource
-    )
-
-
-def _path_from_resource_path(reader, resource):
+    norm_resource = _normalize_path(resource)
     with suppress(FileNotFoundError):
-        return Path(reader.resource_path(resource))
-
-
-def _path_from_open_resource(reader, resource):
-    saved = io.BytesIO(reader.open_resource(resource).read())
-    return _common._tempfile(saved.read, suffix=resource)
+        yield Path(reader.resource_path(norm_resource))
+        return
+    opener_reader = reader.open_resource(norm_resource)
+    with _common._tempfile(opener_reader.read, suffix=norm_resource) as res:
+        yield res
 
 
 def is_resource(package: Package, name: str) -> bool:
@@ -147,9 +182,9 @@
 
     Directories are *not* resources.
     """
-    package = _common.get_package(package)
-    _common.normalize_path(name)
-    reader = _common.get_resource_reader(package)
+    package = _get_package(package)
+    _normalize_path(name)
+    reader = _get_resource_reader(package)
     if reader is not None:
         return reader.is_resource(name)
     package_contents = set(contents(package))
@@ -165,21 +200,16 @@
     not considered resources.  Use `is_resource()` on each entry returned here
     to check if it is a resource or not.
     """
-    package = _common.get_package(package)
-    reader = _common.get_resource_reader(package)
+    package = _get_package(package)
+    reader = _get_resource_reader(package)
     if reader is not None:
-        return _ensure_sequence(reader.contents())
-    transversable = _common.from_package(package)
-    if transversable.is_dir():
-        return list(item.name for item in transversable.iterdir())
-    return []
-
-
-@singledispatch
-def _ensure_sequence(iterable):
-    return list(iterable)
-
-
-@_ensure_sequence.register(Sequence)
-def _(iterable):
-    return iterable
+        return reader.contents()
+    # Is the package a namespace package?  By definition, namespace packages
+    # cannot have resources.
+    namespace = (
+        package.__spec__.origin is None or
+        package.__spec__.origin == 'namespace'
+        )
+    if namespace or not package.__spec__.has_location:
+        return ()
+    return list(item.name for item in _common.from_package(package).iterdir())
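
The public surface reshuffled above behaves the same for the common cases on both sides of this revert. A usage sketch, assuming a conventional filesystem install where the stdlib ships its .py sources:

    from importlib import resources

    names = resources.contents('json')        # entries in the json package dir
    assert '__init__.py' in names
    assert resources.is_resource('json', '__init__.py')
    assert not resources.is_resource('json', 'missing.txt')
    head = resources.read_text('json', '__init__.py')[:40]
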
diff --git a/common/py3-stdlib/importlib/util.py b/common/py3-stdlib/importlib/util.py
index 8623c89..269a6fa 100644
--- a/common/py3-stdlib/importlib/util.py
+++ b/common/py3-stdlib/importlib/util.py
@@ -1,5 +1,5 @@
 """Utility code for constructing importers, etc."""
-from ._abc import Loader
+from . import abc
 from ._bootstrap import module_from_spec
 from ._bootstrap import _resolve_name
 from ._bootstrap import spec_from_loader
@@ -149,8 +149,7 @@
     """
     @functools.wraps(fxn)
     def set_package_wrapper(*args, **kwargs):
-        warnings.warn('The import system now takes care of this automatically; '
-                      'this decorator is slated for removal in Python 3.12',
+        warnings.warn('The import system now takes care of this automatically.',
                       DeprecationWarning, stacklevel=2)
         module = fxn(*args, **kwargs)
         if getattr(module, '__package__', None) is None:
@@ -169,8 +168,7 @@
     """
     @functools.wraps(fxn)
     def set_loader_wrapper(self, *args, **kwargs):
-        warnings.warn('The import system now takes care of this automatically; '
-                      'this decorator is slated for removal in Python 3.12',
+        warnings.warn('The import system now takes care of this automatically.',
                       DeprecationWarning, stacklevel=2)
         module = fxn(self, *args, **kwargs)
         if getattr(module, '__loader__', None) is None:
@@ -197,8 +195,7 @@
     the second argument.
 
     """
-    warnings.warn('The import system now takes care of this automatically; '
-                  'this decorator is slated for removal in Python 3.12',
+    warnings.warn('The import system now takes care of this automatically.',
                   DeprecationWarning, stacklevel=2)
     @functools.wraps(fxn)
     def module_for_loader_wrapper(self, fullname, *args, **kwargs):
@@ -235,6 +232,7 @@
         # Figure out exactly what attributes were mutated between the creation
         # of the module and now.
         attrs_then = self.__spec__.loader_state['__dict__']
+        original_type = self.__spec__.loader_state['__class__']
         attrs_now = self.__dict__
         attrs_updated = {}
         for key, value in attrs_now.items():
@@ -265,7 +263,7 @@
         delattr(self, attr)
 
 
-class LazyLoader(Loader):
+class LazyLoader(abc.Loader):
 
     """A loader that creates a module which defers loading until attribute access."""
 
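`LazyLoader` itself is untouched here beyond where `Loader` is imported from. Its documented usage pattern, with `json` standing in for an expensive import:

    import importlib.util
    import sys

    def lazy_import(name):
        spec = importlib.util.find_spec(name)
        loader = importlib.util.LazyLoader(spec.loader)
        spec.loader = loader
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        loader.exec_module(module)    # registers, but does not yet run, the module
        return module

    lazy_json = lazy_import('json')
    lazy_json.dumps({'ok': True})     # module body executes here, on first use
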
diff --git a/common/py3-stdlib/inspect.py b/common/py3-stdlib/inspect.py
index c5881cc..18bed90 100644
--- a/common/py3-stdlib/inspect.py
+++ b/common/py3-stdlib/inspect.py
@@ -24,8 +24,6 @@
     stack(), trace() - get info about frames on the stack or in a traceback
 
     signature() - get a Signature object for the callable
-
-    get_annotations() - safely compute an object's annotations
 """
 
 # This module is in the public domain.  No warranties.
@@ -62,122 +60,6 @@
 # See Include/object.h
 TPFLAGS_IS_ABSTRACT = 1 << 20
 
-
-def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
-    """Compute the annotations dict for an object.
-
-    obj may be a callable, class, or module.
-    Passing in an object of any other type raises TypeError.
-
-    Returns a dict.  get_annotations() returns a new dict every time
-    it's called; calling it twice on the same object will return two
-    different but equivalent dicts.
-
-    This function handles several details for you:
-
-      * If eval_str is true, values of type str will
-        be un-stringized using eval().  This is intended
-        for use with stringized annotations
-        ("from __future__ import annotations").
-      * If obj doesn't have an annotations dict, returns an
-        empty dict.  (Functions and methods always have an
-        annotations dict; classes, modules, and other types of
-        callables may not.)
-      * Ignores inherited annotations on classes.  If a class
-        doesn't have its own annotations dict, returns an empty dict.
-      * All accesses to object members and dict values are done
-        using getattr() and dict.get() for safety.
-      * Always, always, always returns a freshly-created dict.
-
-    eval_str controls whether or not values of type str are replaced
-    with the result of calling eval() on those values:
-
-      * If eval_str is true, eval() is called on values of type str.
-      * If eval_str is false (the default), values of type str are unchanged.
-
-    globals and locals are passed in to eval(); see the documentation
-    for eval() for more information.  If either globals or locals is
-    None, this function may replace that value with a context-specific
-    default, contingent on type(obj):
-
-      * If obj is a module, globals defaults to obj.__dict__.
-      * If obj is a class, globals defaults to
-        sys.modules[obj.__module__].__dict__ and locals
-        defaults to the obj class namespace.
-      * If obj is a callable, globals defaults to obj.__globals__,
-        although if obj is a wrapped function (using
-        functools.update_wrapper()) it is first unwrapped.
-    """
-    if isinstance(obj, type):
-        # class
-        obj_dict = getattr(obj, '__dict__', None)
-        if obj_dict and hasattr(obj_dict, 'get'):
-            ann = obj_dict.get('__annotations__', None)
-            if isinstance(ann, types.GetSetDescriptorType):
-                ann = None
-        else:
-            ann = None
-
-        obj_globals = None
-        module_name = getattr(obj, '__module__', None)
-        if module_name:
-            module = sys.modules.get(module_name, None)
-            if module:
-                obj_globals = getattr(module, '__dict__', None)
-        obj_locals = dict(vars(obj))
-        unwrap = obj
-    elif isinstance(obj, types.ModuleType):
-        # module
-        ann = getattr(obj, '__annotations__', None)
-        obj_globals = getattr(obj, '__dict__')
-        obj_locals = None
-        unwrap = None
-    elif callable(obj):
-        # this includes types.Function, types.BuiltinFunctionType,
-        # types.BuiltinMethodType, functools.partial, functools.singledispatch,
-        # "class funclike" from Lib/test/test_inspect... on and on it goes.
-        ann = getattr(obj, '__annotations__', None)
-        obj_globals = getattr(obj, '__globals__', None)
-        obj_locals = None
-        unwrap = obj
-    else:
-        raise TypeError(f"{obj!r} is not a module, class, or callable.")
-
-    if ann is None:
-        return {}
-
-    if not isinstance(ann, dict):
-        raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")
-
-    if not ann:
-        return {}
-
-    if not eval_str:
-        return dict(ann)
-
-    if unwrap is not None:
-        while True:
-            if hasattr(unwrap, '__wrapped__'):
-                unwrap = unwrap.__wrapped__
-                continue
-            if isinstance(unwrap, functools.partial):
-                unwrap = unwrap.func
-                continue
-            break
-        if hasattr(unwrap, "__globals__"):
-            obj_globals = unwrap.__globals__
-
-    if globals is None:
-        globals = obj_globals
-    if locals is None:
-        locals = obj_locals
-
-    return_value = {key:
-        value if not isinstance(value, str) else eval(value, globals, locals)
-        for key, value in ann.items() }
-    return return_value
-
-
 # ----------------------------------------------------------- type-checking
 def ismodule(object):
     """Return true if the object is a module.
@@ -395,7 +277,7 @@
         co_kwonlyargcount   number of keyword only arguments (not including ** arg)
         co_lnotab           encoded mapping of line numbers to bytecode indices
         co_name             name with which this code object was defined
-        co_names            tuple of names other than arguments and function locals
+        co_names            tuple of names of local variables
         co_nlocals          number of local variables
         co_stacksize        virtual machine stack space required
         co_varnames         tuple of names of arguments and local variables"""
@@ -525,7 +407,7 @@
     # attribute with the same name as a DynamicClassAttribute exists.
     for base in mro:
         for k, v in base.__dict__.items():
-            if isinstance(v, types.DynamicClassAttribute) and v.fget is not None:
+            if isinstance(v, types.DynamicClassAttribute):
                 names.append(k)
     result = []
     processed = set()
@@ -781,8 +663,6 @@
             module = sys.modules.get(object.__module__)
             if getattr(module, '__file__', None):
                 return module.__file__
-            if object.__module__ == '__main__':
-                raise OSError('source code not available')
         raise TypeError('{!r} is a built-in class'.format(object))
     if ismethod(object):
         object = object.__func__
@@ -826,13 +706,10 @@
     if os.path.exists(filename):
         return filename
     # only return a non-existent filename if the module has a PEP 302 loader
-    module = getmodule(object, filename)
-    if getattr(module, '__loader__', None) is not None:
-        return filename
-    elif getattr(getattr(module, "__spec__", None), "loader", None) is not None:
+    if getattr(getmodule(object, filename), '__loader__', None) is not None:
         return filename
     # or it is in the linecache
-    elif filename in linecache.cache:
+    if filename in linecache.cache:
         return filename
 
 def getabsfile(object, _filename=None):
@@ -859,7 +736,7 @@
     # Try the cache again with the absolute file name
     try:
         file = getabsfile(object, _filename)
-    except (TypeError, FileNotFoundError):
+    except TypeError:
         return None
     if file in modulesbyfile:
         return sys.modules.get(modulesbyfile[file])
@@ -1285,8 +1162,7 @@
         sig = _signature_from_callable(func,
                                        follow_wrapper_chains=False,
                                        skip_bound_arg=False,
-                                       sigcls=Signature,
-                                       eval_str=False)
+                                       sigcls=Signature)
     except Exception as ex:
         # Most of the times 'signature' will raise ValueError.
         # But, it can also raise AttributeError, and, maybe something
@@ -1357,8 +1233,6 @@
 def formatannotation(annotation, base_module=None):
     if getattr(annotation, '__module__', None) == 'typing':
         return repr(annotation).replace('typing.', '')
-    if isinstance(annotation, types.GenericAlias):
-        return str(annotation)
     if isinstance(annotation, type):
         if annotation.__module__ in ('builtins', base_module):
             return annotation.__qualname__
@@ -2021,7 +1895,7 @@
             isinstance(name, str) and
             (defaults is None or isinstance(defaults, tuple)) and
             (kwdefaults is None or isinstance(kwdefaults, dict)) and
-            (isinstance(annotations, (dict)) or annotations is None) )
+            isinstance(annotations, dict))
 
 
 def _signature_get_bound_param(spec):
@@ -2273,8 +2147,7 @@
     return _signature_fromstr(cls, func, s, skip_bound_arg)
 
 
-def _signature_from_function(cls, func, skip_bound_arg=True,
-                             globals=None, locals=None, eval_str=False):
+def _signature_from_function(cls, func, skip_bound_arg=True):
     """Private helper: constructs Signature for the given python function."""
 
     is_duck_function = False
@@ -2300,7 +2173,7 @@
     positional = arg_names[:pos_count]
     keyword_only_count = func_code.co_kwonlyargcount
     keyword_only = arg_names[pos_count:pos_count + keyword_only_count]
-    annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str)
+    annotations = func.__annotations__
     defaults = func.__defaults__
     kwdefaults = func.__kwdefaults__
 
@@ -2371,30 +2244,23 @@
 def _signature_from_callable(obj, *,
                              follow_wrapper_chains=True,
                              skip_bound_arg=True,
-                             globals=None,
-                             locals=None,
-                             eval_str=False,
                              sigcls):
 
     """Private helper function to get signature for arbitrary
     callable objects.
     """
 
-    _get_signature_of = functools.partial(_signature_from_callable,
-                                follow_wrapper_chains=follow_wrapper_chains,
-                                skip_bound_arg=skip_bound_arg,
-                                globals=globals,
-                                locals=locals,
-                                sigcls=sigcls,
-                                eval_str=eval_str)
-
     if not callable(obj):
         raise TypeError('{!r} is not a callable object'.format(obj))
 
     if isinstance(obj, types.MethodType):
         # In this case we skip the first parameter of the underlying
         # function (usually `self` or `cls`).
-        sig = _get_signature_of(obj.__func__)
+        sig = _signature_from_callable(
+            obj.__func__,
+            follow_wrapper_chains=follow_wrapper_chains,
+            skip_bound_arg=skip_bound_arg,
+            sigcls=sigcls)
 
         if skip_bound_arg:
             return _signature_bound_method(sig)
@@ -2408,7 +2274,11 @@
             # If the unwrapped object is a *method*, we might want to
             # skip its first parameter (self).
             # See test_signature_wrapped_bound_method for details.
-            return _get_signature_of(obj)
+            return _signature_from_callable(
+                obj,
+                follow_wrapper_chains=follow_wrapper_chains,
+                skip_bound_arg=skip_bound_arg,
+                sigcls=sigcls)
 
     try:
         sig = obj.__signature__
@@ -2435,7 +2305,11 @@
             # (usually `self`, or `cls`) will not be passed
             # automatically (as for boundmethods)
 
-            wrapped_sig = _get_signature_of(partialmethod.func)
+            wrapped_sig = _signature_from_callable(
+                partialmethod.func,
+                follow_wrapper_chains=follow_wrapper_chains,
+                skip_bound_arg=skip_bound_arg,
+                sigcls=sigcls)
 
             sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
             first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
@@ -2454,15 +2328,18 @@
         # If it's a pure Python function, or an object that is duck type
         # of a Python function (Cython functions, for instance), then:
         return _signature_from_function(sigcls, obj,
-                                        skip_bound_arg=skip_bound_arg,
-                                        globals=globals, locals=locals, eval_str=eval_str)
+                                        skip_bound_arg=skip_bound_arg)
 
     if _signature_is_builtin(obj):
         return _signature_from_builtin(sigcls, obj,
                                        skip_bound_arg=skip_bound_arg)
 
     if isinstance(obj, functools.partial):
-        wrapped_sig = _get_signature_of(obj.func)
+        wrapped_sig = _signature_from_callable(
+            obj.func,
+            follow_wrapper_chains=follow_wrapper_chains,
+            skip_bound_arg=skip_bound_arg,
+            sigcls=sigcls)
         return _signature_get_partial(wrapped_sig, obj)
 
     sig = None
@@ -2473,25 +2350,29 @@
         # in its metaclass
         call = _signature_get_user_defined_method(type(obj), '__call__')
         if call is not None:
-            sig = _get_signature_of(call)
+            sig = _signature_from_callable(
+                call,
+                follow_wrapper_chains=follow_wrapper_chains,
+                skip_bound_arg=skip_bound_arg,
+                sigcls=sigcls)
         else:
-            factory_method = None
+            # Now we check if the 'obj' class has a '__new__' method
             new = _signature_get_user_defined_method(obj, '__new__')
-            init = _signature_get_user_defined_method(obj, '__init__')
-            # Now we check if the 'obj' class has an own '__new__' method
-            if '__new__' in obj.__dict__:
-                factory_method = new
-            # or an own '__init__' method
-            elif '__init__' in obj.__dict__:
-                factory_method = init
-            # If not, we take inherited '__new__' or '__init__', if present
-            elif new is not None:
-                factory_method = new
-            elif init is not None:
-                factory_method = init
-
-            if factory_method is not None:
-                sig = _get_signature_of(factory_method)
+            if new is not None:
+                sig = _signature_from_callable(
+                    new,
+                    follow_wrapper_chains=follow_wrapper_chains,
+                    skip_bound_arg=skip_bound_arg,
+                    sigcls=sigcls)
+            else:
+                # Finally, we should have at least __init__ implemented
+                init = _signature_get_user_defined_method(obj, '__init__')
+                if init is not None:
+                    sig = _signature_from_callable(
+                        init,
+                        follow_wrapper_chains=follow_wrapper_chains,
+                        skip_bound_arg=skip_bound_arg,
+                        sigcls=sigcls)
 
         if sig is None:
             # At this point we know, that `obj` is a class, with no user-
@@ -2511,9 +2392,9 @@
                     pass
                 else:
                     if text_sig:
-                        # If 'base' class has a __text_signature__ attribute:
+                        # If 'obj' class has a __text_signature__ attribute:
                         # return a signature based on it
-                        return _signature_fromstr(sigcls, base, text_sig)
+                        return _signature_fromstr(sigcls, obj, text_sig)
 
             # No '__text_signature__' was found for the 'obj' class.
             # Last option is to check if its '__init__' is
@@ -2537,7 +2418,11 @@
         call = _signature_get_user_defined_method(type(obj), '__call__')
         if call is not None:
             try:
-                sig = _get_signature_of(call)
+                sig = _signature_from_callable(
+                    call,
+                    follow_wrapper_chains=follow_wrapper_chains,
+                    skip_bound_arg=skip_bound_arg,
+                    sigcls=sigcls)
             except ValueError as ex:
                 msg = 'no signature found for {!r}'.format(obj)
                 raise ValueError(msg) from ex
@@ -2989,12 +2874,10 @@
         return _signature_from_builtin(cls, func)
 
     @classmethod
-    def from_callable(cls, obj, *,
-                      follow_wrapped=True, globals=None, locals=None, eval_str=False):
+    def from_callable(cls, obj, *, follow_wrapped=True):
         """Constructs Signature for the given callable object."""
         return _signature_from_callable(obj, sigcls=cls,
-                                        follow_wrapper_chains=follow_wrapped,
-                                        globals=globals, locals=locals, eval_str=eval_str)
+                                        follow_wrapper_chains=follow_wrapped)
 
     @property
     def parameters(self):
@@ -3242,10 +3125,9 @@
         return rendered
 
 
-def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False):
+def signature(obj, *, follow_wrapped=True):
     """Get a signature object for the passed callable."""
-    return Signature.from_callable(obj, follow_wrapped=follow_wrapped,
-                                   globals=globals, locals=locals, eval_str=eval_str)
+    return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
 
 
 def _main():
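
What the removed `get_annotations()` and `eval_str` plumbing were for, in one snippet: under PEP 563 stringized annotations, `signature()` reports plain strings, and the deleted machinery existed to eval() them back into objects on request. After this revert, `signature()` no longer accepts `globals`, `locals`, or `eval_str` at all:

    from __future__ import annotations     # PEP 563: annotations become strings
    import inspect

    def f(x: int) -> int:
        return x

    param = inspect.signature(f).parameters['x']
    assert param.annotation == 'int'        # the string 'int', not the type
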
diff --git a/common/py3-stdlib/io.py b/common/py3-stdlib/io.py
index 2a6140c..fbce6ef 100644
--- a/common/py3-stdlib/io.py
+++ b/common/py3-stdlib/io.py
@@ -54,24 +54,9 @@
 from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
                  open, open_code, FileIO, BytesIO, StringIO, BufferedReader,
                  BufferedWriter, BufferedRWPair, BufferedRandom,
-                 IncrementalNewlineDecoder, text_encoding, TextIOWrapper)
+                 IncrementalNewlineDecoder, TextIOWrapper)
 
-
-def __getattr__(name):
-    if name == "OpenWrapper":
-        # bpo-43680: Until Python 3.9, _pyio.open was not a static method and
-        # builtins.open was set to OpenWrapper to not become a bound method
-        # when set to a class variable. _io.open is a built-in function whereas
-        # _pyio.open is a Python function. In Python 3.10, _pyio.open() is now
-        # a static method, and builtins.open() is now io.open().
-        import warnings
-        warnings.warn('OpenWrapper is deprecated, use open instead',
-                      DeprecationWarning, stacklevel=2)
-        global OpenWrapper
-        OpenWrapper = open
-        return OpenWrapper
-    raise AttributeError(name)
-
+OpenWrapper = _io.open # for compatibility with _pyio
 
 # Pretend this exception was created here.
 UnsupportedOperation.__module__ = "io"
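
After this revert, `OpenWrapper` is a plain alias again rather than a module `__getattr__` that raises DeprecationWarning on access; either way it resolves to the same callable:

    import io
    assert io.OpenWrapper is io.open
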
diff --git a/common/py3-stdlib/ipaddress.py b/common/py3-stdlib/ipaddress.py
index 4a6496a..bc662c4 100644
--- a/common/py3-stdlib/ipaddress.py
+++ b/common/py3-stdlib/ipaddress.py
@@ -16,7 +16,6 @@
 IPV4LENGTH = 32
 IPV6LENGTH = 128
 
-
 class AddressValueError(ValueError):
     """A Value Error related to the address."""
 
@@ -1215,7 +1214,7 @@
         """
         if not octet_str:
             raise ValueError("Empty octet not permitted")
-        # Reject non-ASCII digits.
+        # Whitelist the characters, since int() allows a lot of bizarre stuff.
         if not (octet_str.isascii() and octet_str.isdigit()):
             msg = "Only decimal digits permitted in %r"
             raise ValueError(msg % octet_str)
@@ -1224,11 +1223,6 @@
         if len(octet_str) > 3:
             msg = "At most 3 characters permitted in %r"
             raise ValueError(msg % octet_str)
-        # Handle leading zeros as strict as glibc's inet_pton()
-        # See security bug bpo-36384
-        if octet_str != '0' and octet_str[0] == '0':
-            msg = "Leading zeros are not permitted in %r"
-            raise ValueError(msg % octet_str)
         # Convert to integer (we know digits are legal)
         octet_int = int(octet_str, 10)
         if octet_int > 255:
@@ -1725,7 +1719,7 @@
               [0..FFFF].
 
         """
-        # Reject non-ASCII digits.
+        # Whitelist the characters, since int() allows a lot of bizarre stuff.
         if not cls._HEX_DIGITS.issuperset(hextet_str):
             raise ValueError("Only hex digits permitted in %r" % hextet_str)
         # We do the length check second, since the invalid character error
@@ -2003,13 +1997,9 @@
 
         Returns:
             A boolean, True if the address is reserved per
-            iana-ipv6-special-registry, or is ipv4_mapped and is
-            reserved in the iana-ipv4-special-registry.
+            iana-ipv6-special-registry.
 
         """
-        ipv4_mapped = self.ipv4_mapped
-        if ipv4_mapped is not None:
-            return ipv4_mapped.is_private
         return any(self in net for net in self._constants._private_networks)
 
     @property
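
The removed leading-zero check is observable from user code: the stricter parser rejects inet_aton-style octets, while the reverted one reads them as decimal. A probe that runs on either version:

    import ipaddress

    try:
        addr = ipaddress.IPv4Address('192.168.010.1')
        print(addr)                 # 192.168.10.1 on the reverted sources
    except ValueError as exc:
        print('rejected:', exc)     # raised by the stricter parser (bpo-36384)
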
diff --git a/common/py3-stdlib/json/__init__.py b/common/py3-stdlib/json/__init__.py
index e4c21da..2c52bde 100644
--- a/common/py3-stdlib/json/__init__.py
+++ b/common/py3-stdlib/json/__init__.py
@@ -133,7 +133,7 @@
 
     If ``check_circular`` is false, then the circular reference check
     for container types will be skipped and a circular reference will
-    result in an ``RecursionError`` (or worse).
+    result in an ``OverflowError`` (or worse).
 
     If ``allow_nan`` is false, then it will be a ``ValueError`` to
     serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
@@ -195,7 +195,7 @@
 
     If ``check_circular`` is false, then the circular reference check
     for container types will be skipped and a circular reference will
-    result in an ``RecursionError`` (or worse).
+    result in an ``OverflowError`` (or worse).
 
     If ``allow_nan`` is false, then it will be a ``ValueError`` to
     serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
diff --git a/common/py3-stdlib/json/encoder.py b/common/py3-stdlib/json/encoder.py
index 21bff2c..c8c78b9 100644
--- a/common/py3-stdlib/json/encoder.py
+++ b/common/py3-stdlib/json/encoder.py
@@ -116,7 +116,7 @@
 
         If check_circular is true, then lists, dicts, and custom encoded
         objects will be checked for circular references during encoding to
-        prevent an infinite recursion (which would cause an RecursionError).
+        prevent an infinite recursion (which would cause an OverflowError).
         Otherwise, no such check takes place.
 
         If allow_nan is true, then NaN, Infinity, and -Infinity will be
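
Whichever exception name the docstrings above settle on, that failure mode only applies when `check_circular` is disabled; by default a cycle is caught up front:

    import json

    loop = []
    loop.append(loop)               # self-referencing container
    try:
        json.dumps(loop)            # check_circular=True by default
    except ValueError as exc:
        print(exc)                  # 'Circular reference detected'
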
diff --git a/common/py3-stdlib/json/tool.py b/common/py3-stdlib/json/tool.py
index 0490b8c..5dee0a7 100644
--- a/common/py3-stdlib/json/tool.py
+++ b/common/py3-stdlib/json/tool.py
@@ -13,7 +13,6 @@
 import argparse
 import json
 import sys
-from pathlib import Path
 
 
 def main():
@@ -26,9 +25,9 @@
                         help='a JSON file to be validated or pretty-printed',
                         default=sys.stdin)
     parser.add_argument('outfile', nargs='?',
-                        type=Path,
+                        type=argparse.FileType('w', encoding="utf-8"),
                         help='write the output of infile to outfile',
-                        default=None)
+                        default=sys.stdout)
     parser.add_argument('--sort-keys', action='store_true', default=False,
                         help='sort the output of dictionaries alphabetically by key')
     parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false',
@@ -59,21 +58,15 @@
         dump_args['indent'] = None
         dump_args['separators'] = ',', ':'
 
-    with options.infile as infile:
+    with options.infile as infile, options.outfile as outfile:
         try:
             if options.json_lines:
                 objs = (json.loads(line) for line in infile)
             else:
-                objs = (json.load(infile),)
-
-            if options.outfile is None:
-                out = sys.stdout
-            else:
-                out = options.outfile.open('w', encoding='utf-8')
-            with out as outfile:
-                for obj in objs:
-                    json.dump(obj, outfile, **dump_args)
-                    outfile.write('\n')
+                objs = (json.load(infile), )
+            for obj in objs:
+                json.dump(obj, outfile, **dump_args)
+                outfile.write('\n')
         except ValueError as e:
             raise SystemExit(e)
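
The CLI contract is unchanged by the outfile plumbing above, for example:

    echo '{"b": 1, "a": 2}' | python -m json.tool --sort-keys

and the programmatic equivalent of what the tool does per object:

    import json
    import sys

    obj = json.loads('{"b": 1, "a": 2}')
    json.dump(obj, sys.stdout, indent=4, sort_keys=True)
    sys.stdout.write('\n')
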
 
diff --git a/common/py3-stdlib/keyword.py b/common/py3-stdlib/keyword.py
index cc2b46b..59fcfb0 100644
--- a/common/py3-stdlib/keyword.py
+++ b/common/py3-stdlib/keyword.py
@@ -6,7 +6,7 @@
 the python source tree and run:
 
     PYTHONPATH=Tools/peg_generator python3 -m pegen.keywordgen \
-        Grammar/python.gram \
+        Grammar/Grammar \
         Grammar/Tokens \
         Lib/keyword.py
 
@@ -19,6 +19,7 @@
     'False',
     'None',
     'True',
+    '__peg_parser__',
     'and',
     'as',
     'assert',
@@ -54,9 +55,7 @@
 ]
 
 softkwlist = [
-    '_',
-    'case',
-    'match'
+
 ]
 
 iskeyword = frozenset(kwlist).__contains__
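
The visible effect of this table swap, checkable from either interpreter:

    import keyword

    assert keyword.iskeyword('if')            # hard keywords are unaffected
    assert not keyword.iskeyword('match')     # 'match' is never a hard keyword
    print(keyword.softkwlist)                 # [] after this revert;
                                              # ['_', 'case', 'match'] before it
    # keyword.iskeyword('__peg_parser__') is True only on the reverted sources.
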
diff --git a/common/py3-stdlib/linecache.py b/common/py3-stdlib/linecache.py
index 23191d6..fa5dbd0 100644
--- a/common/py3-stdlib/linecache.py
+++ b/common/py3-stdlib/linecache.py
@@ -154,7 +154,7 @@
 
     :return: True if a lazy load is registered in the cache,
         otherwise False. To register such a load a module loader with a
-        get_source method must be found, the filename must be a cacheable
+        get_source method must be found, the filename must be a cachable
         filename, and the filename must not be already cached.
     """
     if filename in cache:
@@ -165,14 +165,9 @@
     if not filename or (filename.startswith('<') and filename.endswith('>')):
         return False
     # Try for a __loader__, if available
-    if module_globals and '__name__' in module_globals:
-        name = module_globals['__name__']
-        if (loader := module_globals.get('__loader__')) is None:
-            if spec := module_globals.get('__spec__'):
-                try:
-                    loader = spec.loader
-                except AttributeError:
-                    pass
+    if module_globals and '__loader__' in module_globals:
+        name = module_globals.get('__name__')
+        loader = module_globals['__loader__']
         get_source = getattr(loader, 'get_source', None)
 
         if name and get_source:
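
A sketch of the `lazycache()` contract described in the docstring above: hand it module globals that carry a loader with `get_source()`, and the source is fetched only when a line is first requested. The filename here is a made-up cache key, not a real file:

    import json
    import linecache

    g = {'__name__': 'json', '__loader__': json.__loader__}
    assert linecache.lazycache('virtual-json-source', g)    # lazily registered
    first = linecache.getline('virtual-json-source', 1)     # get_source() runs now
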
diff --git a/common/py3-stdlib/locale.py b/common/py3-stdlib/locale.py
index 6d4f519..1a4e9f6 100644
--- a/common/py3-stdlib/locale.py
+++ b/common/py3-stdlib/locale.py
@@ -185,14 +185,8 @@
         formatted = percent % ((value,) + additional)
     else:
         formatted = percent % value
-    if percent[-1] in 'eEfFgGdiu':
-        formatted = _localize(formatted, grouping, monetary)
-    return formatted
-
-# Transform formatted as locale number according to the locale settings
-def _localize(formatted, grouping=False, monetary=False):
     # floats and decimal ints need special action!
-    if '.' in formatted:
+    if percent[-1] in 'eEfFgG':
         seps = 0
         parts = formatted.split('.')
         if grouping:
@@ -202,7 +196,7 @@
         formatted = decimal_point.join(parts)
         if seps:
             formatted = _strip_padding(formatted, seps)
-    else:
+    elif percent[-1] in 'diu':
         seps = 0
         if grouping:
             formatted, seps = _group(formatted, monetary=monetary)
@@ -273,7 +267,7 @@
         raise ValueError("Currency formatting is not possible using "
                          "the 'C' locale.")
 
-    s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True)
+    s = _format('%%.%if' % digits, abs(val), grouping, monetary=True)
     # '<' and '>' are markers if the sign must be inserted between symbol and value
     s = '<' + s + '>'
 
@@ -329,10 +323,6 @@
         string = string.replace(dd, '.')
     return string
 
-def localize(string, grouping=False, monetary=False):
-    """Parses a string as locale number according to the locale settings."""
-    return _localize(string, grouping, monetary)
-
 def atof(string, func=float):
     "Parses a string as a float according to the locale settings."
     return func(delocalize(string))
@@ -629,49 +619,53 @@
     """
     _setlocale(category, _build_localename(getdefaultlocale()))
 
-
-try:
-    from _locale import _get_locale_encoding
-except ImportError:
-    def _get_locale_encoding():
+if sys.platform.startswith("win"):
+    # On Win32, this will return the ANSI code page
+    def getpreferredencoding(do_setlocale = True):
+        """Return the charset that the user is likely using."""
+        if sys.flags.utf8_mode:
+            return 'UTF-8'
+        import _bootlocale
+        return _bootlocale.getpreferredencoding(False)
+else:
+    # On Unix, if CODESET is available, use that.
+    try:
+        CODESET
+    except NameError:
         if hasattr(sys, 'getandroidapilevel'):
             # On Android langinfo.h and CODESET are missing, and UTF-8 is
             # always used in mbstowcs() and wcstombs().
-            return 'UTF-8'
-        if sys.flags.utf8_mode:
-            return 'UTF-8'
-        encoding = getdefaultlocale()[1]
-        if encoding is None:
-            # LANG not set, default conservatively to ASCII
-            encoding = 'ascii'
-        return encoding
-
-try:
-    CODESET
-except NameError:
-    def getpreferredencoding(do_setlocale=True):
-        """Return the charset that the user is likely using."""
-        return _get_locale_encoding()
-else:
-    # On Unix, if CODESET is available, use that.
-    def getpreferredencoding(do_setlocale=True):
-        """Return the charset that the user is likely using,
-        according to the system configuration."""
-        if sys.flags.utf8_mode:
-            return 'UTF-8'
-
-        if not do_setlocale:
-            return _get_locale_encoding()
-
-        old_loc = setlocale(LC_CTYPE)
-        try:
-            try:
-                setlocale(LC_CTYPE, "")
-            except Error:
-                pass
-            return _get_locale_encoding()
-        finally:
-            setlocale(LC_CTYPE, old_loc)
+            def getpreferredencoding(do_setlocale = True):
+                return 'UTF-8'
+        else:
+            # Fall back to parsing environment variables :-(
+            def getpreferredencoding(do_setlocale = True):
+                """Return the charset that the user is likely using,
+                by looking at environment variables."""
+                if sys.flags.utf8_mode:
+                    return 'UTF-8'
+                res = getdefaultlocale()[1]
+                if res is None:
+                    # LANG not set, default conservatively to ASCII
+                    res = 'ascii'
+                return res
+    else:
+        def getpreferredencoding(do_setlocale = True):
+            """Return the charset that the user is likely using,
+            according to the system configuration."""
+            if sys.flags.utf8_mode:
+                return 'UTF-8'
+            import _bootlocale
+            if do_setlocale:
+                oldloc = setlocale(LC_CTYPE)
+                try:
+                    setlocale(LC_CTYPE, "")
+                except Error:
+                    pass
+            result = _bootlocale.getpreferredencoding(False)
+            if do_setlocale:
+                setlocale(LC_CTYPE, oldloc)
+            return result
 
 
 ### Database
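
Both versions of the code above feed the same public formatting entry points. A usage sketch pinned to the 'C' locale, which is always available; the grouped form shown in the comment assumes a locale such as en_US.UTF-8 is installed:

    import locale

    locale.setlocale(locale.LC_ALL, 'C')
    print(locale.format_string('%d', 1234567, grouping=True))  # '1234567' in C
    print(locale.getpreferredencoding(False))
    # Under 'en_US.UTF-8', the same format_string() call yields '1,234,567'.
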
diff --git a/common/py3-stdlib/logging/__init__.py b/common/py3-stdlib/logging/__init__.py
index 19bd2bc..7b169a1 100644
--- a/common/py3-stdlib/logging/__init__.py
+++ b/common/py3-stdlib/logging/__init__.py
@@ -118,7 +118,7 @@
 
 def getLevelName(level):
     """
-    Return the textual or numeric representation of logging level 'level'.
+    Return the textual representation of logging level 'level'.
 
     If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
     INFO, DEBUG) then you get the corresponding string. If you have
@@ -128,11 +128,7 @@
     If a numeric value corresponding to one of the defined levels is passed
     in, the corresponding string representation is returned.
 
-    If a string representation of the level is passed in, the corresponding
-    numeric value is returned.
-
-    If no matching numeric or string value is passed in, the string
-    'Level %s' % level is returned.
+    Otherwise, the string "Level %s" % level is returned.
     """
     # See Issues #22386, #27937 and #29220 for why it's this way
     result = _levelToName.get(level)
@@ -198,8 +194,7 @@
             raise ValueError("Unknown level: %r" % level)
         rv = _nameToLevel[level]
     else:
-        raise TypeError("Level not an integer or a valid string: %r"
-                        % (level,))
+        raise TypeError("Level not an integer or a valid string: %r" % level)
     return rv
 
 #---------------------------------------------------------------------------
@@ -416,9 +411,8 @@
     asctime_search = '%(asctime)'
     validation_pattern = re.compile(r'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]', re.I)
 
-    def __init__(self, fmt, *, defaults=None):
+    def __init__(self, fmt):
         self._fmt = fmt or self.default_format
-        self._defaults = defaults
 
     def usesTime(self):
         return self._fmt.find(self.asctime_search) >= 0
@@ -429,11 +423,7 @@
             raise ValueError("Invalid format '%s' for '%s' style" % (self._fmt, self.default_format[0]))
 
     def _format(self, record):
-        if defaults := self._defaults:
-            values = defaults | record.__dict__
-        else:
-            values = record.__dict__
-        return self._fmt % values
+        return self._fmt % record.__dict__
 
     def format(self, record):
         try:
@@ -451,11 +441,7 @@
     field_spec = re.compile(r'^(\d+|\w+)(\.\w+|\[[^]]+\])*$')
 
     def _format(self, record):
-        if defaults := self._defaults:
-            values = defaults | record.__dict__
-        else:
-            values = record.__dict__
-        return self._fmt.format(**values)
+        return self._fmt.format(**record.__dict__)
 
     def validate(self):
         """Validate the input format, ensure it is the correct string formatting style"""
@@ -481,8 +467,8 @@
     asctime_format = '${asctime}'
     asctime_search = '${asctime}'
 
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
+    def __init__(self, fmt):
+        self._fmt = fmt or self.default_format
         self._tpl = Template(self._fmt)
 
     def usesTime(self):
@@ -504,11 +490,7 @@
             raise ValueError('invalid format: no fields')
 
     def _format(self, record):
-        if defaults := self._defaults:
-            values = defaults | record.__dict__
-        else:
-            values = record.__dict__
-        return self._tpl.substitute(**values)
+        return self._tpl.substitute(**record.__dict__)
 
 
 BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
@@ -564,8 +546,7 @@
 
     converter = time.localtime
 
-    def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
-                 defaults=None):
+    def __init__(self, fmt=None, datefmt=None, style='%', validate=True):
         """
         Initialize the formatter with specified format strings.
 
@@ -584,7 +565,7 @@
         if style not in _STYLES:
             raise ValueError('Style must be one of: %s' % ','.join(
                              _STYLES.keys()))
-        self._style = _STYLES[style][0](fmt, defaults=defaults)
+        self._style = _STYLES[style][0](fmt)
         if validate:
             self._style.validate()
 
@@ -878,7 +859,6 @@
         self._name = None
         self.level = _checkLevel(level)
         self.formatter = None
-        self._closed = False
         # Add the handler to the global _handlerList (for cleanup on shutdown)
         _addHandlerRef(self)
         self.createLock()
@@ -997,7 +977,6 @@
         #get the module data lock, as we're updating a shared structure.
         _acquireLock()
         try:    #unlikely to raise an exception, but you never know...
-            self._closed = True
             if self._name and self._name in _handlers:
                 del _handlers[self._name]
         finally:
@@ -1152,14 +1131,8 @@
         self.baseFilename = os.path.abspath(filename)
         self.mode = mode
         self.encoding = encoding
-        if "b" not in mode:
-            self.encoding = io.text_encoding(encoding)
         self.errors = errors
         self.delay = delay
-        # bpo-26789: FileHandler keeps a reference to the builtin open()
-        # function to be able to open or reopen the file during Python
-        # finalization.
-        self._builtin_open = open
         if delay:
             #We don't open the stream, but we still need to call the
             #Handler constructor to set level, formatter, lock etc.
@@ -1186,8 +1159,6 @@
             finally:
                 # Issue #19523: call unconditionally to
                 # prevent a handler leak when delay is set
-                # Also see Issue #42378: we also rely on
-                # self._closed being set to True there
                 StreamHandler.close(self)
         finally:
             self.release()
@@ -1197,9 +1168,8 @@
         Open the current base file with the (original) mode and encoding.
         Return the resulting stream.
         """
-        open_func = self._builtin_open
-        return open_func(self.baseFilename, self.mode,
-                         encoding=self.encoding, errors=self.errors)
+        return open(self.baseFilename, self.mode, encoding=self.encoding,
+                    errors=self.errors)
 
     def emit(self, record):
         """
@@ -1207,15 +1177,10 @@
 
         If the stream was not opened because 'delay' was specified in the
         constructor, open it before calling the superclass's emit.
-
-        If stream is not open, current mode is 'w' and `_closed=True`, record
-        will not be emitted (see Issue #42378).
         """
         if self.stream is None:
-            if self.mode != 'w' or not self._closed:
-                self.stream = self._open()
-        if self.stream:
-            StreamHandler.emit(self, record)
+            self.stream = self._open()
+        StreamHandler.emit(self, record)
 
     def __repr__(self):
         level = getLevelName(self.level)
@@ -1304,14 +1269,6 @@
         self.loggerClass = None
         self.logRecordFactory = None
 
-    @property
-    def disable(self):
-        return self._disable
-
-    @disable.setter
-    def disable(self, value):
-        self._disable = _checkLevel(value)
-
     def getLogger(self, name):
         """
         Get a logger with the specified name (channel name), creating it
@@ -1523,11 +1480,7 @@
         if self.isEnabledFor(CRITICAL):
             self._log(CRITICAL, msg, args, **kwargs)
 
-    def fatal(self, msg, *args, **kwargs):
-        """
-        Don't use this method, use critical() instead.
-        """
-        self.critical(msg, *args, **kwargs)
+    fatal = critical
 
     def log(self, level, msg, *args, **kwargs):
         """
@@ -1798,7 +1751,7 @@
     information in logging output.
     """
 
-    def __init__(self, logger, extra=None):
+    def __init__(self, logger, extra):
         """
         Initialize the adapter with a logger and a dict-like object which
         provides contextual information. This constructor signature allows
@@ -2033,10 +1986,8 @@
                 filename = kwargs.pop("filename", None)
                 mode = kwargs.pop("filemode", 'a')
                 if filename:
-                    if 'b' in mode:
+                    if 'b' in mode:
                         errors = None
-                    else:
-                        encoding = io.text_encoding(encoding)
                     h = FileHandler(filename, mode,
                                     encoding=encoding, errors=errors)
                 else:
@@ -2088,11 +2039,7 @@
         basicConfig()
     root.critical(msg, *args, **kwargs)
 
-def fatal(msg, *args, **kwargs):
-    """
-    Don't use this function, use critical() instead.
-    """
-    critical(msg, *args, **kwargs)
+fatal = critical
 
 def error(msg, *args, **kwargs):
     """
diff --git a/common/py3-stdlib/logging/config.py b/common/py3-stdlib/logging/config.py
index 3bc63b7..fd3aded 100644
--- a/common/py3-stdlib/logging/config.py
+++ b/common/py3-stdlib/logging/config.py
@@ -48,7 +48,7 @@
 #   _listener holds the server object doing the listening
 _listener = None
 
-def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=None):
+def fileConfig(fname, defaults=None, disable_existing_loggers=True):
     """
     Read the logging configuration from a ConfigParser-format file.
 
@@ -66,8 +66,7 @@
         if hasattr(fname, 'readline'):
             cp.read_file(fname)
         else:
-            encoding = io.text_encoding(encoding)
-            cp.read(fname, encoding=encoding)
+            cp.read(fname)
 
     formatters = _create_formatters(cp)
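
With the encoding parameter reverted away, fileConfig() reads the file with
configparser's default, locale-dependent encoding. A minimal round trip, using
a hypothetical file name:

    import logging, logging.config

    # Hypothetical INI file; the reverted signature offers no encoding=
    # argument, so non-ASCII content depends on the locale's default encoding.
    with open('logcfg.ini', 'w') as f:
        f.write(
            "[loggers]\nkeys=root\n"
            "[handlers]\nkeys=console\n"
            "[formatters]\nkeys=plain\n"
            "[logger_root]\nlevel=INFO\nhandlers=console\n"
            "[handler_console]\nclass=StreamHandler\nlevel=INFO\n"
            "formatter=plain\nargs=(sys.stderr,)\n"
            "[formatter_plain]\nformat=%(levelname)s:%(name)s:%(message)s\n")

    logging.config.fileConfig('logcfg.ini')
    logging.getLogger().info('configured')
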
 
diff --git a/common/py3-stdlib/logging/handlers.py b/common/py3-stdlib/logging/handlers.py
index 61a3995..867ef4e 100644
--- a/common/py3-stdlib/logging/handlers.py
+++ b/common/py3-stdlib/logging/handlers.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
 #
 # Permission to use, copy, modify, and distribute this software and its
 # documentation for any purpose and without fee is hereby granted,
@@ -18,12 +18,12 @@
 Additional handlers for the logging package for Python. The core package is
 based on PEP 282 and comments thereto in comp.lang.python.
 
-Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
 
 To use, simply 'import logging.handlers' and log away!
 """
 
-import io, logging, socket, os, pickle, struct, time, re
+import logging, socket, os, pickle, struct, time, re
 from stat import ST_DEV, ST_INO, ST_MTIME
 import queue
 import threading
@@ -150,8 +150,6 @@
         # on each run.
         if maxBytes > 0:
             mode = 'a'
-        if "b" not in mode:
-            encoding = io.text_encoding(encoding)
         BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                      delay=delay, errors=errors)
         self.maxBytes = maxBytes
@@ -187,17 +185,14 @@
         Basically, see if the supplied record would cause the file to exceed
         the size limit we have.
         """
-        # See bpo-45401: Never rollover anything other than regular files
-        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
-            return False
         if self.stream is None:                 # delay was set...
             self.stream = self._open()
         if self.maxBytes > 0:                   # are we rolling over?
             msg = "%s\n" % self.format(record)
             self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
             if self.stream.tell() + len(msg) >= self.maxBytes:
-                return True
-        return False
+                return 1
+        return 0
 
 class TimedRotatingFileHandler(BaseRotatingHandler):
     """
@@ -210,7 +205,6 @@
     def __init__(self, filename, when='h', interval=1, backupCount=0,
                  encoding=None, delay=False, utc=False, atTime=None,
                  errors=None):
-        encoding = io.text_encoding(encoding)
         BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                      delay=delay, errors=errors)
         self.when = when.upper()
@@ -348,13 +342,10 @@
         record is not used, as we are just comparing times, but it is needed so
         the method signatures are the same
         """
-        # See bpo-45401: Never rollover anything other than regular files
-        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
-            return False
         t = int(time.time())
         if t >= self.rolloverAt:
-            return True
-        return False
+            return 1
+        return 0
 
     def getFilesToDelete(self):
         """
@@ -365,32 +356,13 @@
         dirName, baseName = os.path.split(self.baseFilename)
         fileNames = os.listdir(dirName)
         result = []
-        # See bpo-44753: Don't use the extension when computing the prefix.
-        n, e = os.path.splitext(baseName)
-        prefix = n + '.'
+        prefix = baseName + "."
         plen = len(prefix)
         for fileName in fileNames:
-            if self.namer is None:
-                # Our files will always start with baseName
-                if not fileName.startswith(baseName):
-                    continue
-            else:
-                # Our files could be just about anything after custom naming, but
-                # likely candidates are of the form
-                # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log
-                if (not fileName.startswith(baseName) and fileName.endswith(e) and
-                    len(fileName) > (plen + 1) and not fileName[plen+1].isdigit()):
-                    continue
-
             if fileName[:plen] == prefix:
                 suffix = fileName[plen:]
-                # See bpo-45628: The date/time suffix could be anywhere in the
-                # filename
-                parts = suffix.split('.')
-                for part in parts:
-                    if self.extMatch.match(part):
-                        result.append(os.path.join(dirName, fileName))
-                        break
+                if self.extMatch.match(suffix):
+                    result.append(os.path.join(dirName, fileName))
         if len(result) < self.backupCount:
             result = []
         else:
@@ -470,8 +442,6 @@
     """
     def __init__(self, filename, mode='a', encoding=None, delay=False,
                  errors=None):
-        if "b" not in mode:
-            encoding = io.text_encoding(encoding)
         logging.FileHandler.__init__(self, filename, mode=mode,
                                      encoding=encoding, delay=delay,
                                      errors=errors)
@@ -1172,7 +1142,7 @@
 
 class HTTPHandler(logging.Handler):
     """
-    A class which sends records to a web server, using either GET or
+    A class which sends records to a Web server, using either GET or
     POST semantics.
     """
     def __init__(self, host, url, method="GET", secure=False, credentials=None,
@@ -1221,7 +1191,7 @@
         """
         Emit a record.
 
-        Send the record to the web server as a percent-encoded dictionary
+        Send the record to the Web server as a percent-encoded dictionary
         """
         try:
             import urllib.parse
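
The two shouldRollover() reverts above drop the bpo-45401 regular-file guard
and go back to returning 1/0. The size-based test amounts to the following
rough, self-contained sketch (the real code measures via the open stream's
tell() rather than the path):

    import os

    def should_rollover_sketch(path, record_bytes, max_bytes):
        # 1/0 mirror the reverted return values.  Note there is no
        # os.path.isfile() guard, so a FIFO or device at `path` would
        # also be considered for rollover.
        if max_bytes > 0:
            size = os.path.getsize(path) if os.path.exists(path) else 0
            if size + record_bytes >= max_bytes:
                return 1
        return 0

The getFilesToDelete() revert similarly goes back to matching rotated files
purely by the "basename." prefix, which misses custom namer() layouts (the
bpo-44753/bpo-45628 cases handled by the removed lines).
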
diff --git a/common/py3-stdlib/lzma.py b/common/py3-stdlib/lzma.py
index 800f521..0817b87 100644
--- a/common/py3-stdlib/lzma.py
+++ b/common/py3-stdlib/lzma.py
@@ -225,22 +225,14 @@
         """Write a bytes object to the file.
 
         Returns the number of uncompressed bytes written, which is
-        always the length of data in bytes. Note that due to buffering,
-        the file on disk may not reflect the data written until close()
-        is called.
+        always len(data). Note that due to buffering, the file on disk
+        may not reflect the data written until close() is called.
         """
         self._check_can_write()
-        if isinstance(data, (bytes, bytearray)):
-            length = len(data)
-        else:
-            # accept any data that supports the buffer protocol
-            data = memoryview(data)
-            length = data.nbytes
-
         compressed = self._compressor.compress(data)
         self._fp.write(compressed)
-        self._pos += length
-        return length
+        self._pos += len(data)
+        return len(data)
 
     def seek(self, offset, whence=io.SEEK_SET):
         """Change the file position.
@@ -310,7 +302,6 @@
                            preset=preset, filters=filters)
 
     if "t" in mode:
-        encoding = io.text_encoding(encoding)
         return io.TextIOWrapper(binary_file, encoding, errors, newline)
     else:
         return binary_file
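
The write() revert above reports len(data) as the byte count, which is only
correct for bytes-like objects whose length is their size in bytes. For other
buffer-protocol objects the two can differ, which is what the removed
memoryview/nbytes logic accounted for:

    import array

    data = array.array('I', [1, 2, 3, 4])   # typically 16 bytes
    mv = memoryview(data)
    print(len(mv), mv.nbytes)                # 4 items vs. 16 bytes
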
diff --git a/common/py3-stdlib/mimetypes.py b/common/py3-stdlib/mimetypes.py
index b72ce08..92c2a47 100644
--- a/common/py3-stdlib/mimetypes.py
+++ b/common/py3-stdlib/mimetypes.py
@@ -27,12 +27,6 @@
 import sys
 import posixpath
 import urllib.parse
-
-try:
-    from _winapi import _mimetypes_read_windows_registry
-except ImportError:
-    _mimetypes_read_windows_registry = None
-
 try:
     import winreg as _winreg
 except ImportError:
@@ -141,23 +135,25 @@
                 type = 'text/plain'
             return type, None           # never compressed, so encoding is None
         base, ext = posixpath.splitext(url)
-        while (ext_lower := ext.lower()) in self.suffix_map:
-            base, ext = posixpath.splitext(base + self.suffix_map[ext_lower])
-        # encodings_map is case sensitive
+        while ext in self.suffix_map:
+            base, ext = posixpath.splitext(base + self.suffix_map[ext])
         if ext in self.encodings_map:
             encoding = self.encodings_map[ext]
             base, ext = posixpath.splitext(base)
         else:
             encoding = None
-        ext = ext.lower()
         types_map = self.types_map[True]
         if ext in types_map:
             return types_map[ext], encoding
+        elif ext.lower() in types_map:
+            return types_map[ext.lower()], encoding
         elif strict:
             return None, encoding
         types_map = self.types_map[False]
         if ext in types_map:
             return types_map[ext], encoding
+        elif ext.lower() in types_map:
+            return types_map[ext.lower()], encoding
         else:
             return None, encoding
 
@@ -173,7 +169,7 @@
         but non-standard types.
         """
         type = type.lower()
-        extensions = list(self.types_map_inv[True].get(type, []))
+        extensions = self.types_map_inv[True].get(type, [])
         if not strict:
             for ext in self.types_map_inv[False].get(type, []):
                 if ext not in extensions:
@@ -241,21 +237,10 @@
         types.
         """
 
-        if not _mimetypes_read_windows_registry and not _winreg:
+        # Windows only
+        if not _winreg:
             return
 
-        add_type = self.add_type
-        if strict:
-            add_type = lambda type, ext: self.add_type(type, ext, True)
-
-        # Accelerated function if it is available
-        if _mimetypes_read_windows_registry:
-            _mimetypes_read_windows_registry(add_type)
-        elif _winreg:
-            self._read_windows_registry(add_type)
-
-    @classmethod
-    def _read_windows_registry(cls, add_type):
         def enum_types(mimedb):
             i = 0
             while True:
@@ -280,7 +265,7 @@
                             subkey, 'Content Type')
                         if datatype != _winreg.REG_SZ:
                             continue
-                        add_type(mimetype, subkeyname)
+                        self.add_type(mimetype, subkeyname, strict)
                 except OSError:
                     continue
 
@@ -364,8 +349,8 @@
 
     if files is None or _db is None:
         db = MimeTypes()
-        # Quick return if not supported
-        db.read_windows_registry()
+        if _winreg:
+            db.read_windows_registry()
 
         if files is None:
             files = knownfiles
@@ -463,7 +448,6 @@
         '.dvi'    : 'application/x-dvi',
         '.gtar'   : 'application/x-gtar',
         '.hdf'    : 'application/x-hdf',
-        '.h5'     : 'application/x-hdf5',
         '.latex'  : 'application/x-latex',
         '.mif'    : 'application/x-mif',
         '.cdf'    : 'application/x-netcdf',
@@ -496,19 +480,10 @@
         '.wsdl'   : 'application/xml',
         '.xpdl'   : 'application/xml',
         '.zip'    : 'application/zip',
-        '.3gp'    : 'audio/3gpp',
-        '.3gpp'   : 'audio/3gpp',
-        '.3g2'    : 'audio/3gpp2',
-        '.3gpp2'  : 'audio/3gpp2',
-        '.aac'    : 'audio/aac',
-        '.adts'   : 'audio/aac',
-        '.loas'   : 'audio/aac',
-        '.ass'    : 'audio/aac',
         '.au'     : 'audio/basic',
         '.snd'    : 'audio/basic',
         '.mp3'    : 'audio/mpeg',
         '.mp2'    : 'audio/mpeg',
-        '.opus'   : 'audio/opus',
         '.aif'    : 'audio/x-aiff',
         '.aifc'   : 'audio/x-aiff',
         '.aiff'   : 'audio/x-aiff',
@@ -520,8 +495,6 @@
         '.jpg'    : 'image/jpeg',
         '.jpe'    : 'image/jpeg',
         '.jpeg'   : 'image/jpeg',
-        '.heic'   : 'image/heic',
-        '.heif'   : 'image/heif',
         '.png'    : 'image/png',
         '.svg'    : 'image/svg+xml',
         '.tiff'   : 'image/tiff',
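
With the guess_type() revert above, the exact extension is tried before its
lowercase form (and suffix_map lookups are case-sensitive again). Against
CPython's default tables:

    import mimetypes

    print(mimetypes.guess_type('archive.tar.gz'))
    # ('application/x-tar', 'gzip')
    print(mimetypes.guess_type('photo.JPG'))
    # ('image/jpeg', None) -- found via the lowercase fallback
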
diff --git a/common/py3-stdlib/multiprocessing/managers.py b/common/py3-stdlib/multiprocessing/managers.py
index b6b4cdd..0eb16c6 100644
--- a/common/py3-stdlib/multiprocessing/managers.py
+++ b/common/py3-stdlib/multiprocessing/managers.py
@@ -8,7 +8,8 @@
 # Licensed to PSF under a Contributor Agreement.
 #
 
-__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
+__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
+            'SharedMemoryManager' ]
 
 #
 # Imports
@@ -34,11 +35,9 @@
 from . import get_context
 try:
     from . import shared_memory
+    HAS_SHMEM = True
 except ImportError:
     HAS_SHMEM = False
-else:
-    HAS_SHMEM = True
-    __all__.append('SharedMemoryManager')
 
 #
 # Register some things for pickling
@@ -193,8 +192,11 @@
             t.daemon = True
             t.start()
 
-    def _handle_request(self, c):
-        request = None
+    def handle_request(self, c):
+        '''
+        Handle a new connection
+        '''
+        funcname = result = request = None
         try:
             connection.deliver_challenge(c, self.authkey)
             connection.answer_challenge(c, self.authkey)
@@ -211,7 +213,6 @@
                 msg = ('#TRACEBACK', format_exc())
             else:
                 msg = ('#RETURN', result)
-
         try:
             c.send(msg)
         except Exception as e:
@@ -223,17 +224,7 @@
             util.info(' ... request was %r', request)
             util.info(' ... exception was %r', e)
 
-    def handle_request(self, conn):
-        '''
-        Handle a new connection
-        '''
-        try:
-            self._handle_request(conn)
-        except SystemExit:
-            # Server.serve_client() calls sys.exit(0) on EOF
-            pass
-        finally:
-            conn.close()
+        c.close()
 
     def serve_client(self, conn):
         '''
@@ -968,7 +959,7 @@
 
 
 def AutoProxy(token, serializer, manager=None, authkey=None,
-              exposed=None, incref=True, manager_owned=False):
+              exposed=None, incref=True):
     '''
     Return an auto-proxy for `token`
     '''
@@ -988,7 +979,7 @@
 
     ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
     proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
-                      incref=incref, manager_owned=manager_owned)
+                      incref=incref)
     proxy._isauto = True
     return proxy
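
Two effects of the managers.py revert worth noting: __all__ again advertises
'SharedMemoryManager' even on builds where the shared_memory import fails, and
AutoProxy loses the manager_owned parameter (its absence is what was reported
as "AutoProxy() got an unexpected keyword argument 'manager_owned'" with
nested proxies). Feature-detecting in user code sidesteps the __all__
discrepancy:

    # Prefer an import guard over trusting __all__ here.
    try:
        from multiprocessing.managers import SharedMemoryManager
    except ImportError:
        SharedMemoryManager = None   # build without shared-memory support

    if __name__ == '__main__' and SharedMemoryManager is not None:
        with SharedMemoryManager() as smm:
            shm = smm.SharedMemory(size=64)
            shm.buf[:5] = b'hello'
            print(bytes(shm.buf[:5]))
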
 
diff --git a/common/py3-stdlib/multiprocessing/resource_tracker.py b/common/py3-stdlib/multiprocessing/resource_tracker.py
index cc42dbd..c9bfa9b 100644
--- a/common/py3-stdlib/multiprocessing/resource_tracker.py
+++ b/common/py3-stdlib/multiprocessing/resource_tracker.py
@@ -37,16 +37,8 @@
     import _multiprocessing
     import _posixshmem
 
-    # Use sem_unlink() to clean up named semaphores.
-    #
-    # sem_unlink() may be missing if the Python build process detected the
-    # absence of POSIX named semaphores. In that case, no named semaphores were
-    # ever opened, so no cleanup would be necessary.
-    if hasattr(_multiprocessing, 'sem_unlink'):
-        _CLEANUP_FUNCS.update({
-            'semaphore': _multiprocessing.sem_unlink,
-        })
     _CLEANUP_FUNCS.update({
+        'semaphore': _multiprocessing.sem_unlink,
         'shared_memory': _posixshmem.shm_unlink,
     })
 
diff --git a/common/py3-stdlib/multiprocessing/util.py b/common/py3-stdlib/multiprocessing/util.py
index a468333..21f2a7e 100644
--- a/common/py3-stdlib/multiprocessing/util.py
+++ b/common/py3-stdlib/multiprocessing/util.py
@@ -419,7 +419,7 @@
     try:
         fd = os.open(os.devnull, os.O_RDONLY)
         try:
-            sys.stdin = open(fd, encoding="utf-8", closefd=False)
+            sys.stdin = open(fd, closefd=False)
         except:
             os.close(fd)
             raise
diff --git a/common/py3-stdlib/netrc.py b/common/py3-stdlib/netrc.py
index 734d94c..f0ae48c 100644
--- a/common/py3-stdlib/netrc.py
+++ b/common/py3-stdlib/netrc.py
@@ -26,12 +26,8 @@
             file = os.path.join(os.path.expanduser("~"), ".netrc")
         self.hosts = {}
         self.macros = {}
-        try:
-            with open(file, encoding="utf-8") as fp:
-                self._parse(file, fp, default_netrc)
-        except UnicodeDecodeError:
-            with open(file, encoding="locale") as fp:
-                self._parse(file, fp, default_netrc)
+        with open(file) as fp:
+            self._parse(file, fp, default_netrc)
 
     def _parse(self, file, fp, default_netrc):
         lexer = shlex.shlex(fp)
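
The netrc revert reads ~/.netrc with the locale's default encoding instead of
trying UTF-8 first. The removed two-step strategy is easy to reproduce in
application code (path is a placeholder):

    def read_text_utf8_with_fallback(path):
        # Try UTF-8 first, then retry with the locale default encoding,
        # mirroring the behavior the revert removes from netrc.__init__.
        try:
            with open(path, encoding='utf-8') as fp:
                return fp.read()
        except UnicodeDecodeError:
            with open(path) as fp:
                return fp.read()
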
diff --git a/common/py3-stdlib/ntpath.py b/common/py3-stdlib/ntpath.py
index 527c7ae..6f77177 100644
--- a/common/py3-stdlib/ntpath.py
+++ b/common/py3-stdlib/ntpath.py
@@ -312,25 +312,12 @@
             drive = ''
         userhome = join(drive, os.environ['HOMEPATH'])
 
-    if i != 1: #~user
-        target_user = path[1:i]
-        if isinstance(target_user, bytes):
-            target_user = os.fsdecode(target_user)
-        current_user = os.environ.get('USERNAME')
-
-        if target_user != current_user:
-            # Try to guess user home directory.  By default all user
-            # profile directories are located in the same place and are
-            # named by corresponding usernames.  If userhome isn't a
-            # normal profile directory, this guess is likely wrong,
-            # so we bail out.
-            if current_user != basename(userhome):
-                return path
-            userhome = join(dirname(userhome), target_user)
-
     if isinstance(path, bytes):
         userhome = os.fsencode(userhome)
 
+    if i != 1: #~user
+        userhome = join(dirname(userhome), path[1:i])
+
     return userhome + path[i:]
 
 
@@ -635,7 +622,7 @@
                 tail = join(name, tail) if tail else name
         return tail
 
-    def realpath(path, *, strict=False):
+    def realpath(path):
         path = normpath(path)
         if isinstance(path, bytes):
             prefix = b'\\\\?\\'
@@ -660,8 +647,6 @@
             path = _getfinalpathname(path)
             initial_winerror = 0
         except OSError as ex:
-            if strict:
-                raise
             initial_winerror = ex.winerror
             path = _getfinalpathname_nonstrict(path)
         # The path returned by _getfinalpathname will always start with \\?\ -
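
The expanduser() revert above rebuilds '~user' paths by replacing the last
component of the current user's home, without checking that the home actually
follows the per-user profile layout. On a conventional layout both versions
agree; the difference shows up with relocated profiles:

    import ntpath  # importable on any platform

    # With USERPROFILE=C:\Users\me (conventional), both old and new give:
    #   expanduser('~other') -> 'C:\\Users\\other'
    # With a nonstandard home such as D:\custom\me, the reverted code still
    # "guesses" D:\custom\other, where the newer code returns the path as-is.
    print(ntpath.expanduser('~'))  # depends on USERPROFILE/HOMEPATH;
                                   # returned unchanged if neither is set
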
diff --git a/common/py3-stdlib/nturl2path.py b/common/py3-stdlib/nturl2path.py
index 61852af..853e660 100644
--- a/common/py3-stdlib/nturl2path.py
+++ b/common/py3-stdlib/nturl2path.py
@@ -50,14 +50,6 @@
     # becomes
     #   ///C:/foo/bar/spam.foo
     import urllib.parse
-    # First, clean up some special forms. We are going to sacrifice
-    # the additional information anyway
-    if p[:4] == '\\\\?\\':
-        p = p[4:]
-        if p[:4].upper() == 'UNC\\':
-            p = '\\' + p[4:]
-        elif p[1:2] != ':':
-            raise OSError('Bad path: ' + p)
     if not ':' in p:
         # No drive specifier, just convert slashes and quote the name
         if p[:2] == '\\\\':
@@ -67,7 +59,7 @@
             p = '\\\\' + p
         components = p.split('\\')
         return urllib.parse.quote('/'.join(components))
-    comp = p.split(':', maxsplit=2)
+    comp = p.split(':')
     if len(comp) != 2 or len(comp[0]) > 1:
         error = 'Bad path: ' + p
         raise OSError(error)
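
Two consequences of the nturl2path revert above: '\\?\'-prefixed
extended-length paths are no longer unwrapped, and the colon split is
unbounded again. Ordinary drive paths are unaffected:

    import nturl2path

    print(nturl2path.pathname2url(r'C:\foo\bar'))
    # '///C:/foo/bar'

    try:
        nturl2path.pathname2url(r'\\?\C:\foo\bar')
    except OSError as e:
        print(e)  # 'Bad path: ...' under the reverted module
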
diff --git a/common/py3-stdlib/numbers.py b/common/py3-stdlib/numbers.py
index 5b98e64..ed815ef 100644
--- a/common/py3-stdlib/numbers.py
+++ b/common/py3-stdlib/numbers.py
@@ -33,7 +33,7 @@
     """Complex defines the operations that work on the builtin complex type.
 
     In short, those are: a conversion to complex, .real, .imag, +, -,
-    *, /, **, abs(), .conjugate, ==, and !=.
+    *, /, abs(), .conjugate, ==, and !=.
 
     If it is given heterogeneous arguments, and doesn't have special
     knowledge about them, it should fall back to the builtin complex
@@ -292,11 +292,7 @@
 
 
 class Integral(Rational):
-    """Integral adds methods that work on integral numbers.
-
-    In short, these are conversion to int, pow with modulus, and the
-    bit-string operations.
-    """
+    """Integral adds a conversion to int and the bit-string operations."""
 
     __slots__ = ()
 
diff --git a/common/py3-stdlib/opcode.py b/common/py3-stdlib/opcode.py
index 37e88e9..ac1aa53 100644
--- a/common/py3-stdlib/opcode.py
+++ b/common/py3-stdlib/opcode.py
@@ -67,6 +67,7 @@
 def_op('UNARY_NOT', 12)
 
 def_op('UNARY_INVERT', 15)
+
 def_op('BINARY_MATRIX_MULTIPLY', 16)
 def_op('INPLACE_MATRIX_MULTIPLY', 17)
 
@@ -81,12 +82,8 @@
 def_op('BINARY_TRUE_DIVIDE', 27)
 def_op('INPLACE_FLOOR_DIVIDE', 28)
 def_op('INPLACE_TRUE_DIVIDE', 29)
-def_op('GET_LEN', 30)
-def_op('MATCH_MAPPING', 31)
-def_op('MATCH_SEQUENCE', 32)
-def_op('MATCH_KEYS', 33)
-def_op('COPY_DICT_WITHOUT_KEYS', 34)
 
+def_op('RERAISE', 48)
 def_op('WITH_EXCEPT_START', 49)
 def_op('GET_AITER', 50)
 def_op('GET_ANEXT', 51)
@@ -108,6 +105,7 @@
 def_op('INPLACE_POWER', 67)
 def_op('GET_ITER', 68)
 def_op('GET_YIELD_FROM_ITER', 69)
+
 def_op('PRINT_EXPR', 70)
 def_op('LOAD_BUILD_CLASS', 71)
 def_op('YIELD_FROM', 72)
@@ -139,7 +137,6 @@
 name_op('DELETE_ATTR', 96)      # ""
 name_op('STORE_GLOBAL', 97)     # ""
 name_op('DELETE_GLOBAL', 98)    # ""
-def_op('ROT_N', 99)
 def_op('LOAD_CONST', 100)       # Index in const list
 hasconst.append(100)
 name_op('LOAD_NAME', 101)       # Index in name list
@@ -152,16 +149,18 @@
 hascompare.append(107)
 name_op('IMPORT_NAME', 108)     # Index in name list
 name_op('IMPORT_FROM', 109)     # Index in name list
+
 jrel_op('JUMP_FORWARD', 110)    # Number of bytes to skip
 jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
 jabs_op('JUMP_IF_TRUE_OR_POP', 112)  # ""
 jabs_op('JUMP_ABSOLUTE', 113)        # ""
 jabs_op('POP_JUMP_IF_FALSE', 114)    # ""
 jabs_op('POP_JUMP_IF_TRUE', 115)     # ""
+
 name_op('LOAD_GLOBAL', 116)     # Index in name list
+
 def_op('IS_OP', 117)
 def_op('CONTAINS_OP', 118)
-def_op('RERAISE', 119)
 
 jabs_op('JUMP_IF_NOT_EXC_MATCH', 121)
 jrel_op('SETUP_FINALLY', 122)   # Distance to target address
@@ -173,12 +172,10 @@
 def_op('DELETE_FAST', 126)      # Local variable number
 haslocal.append(126)
 
-def_op('GEN_START', 129)        # Kind of generator/coroutine
 def_op('RAISE_VARARGS', 130)    # Number of raise arguments (1, 2, or 3)
 def_op('CALL_FUNCTION', 131)    # #args
 def_op('MAKE_FUNCTION', 132)    # Flags
 def_op('BUILD_SLICE', 133)      # Number of items
-
 def_op('LOAD_CLOSURE', 135)
 hasfree.append(135)
 def_op('LOAD_DEREF', 136)
@@ -190,24 +187,28 @@
 
 def_op('CALL_FUNCTION_KW', 141)  # #args + #kwargs
 def_op('CALL_FUNCTION_EX', 142)  # Flags
+
 jrel_op('SETUP_WITH', 143)
-def_op('EXTENDED_ARG', 144)
-EXTENDED_ARG = 144
+
 def_op('LIST_APPEND', 145)
 def_op('SET_ADD', 146)
 def_op('MAP_ADD', 147)
+
 def_op('LOAD_CLASSDEREF', 148)
 hasfree.append(148)
 
-def_op('MATCH_CLASS', 152)
+def_op('EXTENDED_ARG', 144)
+EXTENDED_ARG = 144
 
 jrel_op('SETUP_ASYNC_WITH', 154)
+
 def_op('FORMAT_VALUE', 155)
 def_op('BUILD_CONST_KEY_MAP', 156)
 def_op('BUILD_STRING', 157)
 
 name_op('LOAD_METHOD', 160)
 def_op('CALL_METHOD', 161)
+
 def_op('LIST_EXTEND', 162)
 def_op('SET_UPDATE', 163)
 def_op('DICT_MERGE', 164)
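
Net effect of the opcode table revert: RERAISE moves back from 119 to 48, and
the 3.10-era opcodes (GEN_START, ROT_N, MATCH_* and friends) disappear, so the
table matches CPython 3.9 bytecode again. Observable through dis:

    import dis

    print(dis.opmap['RERAISE'])        # 48 after the revert (119 before)
    print('GEN_START' in dis.opmap)    # False: 3.10-era opcode gone
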
diff --git a/common/py3-stdlib/operator.py b/common/py3-stdlib/operator.py
index 241fdbb..fb58851 100644
--- a/common/py3-stdlib/operator.py
+++ b/common/py3-stdlib/operator.py
@@ -155,10 +155,10 @@
     return b in a
 
 def countOf(a, b):
-    "Return the number of items in a which are, or which equal, b."
+    "Return the number of times b occurs in a."
     count = 0
     for i in a:
-        if i is b or i == b:
+        if i == b:
             count += 1
     return count
 
@@ -173,7 +173,7 @@
 def indexOf(a, b):
     "Return the first index of b in a."
     for i, j in enumerate(a):
-        if j is b or j == b:
+        if j == b:
             return i
     else:
         raise ValueError('sequence.index(x): x not in sequence')
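
The countOf()/indexOf() reverts drop the identity short-circuit, so objects
that are unequal to themselves (NaN being the canonical case) are no longer
found, unlike list.index(), which does check identity. Note these pure-Python
definitions are normally shadowed by the C _operator module, which tracked the
same semantics across versions:

    import math
    from operator import countOf, indexOf

    nan = math.nan
    xs = [1.0, nan, 2.0]

    print(xs.index(nan))      # 1 -- list.index() short-circuits on identity
    print(countOf(xs, nan))   # 0 with the reverted, equality-only semantics
    try:
        print(indexOf(xs, nan))
    except ValueError:
        print('not found')    # reverted behavior; the newer code prints 1
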
diff --git a/common/py3-stdlib/os.py b/common/py3-stdlib/os.py
index d26cfc9..b794159 100644
--- a/common/py3-stdlib/os.py
+++ b/common/py3-stdlib/os.py
@@ -36,7 +36,7 @@
 __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
            "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
            "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
-           "extsep"]
+           "popen", "extsep"]
 
 def _exists(name):
     return name in globals()
@@ -969,64 +969,58 @@
 
     __all__.extend(["spawnlp", "spawnlpe"])
 
-# VxWorks has no user space shell provided. As a result, running
-# command in a shell can't be supported.
-if sys.platform != 'vxworks':
-    # Supply os.popen()
-    def popen(cmd, mode="r", buffering=-1):
-        if not isinstance(cmd, str):
-            raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
-        if mode not in ("r", "w"):
-            raise ValueError("invalid mode %r" % mode)
-        if buffering == 0 or buffering is None:
-            raise ValueError("popen() does not support unbuffered streams")
-        import subprocess, io
-        if mode == "r":
-            proc = subprocess.Popen(cmd,
-                                    shell=True, text=True,
-                                    stdout=subprocess.PIPE,
-                                    bufsize=buffering)
-            return _wrap_close(proc.stdout, proc)
+
+# Supply os.popen()
+def popen(cmd, mode="r", buffering=-1):
+    if not isinstance(cmd, str):
+        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
+    if mode not in ("r", "w"):
+        raise ValueError("invalid mode %r" % mode)
+    if buffering == 0 or buffering is None:
+        raise ValueError("popen() does not support unbuffered streams")
+    import subprocess, io
+    if mode == "r":
+        proc = subprocess.Popen(cmd,
+                                shell=True,
+                                stdout=subprocess.PIPE,
+                                bufsize=buffering)
+        return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
+    else:
+        proc = subprocess.Popen(cmd,
+                                shell=True,
+                                stdin=subprocess.PIPE,
+                                bufsize=buffering)
+        return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
+
+# Helper for popen() -- a proxy for a file whose close waits for the process
+class _wrap_close:
+    def __init__(self, stream, proc):
+        self._stream = stream
+        self._proc = proc
+    def close(self):
+        self._stream.close()
+        returncode = self._proc.wait()
+        if returncode == 0:
+            return None
+        if name == 'nt':
+            return returncode
         else:
-            proc = subprocess.Popen(cmd,
-                                    shell=True, text=True,
-                                    stdin=subprocess.PIPE,
-                                    bufsize=buffering)
-            return _wrap_close(proc.stdin, proc)
-
-    # Helper for popen() -- a proxy for a file whose close waits for the process
-    class _wrap_close:
-        def __init__(self, stream, proc):
-            self._stream = stream
-            self._proc = proc
-        def close(self):
-            self._stream.close()
-            returncode = self._proc.wait()
-            if returncode == 0:
-                return None
-            if name == 'nt':
-                return returncode
-            else:
-                return returncode << 8  # Shift left to match old behavior
-        def __enter__(self):
-            return self
-        def __exit__(self, *args):
-            self.close()
-        def __getattr__(self, name):
-            return getattr(self._stream, name)
-        def __iter__(self):
-            return iter(self._stream)
-
-    __all__.append("popen")
+            return returncode << 8  # Shift left to match old behavior
+    def __enter__(self):
+        return self
+    def __exit__(self, *args):
+        self.close()
+    def __getattr__(self, name):
+        return getattr(self._stream, name)
+    def __iter__(self):
+        return iter(self._stream)
 
 # Supply os.fdopen()
-def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs):
+def fdopen(fd, *args, **kwargs):
     if not isinstance(fd, int):
         raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
     import io
-    if "b" not in mode:
-        encoding = io.text_encoding(encoding)
-    return io.open(fd, mode, buffering, encoding, *args, **kwargs)
+    return io.open(fd, *args, **kwargs)
 
 
 # For testing purposes, make sure the function is available when the C
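
Besides dropping the VxWorks guard, text=True, and fdopen()'s explicit
encoding handling, the popen() revert above keeps the historical close()
contract: None on success, otherwise the exit status, left-shifted 8 bits on
POSIX to match the old os.wait() format:

    import os

    status = os.popen('exit 3').close()
    print(status)  # 768 on POSIX (3 << 8); 3 on Windows; None for exit 0
    if status is not None and os.name == 'posix':
        print(os.WEXITSTATUS(status))  # 3
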
diff --git a/common/py3-stdlib/pathlib.py b/common/py3-stdlib/pathlib.py
index 621fba0..147be2f 100644
--- a/common/py3-stdlib/pathlib.py
+++ b/common/py3-stdlib/pathlib.py
@@ -6,7 +6,6 @@
 import posixpath
 import re
 import sys
-import warnings
 from _collections_abc import Sequence
 from errno import EINVAL, ENOENT, ENOTDIR, EBADF, ELOOP
 from operator import attrgetter
@@ -14,6 +13,18 @@
 from urllib.parse import quote_from_bytes as urlquote_from_bytes
 
 
+supports_symlinks = True
+if os.name == 'nt':
+    import nt
+    if sys.getwindowsversion()[:2] >= (6, 0):
+        from nt import _getfinalpathname
+    else:
+        supports_symlinks = False
+        _getfinalpathname = None
+else:
+    nt = None
+
+
 __all__ = [
     "PurePath", "PurePosixPath", "PureWindowsPath",
     "Path", "PosixPath", "WindowsPath",
@@ -23,17 +34,13 @@
 # Internals
 #
 
-_WINERROR_NOT_READY = 21  # drive exists but is not accessible
-_WINERROR_INVALID_NAME = 123  # fix for bpo-35306
-_WINERROR_CANT_RESOLVE_FILENAME = 1921  # broken symlink pointing to itself
-
 # EBADF - guard against macOS `stat` throwing EBADF
 _IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF, ELOOP)
 
 _IGNORED_WINERRORS = (
-    _WINERROR_NOT_READY,
-    _WINERROR_INVALID_NAME,
-    _WINERROR_CANT_RESOLVE_FILENAME)
+    21,  # ERROR_NOT_READY - drive exists but is not accessible
+    1921,  # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
+)
 
 def _ignore_error(exception):
     return (getattr(exception, 'errno', None) in _IGNORED_ERROS or
@@ -124,25 +131,16 @@
     ext_namespace_prefix = '\\\\?\\'
 
     reserved_names = (
-        {'CON', 'PRN', 'AUX', 'NUL', 'CONIN$', 'CONOUT$'} |
-        {'COM%s' % c for c in '123456789\xb9\xb2\xb3'} |
-        {'LPT%s' % c for c in '123456789\xb9\xb2\xb3'}
+        {'CON', 'PRN', 'AUX', 'NUL'} |
+        {'COM%d' % i for i in range(1, 10)} |
+        {'LPT%d' % i for i in range(1, 10)}
         )
 
     # Interesting findings about extended paths:
-    # * '\\?\c:\a' is an extended path, which bypasses normal Windows API
-    #   path processing. Thus relative paths are not resolved and slash is not
-    #   translated to backslash. It has the native NT path limit of 32767
-    #   characters, but a bit less after resolving device symbolic links,
-    #   such as '\??\C:' => '\Device\HarddiskVolume2'.
-    # * '\\?\c:/a' looks for a device named 'C:/a' because slash is a
-    #   regular name character in the object namespace.
-    # * '\\?\c:\foo/bar' is invalid because '/' is illegal in NT filesystems.
-    #   The only path separator at the filesystem level is backslash.
-    # * '//?/c:\a' and '//?/c:/a' are effectively equivalent to '\\.\c:\a' and
-    #   thus limited to MAX_PATH.
-    # * Prior to Windows 8, ANSI API bytes paths are limited to MAX_PATH,
-    #   even with the '\\?\' prefix.
+    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
+    #   but '\\?\c:/a' is not
+    # - extended paths are always absolute; "relative" extended paths will
+    #   fail.
 
     def splitroot(self, part, sep=sep):
         first = part[0:1]
@@ -192,6 +190,30 @@
     def compile_pattern(self, pattern):
         return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch
 
+    def resolve(self, path, strict=False):
+        s = str(path)
+        if not s:
+            return os.getcwd()
+        previous_s = None
+        if _getfinalpathname is not None:
+            if strict:
+                return self._ext_to_normal(_getfinalpathname(s))
+            else:
+                tail_parts = []  # End of the path after the first one not found
+                while True:
+                    try:
+                        s = self._ext_to_normal(_getfinalpathname(s))
+                    except FileNotFoundError:
+                        previous_s = s
+                        s, tail = os.path.split(s)
+                        tail_parts.append(tail)
+                        if previous_s == s:
+                            return path
+                    else:
+                        return os.path.join(s, *reversed(tail_parts))
+        # Means fallback on absolute
+        return None
+
     def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
         prefix = ''
         if s.startswith(ext_prefix):
@@ -202,18 +224,21 @@
                 s = '\\' + s[3:]
         return prefix, s
 
+    def _ext_to_normal(self, s):
+        # Turn back an extended path into a normal DOS-like path
+        return self._split_extended_path(s)[1]
+
     def is_reserved(self, parts):
         # NOTE: the rules for reserved names seem somewhat complicated
-        # (e.g. r"..\NUL" is reserved but not r"foo\NUL" if "foo" does not
-        # exist). We err on the side of caution and return True for paths
-        # which are not considered reserved by Windows.
+        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
+        # We err on the side of caution and return True for paths which are
+        # not considered reserved by Windows.
         if not parts:
             return False
         if parts[0].startswith('\\\\'):
             # UNC paths are never reserved
             return False
-        name = parts[-1].partition('.')[0].partition(':')[0].rstrip(' ')
-        return name.upper() in self.reserved_names
+        return parts[-1].partition('.')[0].upper() in self.reserved_names
 
     def make_uri(self, path):
         # Under Windows, file URIs use the UTF-8 encoding.
@@ -227,6 +252,34 @@
             # It's a path on a network drive => 'file://host/share/a/b'
             return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
 
+    def gethomedir(self, username):
+        if 'USERPROFILE' in os.environ:
+            userhome = os.environ['USERPROFILE']
+        elif 'HOMEPATH' in os.environ:
+            try:
+                drv = os.environ['HOMEDRIVE']
+            except KeyError:
+                drv = ''
+            userhome = drv + os.environ['HOMEPATH']
+        else:
+            raise RuntimeError("Can't determine home directory")
+
+        if username:
+            # Try to guess the user's home directory.  By default all user
+            # directories are located in the same place and are named after
+            # the corresponding usernames.  If the current user's home points
+            # to a nonstandard place, this guess is likely wrong.

+            if os.environ['USERNAME'] != username:
+                drv, root, parts = self.parse_parts((userhome,))
+                if parts[-1] != os.environ['USERNAME']:
+                    raise RuntimeError("Can't determine home directory "
+                                       "for %r" % username)
+                parts[-1] = username
+                if drv or root:
+                    userhome = drv + root + self.join(parts[1:])
+                else:
+                    userhome = self.join(parts)
+        return userhome
 
 class _PosixFlavour(_Flavour):
     sep = '/'
@@ -260,6 +313,54 @@
     def compile_pattern(self, pattern):
         return re.compile(fnmatch.translate(pattern)).fullmatch
 
+    def resolve(self, path, strict=False):
+        sep = self.sep
+        accessor = path._accessor
+        seen = {}
+        def _resolve(path, rest):
+            if rest.startswith(sep):
+                path = ''
+
+            for name in rest.split(sep):
+                if not name or name == '.':
+                    # current dir
+                    continue
+                if name == '..':
+                    # parent dir
+                    path, _, _ = path.rpartition(sep)
+                    continue
+                if path.endswith(sep):
+                    newpath = path + name
+                else:
+                    newpath = path + sep + name
+                if newpath in seen:
+                    # Already seen this path
+                    path = seen[newpath]
+                    if path is not None:
+                        # use cached value
+                        continue
+                    # The symlink is not resolved, so we must have a symlink loop.
+                    raise RuntimeError("Symlink loop from %r" % newpath)
+                # Resolve the symbolic link
+                try:
+                    target = accessor.readlink(newpath)
+                except OSError as e:
+                    if e.errno != EINVAL and strict:
+                        raise
+                    # Not a symlink, or non-strict mode. We just leave the path
+                    # untouched.
+                    path = newpath
+                else:
+                    seen[newpath] = None # not resolved symlink
+                    path = _resolve(path, target)
+                    seen[newpath] = path # resolved symlink
+
+            return path
+        # NOTE: according to POSIX, getcwd() cannot contain path components
+        # which are symlinks.
+        base = '' if path.is_absolute() else os.getcwd()
+        return _resolve(base, str(path)) or sep
+
     def is_reserved(self, parts):
         return False
 
@@ -269,6 +370,21 @@
         bpath = bytes(path)
         return 'file://' + urlquote_from_bytes(bpath)
 
+    def gethomedir(self, username):
+        if not username:
+            try:
+                return os.environ['HOME']
+            except KeyError:
+                import pwd
+                return pwd.getpwuid(os.getuid()).pw_dir
+        else:
+            import pwd
+            try:
+                return pwd.getpwnam(username).pw_dir
+            except KeyError:
+                raise RuntimeError("Can't determine home directory "
+                                   "for %r" % username)
+
 
 _windows_flavour = _WindowsFlavour()
 _posix_flavour = _PosixFlavour()
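
The hunks above restore the pre-3.10 design in which each _Flavour carries its
own resolve() and gethomedir() instead of delegating to os.path.realpath and
os.path.expanduser. The public surface is unchanged either way (this assumes
HOME/USERPROFILE is set; otherwise the reverted code raises RuntimeError):

    from pathlib import Path

    print(Path.home())                      # via flavour.gethomedir(None) here
    print(Path('~/projects').expanduser())
    print(Path('.').resolve())              # flavour-specific symlink walking
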
@@ -283,7 +399,9 @@
 
     stat = os.stat
 
-    open = io.open
+    lstat = os.lstat
+
+    open = os.open
 
     listdir = os.listdir
 
@@ -291,14 +409,21 @@
 
     chmod = os.chmod
 
+    if hasattr(os, "lchmod"):
+        lchmod = os.lchmod
+    else:
+        def lchmod(self, pathobj, mode):
+            raise NotImplementedError("lchmod() not available on this system")
+
     mkdir = os.mkdir
 
     unlink = os.unlink
 
     if hasattr(os, "link"):
-        link = os.link
+        link_to = os.link
     else:
-        def link(self, src, dst):
+        @staticmethod
+        def link_to(self, target):
             raise NotImplementedError("os.link() not available on this system")
 
     rmdir = os.rmdir
@@ -307,35 +432,23 @@
 
     replace = os.replace
 
-    if hasattr(os, "symlink"):
-        symlink = os.symlink
+    if nt:
+        if supports_symlinks:
+            symlink = os.symlink
+        else:
+            def symlink(a, b, target_is_directory):
+                raise NotImplementedError("symlink() not available on this system")
     else:
-        def symlink(self, src, dst, target_is_directory=False):
-            raise NotImplementedError("os.symlink() not available on this system")
+        # Under POSIX, os.symlink() takes two args
+        @staticmethod
+        def symlink(a, b, target_is_directory):
+            return os.symlink(a, b)
 
-    def touch(self, path, mode=0o666, exist_ok=True):
-        if exist_ok:
-            # First try to bump modification time
-            # Implementation note: GNU touch uses the UTIME_NOW option of
-            # the utimensat() / futimens() functions.
-            try:
-                os.utime(path, None)
-            except OSError:
-                # Avoid exception chaining
-                pass
-            else:
-                return
-        flags = os.O_CREAT | os.O_WRONLY
-        if not exist_ok:
-            flags |= os.O_EXCL
-        fd = os.open(path, flags, mode)
-        os.close(fd)
+    utime = os.utime
 
-    if hasattr(os, "readlink"):
-        readlink = os.readlink
-    else:
-        def readlink(self, path):
-            raise NotImplementedError("os.readlink() not available on this system")
+    # Helper for resolve()
+    def readlink(self, path):
+        return os.readlink(path)
 
     def owner(self, path):
         try:
@@ -351,12 +464,6 @@
         except ImportError:
             raise NotImplementedError("Path.group() is unsupported on this system")
 
-    getcwd = os.getcwd
-
-    expanduser = staticmethod(os.path.expanduser)
-
-    realpath = staticmethod(os.path.realpath)
-
 
 _normal_accessor = _NormalAccessor()
 
@@ -523,10 +630,7 @@
             return len(self._parts)
 
     def __getitem__(self, idx):
-        if isinstance(idx, slice):
-            return tuple(self[i] for i in range(*idx.indices(len(self))))
-
-        if idx >= len(self) or idx < -len(self):
+        if idx < 0 or idx >= len(self):
             raise IndexError(idx)
         return self._pathcls._from_parsed_parts(self._drv, self._root,
                                                 self._parts[:-idx - 1])
@@ -585,7 +689,7 @@
         return cls._flavour.parse_parts(parts)
 
     @classmethod
-    def _from_parts(cls, args):
+    def _from_parts(cls, args, init=True):
         # We need to call _parse_args on the instance, so as to get the
         # right flavour.
         self = object.__new__(cls)
@@ -593,14 +697,18 @@
         self._drv = drv
         self._root = root
         self._parts = parts
+        if init:
+            self._init()
         return self
 
     @classmethod
-    def _from_parsed_parts(cls, drv, root, parts):
+    def _from_parsed_parts(cls, drv, root, parts, init=True):
         self = object.__new__(cls)
         self._drv = drv
         self._root = root
         self._parts = parts
+        if init:
+            self._init()
         return self
 
     @classmethod
@@ -610,6 +718,10 @@
         else:
             return cls._flavour.join(parts)
 
+    def _init(self):
+        # Overridden in concrete Path
+        pass
+
     def _make_child(self, args):
         drv, root, parts = self._parse_args(args)
         drv, root, parts = self._flavour.join_parsed_parts(
@@ -949,18 +1061,29 @@
     object. You can also instantiate a PosixPath or WindowsPath directly,
     but cannot instantiate a WindowsPath on a POSIX system or vice versa.
     """
-    _accessor = _normal_accessor
-    __slots__ = ()
+    __slots__ = (
+        '_accessor',
+    )
 
     def __new__(cls, *args, **kwargs):
         if cls is Path:
             cls = WindowsPath if os.name == 'nt' else PosixPath
-        self = cls._from_parts(args)
+        self = cls._from_parts(args, init=False)
         if not self._flavour.is_supported:
             raise NotImplementedError("cannot instantiate %r on your system"
                                       % (cls.__name__,))
+        self._init()
         return self
 
+    def _init(self,
+              # Private non-constructor arguments
+              template=None,
+              ):
+        if template is not None:
+            self._accessor = template._accessor
+        else:
+            self._accessor = _normal_accessor
+
     def _make_child_relpath(self, part):
         # This is an optimization used for dir walking.  `part` must be
         # a single part relative to this path.
@@ -981,6 +1104,17 @@
         # removed in the future.
         pass
 
+    def _opener(self, name, flags, mode=0o666):
+        # A stub for the opener argument to built-in open()
+        return self._accessor.open(self, flags, mode)
+
+    def _raw_open(self, flags, mode=0o777):
+        """
+        Open the file pointed by this path and return a file descriptor,
+        as os.open() does.
+        """
+        return self._accessor.open(self, flags, mode)
+
     # Public API
 
     @classmethod
@@ -988,14 +1122,14 @@
         """Return a new path pointing to the current working directory
         (as returned by os.getcwd()).
         """
-        return cls(cls._accessor.getcwd())
+        return cls(os.getcwd())
 
     @classmethod
     def home(cls):
         """Return a new path pointing to the user's home directory (as
         returned by os.path.expanduser('~')).
         """
-        return cls("~").expanduser()
+        return cls(cls()._flavour.gethomedir(None))
 
     def samefile(self, other_path):
         """Return whether other_path is the same or not as this file
@@ -1057,7 +1191,9 @@
             return self
         # FIXME this must defer to the specific flavour (and, under Windows,
         # use nt._getfullpathname())
-        return self._from_parts([self._accessor.getcwd()] + self._parts)
+        obj = self._from_parts([os.getcwd()] + self._parts, init=False)
+        obj._init(template=self)
+        return obj
 
     def resolve(self, strict=False):
         """
@@ -1065,34 +1201,24 @@
         normalizing it (for example turning slashes into backslashes under
         Windows).
         """
+        s = self._flavour.resolve(self, strict=strict)
+        if s is None:
+            # No symlink resolution => for consistency, raise an error if
+            # the path doesn't exist or is forbidden
+            self.stat()
+            s = str(self.absolute())
+        # Now we have no symlinks in the path, it's safe to normalize it.
+        normed = self._flavour.pathmod.normpath(s)
+        obj = self._from_parts((normed,), init=False)
+        obj._init(template=self)
+        return obj
 
-        def check_eloop(e):
-            winerror = getattr(e, 'winerror', 0)
-            if e.errno == ELOOP or winerror == _WINERROR_CANT_RESOLVE_FILENAME:
-                raise RuntimeError("Symlink loop from %r" % e.filename)
-
-        try:
-            s = self._accessor.realpath(self, strict=strict)
-        except OSError as e:
-            check_eloop(e)
-            raise
-        p = self._from_parts((s,))
-
-        # In non-strict mode, realpath() doesn't raise on symlink loops.
-        # Ensure we get an exception by calling stat()
-        if not strict:
-            try:
-                p.stat()
-            except OSError as e:
-                check_eloop(e)
-        return p
-
-    def stat(self, *, follow_symlinks=True):
+    def stat(self):
         """
         Return the result of the stat() system call on this path, like
         os.stat() does.
         """
-        return self._accessor.stat(self, follow_symlinks=follow_symlinks)
+        return self._accessor.stat(self)
 
     def owner(self):
         """
@@ -1112,10 +1238,8 @@
         Open the file pointed by this path and return a file object, as
         the built-in open() function does.
         """
-        if "b" not in mode:
-            encoding = io.text_encoding(encoding)
-        return self._accessor.open(self, mode, buffering, encoding, errors,
-                                   newline)
+        return io.open(self, mode, buffering, encoding, errors, newline,
+                       opener=self._opener)
 
     def read_bytes(self):
         """
@@ -1128,7 +1252,6 @@
         """
         Open the file in text mode, read it, and close the file.
         """
-        encoding = io.text_encoding(encoding)
         with self.open(mode='r', encoding=encoding, errors=errors) as f:
             return f.read()
 
@@ -1141,15 +1264,14 @@
         with self.open(mode='wb') as f:
             return f.write(view)
 
-    def write_text(self, data, encoding=None, errors=None, newline=None):
+    def write_text(self, data, encoding=None, errors=None):
         """
         Open the file in text mode, write to it, and close the file.
         """
         if not isinstance(data, str):
             raise TypeError('data must be str, not %s' %
                             data.__class__.__name__)
-        encoding = io.text_encoding(encoding)
-        with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
+        with self.open(mode='w', encoding=encoding, errors=errors) as f:
             return f.write(data)
 
     def readlink(self):
@@ -1157,13 +1279,30 @@
         Return the path to which the symbolic link points.
         """
         path = self._accessor.readlink(self)
-        return self._from_parts((path,))
+        obj = self._from_parts((path,), init=False)
+        obj._init(template=self)
+        return obj
 
     def touch(self, mode=0o666, exist_ok=True):
         """
         Create this file with the given access mode, if it doesn't exist.
         """
-        self._accessor.touch(self, mode, exist_ok)
+        if exist_ok:
+            # First try to bump modification time
+            # Implementation note: GNU touch uses the UTIME_NOW option of
+            # the utimensat() / futimens() functions.
+            try:
+                self._accessor.utime(self, None)
+            except OSError:
+                # Avoid exception chaining
+                pass
+            else:
+                return
+        flags = os.O_CREAT | os.O_WRONLY
+        if not exist_ok:
+            flags |= os.O_EXCL
+        fd = self._raw_open(flags, mode)
+        os.close(fd)
 
     def mkdir(self, mode=0o777, parents=False, exist_ok=False):
         """
@@ -1182,18 +1321,18 @@
             if not exist_ok or not self.is_dir():
                 raise
 
-    def chmod(self, mode, *, follow_symlinks=True):
+    def chmod(self, mode):
         """
         Change the permissions of the path, like os.chmod().
         """
-        self._accessor.chmod(self, mode, follow_symlinks=follow_symlinks)
+        self._accessor.chmod(self, mode)
 
     def lchmod(self, mode):
         """
         Like chmod(), except if the path points to a symlink, the symlink's
         permissions are changed, rather than its target's.
         """
-        self.chmod(mode, follow_symlinks=False)
+        self._accessor.lchmod(self, mode)
 
     def unlink(self, missing_ok=False):
         """
@@ -1217,7 +1356,13 @@
         Like stat(), except if the path points to a symlink, the symlink's
         status information is returned, rather than its target's.
         """
-        return self.stat(follow_symlinks=False)
+        return self._accessor.lstat(self)
+
+    def link_to(self, target):
+        """
+        Create a hard link pointing to a path named target.
+        """
+        self._accessor.link_to(self, target)
 
     def rename(self, target):
         """
@@ -1247,37 +1392,11 @@
 
     def symlink_to(self, target, target_is_directory=False):
         """
-        Make this path a symlink pointing to the target path.
-        Note the order of arguments (link, target) is the reverse of os.symlink.
+        Make this path a symlink pointing to the given path.
+        Note the order of arguments (self, target) is the reverse of os.symlink's.
         """
         self._accessor.symlink(target, self, target_is_directory)
 
-    def hardlink_to(self, target):
-        """
-        Make this path a hard link pointing to the same file as *target*.
-
-        Note the order of arguments (self, target) is the reverse of os.link's.
-        """
-        self._accessor.link(target, self)
-
-    def link_to(self, target):
-        """
-        Make the target path a hard link pointing to this path.
-
-        Note this function does not make this path a hard link to *target*,
-        despite the implication of the function and argument names. The order
-        of arguments (target, link) is the reverse of Path.symlink_to, but
-        matches that of os.link.
-
-        Deprecated since Python 3.10 and scheduled for removal in Python 3.12.
-        Use `hardlink_to()` instead.
-        """
-        warnings.warn("pathlib.Path.link_to() is deprecated and is scheduled "
-                      "for removal in Python 3.12. "
-                      "Use pathlib.Path.hardlink_to() instead.",
-                      DeprecationWarning, stacklevel=2)
-        self._accessor.link(self, target)
-
     # Convenience functions for querying the stat results
 
     def exists(self):
@@ -1305,7 +1424,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
             return False
         except ValueError:
             # Non-encodable path
@@ -1322,7 +1441,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
             return False
         except ValueError:
             # Non-encodable path
@@ -1373,7 +1492,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
             return False
         except ValueError:
             # Non-encodable path
@@ -1389,7 +1508,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
             return False
         except ValueError:
             # Non-encodable path
@@ -1405,7 +1524,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
             return False
         except ValueError:
             # Non-encodable path
@@ -1421,7 +1540,7 @@
             if not _ignore_error(e):
                 raise
             # Path doesn't exist or is a broken symlink
-            # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
             return False
         except ValueError:
             # Non-encodable path
@@ -1433,9 +1552,7 @@
         """
         if (not (self._drv or self._root) and
             self._parts and self._parts[0][:1] == '~'):
-            homedir = self._accessor.expanduser(self._parts[0])
-            if homedir[:1] == "~":
-                raise RuntimeError("Could not determine home directory.")
+            homedir = self._flavour.gethomedir(self._parts[0][1:])
             return self._from_parts([homedir] + self._parts[1:])
 
         return self
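
The touch() body restored above encodes a two-step strategy: try to bump the timestamp first, and create the file only when that fails. A minimal standalone sketch of the same logic against plain os calls (the `touch` name and `path` argument here are illustrative, not part of the patch):

    import os

    def touch(path, mode=0o666, exist_ok=True):
        # Illustrative re-statement of the restored pathlib logic.
        if exist_ok:
            try:
                os.utime(path, None)      # bump mtime, like GNU touch's UTIME_NOW
            except OSError:
                pass                      # file missing: fall through and create it
            else:
                return
        flags = os.O_CREAT | os.O_WRONLY
        if not exist_ok:
            flags |= os.O_EXCL            # raise FileExistsError if it already exists
        os.close(os.open(path, flags, mode))
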
diff --git a/common/py3-stdlib/pdb.py b/common/py3-stdlib/pdb.py
index 7ab50b4..d7d9571 100755
--- a/common/py3-stdlib/pdb.py
+++ b/common/py3-stdlib/pdb.py
@@ -384,7 +384,8 @@
                 sys.stdin = save_stdin
                 sys.displayhook = save_displayhook
         except:
-            self._error_exc()
+            exc_info = sys.exc_info()[:2]
+            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
 
     def precmd(self, line):
         """Handle alias expansion and ';;' separator."""
@@ -751,8 +752,7 @@
         """
         # this method should be callable before starting debugging, so default
         # to "no globals" if there is no current frame
-        frame = getattr(self, 'curframe', None)
-        globs = frame.f_globals if frame else None
+        globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
         line = linecache.getline(filename, lineno, globs)
         if not line:
             self.message('End of file')
@@ -893,7 +893,7 @@
             except ValueError:
                 err = "Invalid line number (%s)" % arg
             else:
-                bplist = self.get_breaks(filename, lineno)[:]
+                bplist = self.get_breaks(filename, lineno)
                 err = self.clear_break(filename, lineno)
             if err:
                 self.error(err)
@@ -1026,11 +1026,7 @@
         if arg:
             import shlex
             argv0 = sys.argv[0:1]
-            try:
-                sys.argv = shlex.split(arg)
-            except ValueError as e:
-                self.error('Cannot run %s: %s' % (arg, e))
-                return
+            sys.argv = shlex.split(arg)
             sys.argv[:0] = argv0
         # this is caught in the main debugger loop
         raise Restart
@@ -1107,7 +1103,8 @@
         try:
             sys.call_tracing(p.run, (arg, globals, locals))
         except Exception:
-            self._error_exc()
+            exc_info = sys.exc_info()[:2]
+            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
         self.message("LEAVING RECURSIVE DEBUGGER")
         sys.settrace(self.trace_dispatch)
         self.lastcmd = p.lastcmd
@@ -1165,7 +1162,8 @@
         try:
             return eval(arg, self.curframe.f_globals, self.curframe_locals)
         except:
-            self._error_exc()
+            exc_info = sys.exc_info()[:2]
+            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
             raise
 
     def _getval_except(self, arg, frame=None):
@@ -1179,31 +1177,23 @@
             err = traceback.format_exception_only(*exc_info)[-1].strip()
             return _rstr('** raised %s **' % err)
 
-    def _error_exc(self):
-        exc_info = sys.exc_info()[:2]
-        self.error(traceback.format_exception_only(*exc_info)[-1].strip())
-
-    def _msg_val_func(self, arg, func):
-        try:
-            val = self._getval(arg)
-        except:
-            return  # _getval() has displayed the error
-        try:
-            self.message(func(val))
-        except:
-            self._error_exc()
-
     def do_p(self, arg):
         """p expression
         Print the value of the expression.
         """
-        self._msg_val_func(arg, repr)
+        try:
+            self.message(repr(self._getval(arg)))
+        except:
+            pass
 
     def do_pp(self, arg):
         """pp expression
         Pretty-print the value of the expression.
         """
-        self._msg_val_func(arg, pprint.pformat)
+        try:
+            self.message(pprint.pformat(self._getval(arg)))
+        except:
+            pass
 
     complete_print = _complete_expression
     complete_p = _complete_expression
@@ -1493,9 +1483,6 @@
                 self.error('No help for %r; please do not run Python with -OO '
                            'if you need command help' % arg)
                 return
-            if command.__doc__ is None:
-                self.error('No help for %r; __doc__ string missing' % arg)
-                return
             self.message(command.__doc__.rstrip())
 
     do_h = do_help
@@ -1697,19 +1684,10 @@
         print('Error:', mainpyfile, 'does not exist')
         sys.exit(1)
 
-    if run_as_module:
-        import runpy
-        try:
-            runpy._get_module_details(mainpyfile)
-        except Exception:
-            traceback.print_exc()
-            sys.exit(1)
-
     sys.argv[:] = args      # Hide "pdb.py" and pdb options from argument list
 
+    # Replace pdb's dir with script's dir in front of module search path.
     if not run_as_module:
-        mainpyfile = os.path.realpath(mainpyfile)
-        # Replace pdb's dir with script's dir in front of module search path.
         sys.path[0] = os.path.dirname(mainpyfile)
 
     # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
@@ -1729,7 +1707,7 @@
             print("The program finished and will be restarted")
         except Restart:
             print("Restarting", mainpyfile, "with arguments:")
-            print("\t" + " ".join(sys.argv[1:]))
+            print("\t" + " ".join(args))
         except SystemExit:
             # In most cases SystemExit does not warrant a post-mortem session.
             print("The program exited via sys.exit(). Exit status:", end=' ')
diff --git a/common/py3-stdlib/pickle.py b/common/py3-stdlib/pickle.py
index e7f30f2..e63a8b6 100644
--- a/common/py3-stdlib/pickle.py
+++ b/common/py3-stdlib/pickle.py
@@ -818,7 +818,6 @@
             self._write_large_bytes(BYTEARRAY8 + pack("<Q", n), obj)
         else:
             self.write(BYTEARRAY8 + pack("<Q", n) + obj)
-        self.memoize(obj)
     dispatch[bytearray] = save_bytearray
 
     if _HAVE_PICKLE_BUFFER:
@@ -1173,7 +1172,7 @@
         used in Python 3.  The *encoding* and *errors* tell pickle how
         to decode 8-bit string instances pickled by Python 2; these
         default to 'ASCII' and 'strict', respectively. *encoding* can be
-        'bytes' to read these 8-bit string instances as bytes objects.
+        'bytes' to read these 8-bit string instances as bytes objects.
         """
         self._buffers = iter(buffers) if buffers is not None else None
         self._file_readline = file.readline
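
Dropping the memoize() call above means a bytearray that appears twice in one pickle is serialized twice rather than once plus a memo reference, and the two copies no longer share identity after loading. A quick way to observe this (results depend on which pickle implementation the running interpreter has):

    import pickle

    ba = bytearray(b"x" * 1000)
    payload = pickle.dumps([ba, ba], protocol=5)   # protocol 5 uses BYTEARRAY8
    a, b = pickle.loads(payload)
    print(len(payload))    # roughly 2000+ without memoization, ~1000+ with it
    print(a is b)          # False without memoization, True with it
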
diff --git a/common/py3-stdlib/pipes.py b/common/py3-stdlib/pipes.py
index 8cc74b0..f1a16f6 100644
--- a/common/py3-stdlib/pipes.py
+++ b/common/py3-stdlib/pipes.py
@@ -109,7 +109,7 @@
 
     def append(self, cmd, kind):
         """t.append(cmd, kind) adds a new step at the end."""
-        if not isinstance(cmd, str):
+        if type(cmd) is not type(''):
             raise TypeError('Template.append: cmd must be a string')
         if kind not in stepkinds:
             raise ValueError('Template.append: bad kind %r' % (kind,))
@@ -125,7 +125,7 @@
 
     def prepend(self, cmd, kind):
         """t.prepend(cmd, kind) adds a new step at the front."""
-        if not isinstance(cmd, str):
+        if type(cmd) is not type(''):
             raise TypeError('Template.prepend: cmd must be a string')
         if kind not in stepkinds:
             raise ValueError('Template.prepend: bad kind %r' % (kind,))
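
The restored `type(cmd) is not type('')` check is stricter than the isinstance() form it replaces: it rejects str subclasses outright. Illustration (the `Cmd` class is hypothetical):

    class Cmd(str):
        """A hypothetical str subclass."""

    cmd = Cmd("tr a-z A-Z")
    print(type(cmd) is not type(''))   # True  -> the restored check raises TypeError
    print(not isinstance(cmd, str))    # False -> the isinstance() form accepts it
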
diff --git a/common/py3-stdlib/pkgutil.py b/common/py3-stdlib/pkgutil.py
index 8e010c7..4c18467 100644
--- a/common/py3-stdlib/pkgutil.py
+++ b/common/py3-stdlib/pkgutil.py
@@ -7,6 +7,7 @@
 import importlib.machinery
 import os
 import os.path
+import re
 import sys
 from types import ModuleType
 import warnings
@@ -204,8 +205,7 @@
 
     def __init__(self, path=None):
         global imp
-        warnings.warn("This emulation is deprecated and slated for removal "
-                      "in Python 3.12; use 'importlib' instead",
+        warnings.warn("This emulation is deprecated, use 'importlib' instead",
              DeprecationWarning)
         _import_imp()
         self.path = path
@@ -272,8 +272,7 @@
     code = source = None
 
     def __init__(self, fullname, file, filename, etc):
-        warnings.warn("This emulation is deprecated and slated for removal in "
-                      "Python 3.12; use 'importlib' instead",
+        warnings.warn("This emulation is deprecated, use 'importlib' instead",
                       DeprecationWarning)
         _import_imp()
         self.file = file
@@ -639,7 +638,9 @@
     return loader.get_data(resource_name)
 
 
-_NAME_PATTERN = None
+_DOTTED_WORDS = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
+_NAME_PATTERN = re.compile(f'^(?P<pkg>{_DOTTED_WORDS})(?P<cln>:(?P<obj>{_DOTTED_WORDS})?)?$', re.U)
+del _DOTTED_WORDS
 
 def resolve_name(name):
     """
@@ -671,17 +672,8 @@
     ValueError - if `name` isn't in a recognised format
     ImportError - if an import failed when it shouldn't have
     AttributeError - if a failure occurred when traversing the object hierarchy
-                     within the imported package to get to the desired object.
+                     within the imported package to get to the desired object.
     """
-    global _NAME_PATTERN
-    if _NAME_PATTERN is None:
-        # Lazy import to speedup Python startup time
-        import re
-        dotted_words = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
-        _NAME_PATTERN = re.compile(f'^(?P<pkg>{dotted_words})'
-                                   f'(?P<cln>:(?P<obj>{dotted_words})?)?$',
-                                   re.UNICODE)
-
     m = _NAME_PATTERN.match(name)
     if not m:
         raise ValueError(f'invalid format: {name!r}')
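
The hunk above moves the name-pattern regex back to import time. What the pattern accepts, checked with an equivalent regex rebuilt locally for illustration:

    import re

    dotted = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
    pattern = re.compile(f'^(?P<pkg>{dotted})(?P<cln>:(?P<obj>{dotted})?)?$', re.U)

    for name in ('os.path:join', 'os.path', '1bad:name'):
        m = pattern.match(name)
        print(name, '->', m.group('pkg', 'obj') if m else 'invalid format')
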
diff --git a/common/py3-stdlib/platform.py b/common/py3-stdlib/platform.py
index e32f9c1..e9f50ab 100755
--- a/common/py3-stdlib/platform.py
+++ b/common/py3-stdlib/platform.py
@@ -174,7 +174,7 @@
         The file is read and scanned in chunks of chunksize bytes.
 
     """
-    if not executable:
+    if executable is None:
         try:
             ver = os.confstr('CS_GNU_LIBC_VERSION')
             # parse 'glibc 2.28' as ('glibc', '2.28')
@@ -239,9 +239,11 @@
     if build:
         l.append(build)
     try:
-        strings = list(map(str, map(int, l)))
+        ints = map(int, l)
     except ValueError:
         strings = l
+    else:
+        strings = list(map(str, ints))
     version = '.'.join(strings[:3])
     return version
 
@@ -280,7 +282,6 @@
     for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
         try:
             info = subprocess.check_output(cmd,
-                                           stdin=subprocess.DEVNULL,
                                            stderr=subprocess.DEVNULL,
                                            text=True,
                                            shell=True)
@@ -364,20 +365,17 @@
         return release, version, csd, ptype
 
     winver = getwindowsversion()
-    try:
-        major, minor, build = map(int, _syscmd_ver()[2].split('.'))
-    except ValueError:
-        major, minor, build = winver.platform_version or winver[:3]
-    version = '{0}.{1}.{2}'.format(major, minor, build)
+    maj, min, build = winver.platform_version or winver[:3]
+    version = '{0}.{1}.{2}'.format(maj, min, build)
 
-    release = (_WIN32_CLIENT_RELEASES.get((major, minor)) or
-               _WIN32_CLIENT_RELEASES.get((major, None)) or
+    release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or
+               _WIN32_CLIENT_RELEASES.get((maj, None)) or
                release)
 
     # getwindowsversion() reflect the compatibility mode Python is
     # running under, and so the service pack value is only going to be
     # valid if the versions match.
-    if winver[:2] == (major, minor):
+    if winver[:2] == (maj, min):
         try:
             csd = 'SP{}'.format(winver.service_pack_major)
         except AttributeError:
@@ -386,8 +384,8 @@
 
     # VER_NT_SERVER = 3
     if getattr(winver, 'product_type', None) == 3:
-        release = (_WIN32_SERVER_RELEASES.get((major, minor)) or
-                   _WIN32_SERVER_RELEASES.get((major, None)) or
+        release = (_WIN32_SERVER_RELEASES.get((maj, min)) or
+                   _WIN32_SERVER_RELEASES.get((maj, None)) or
                    release)
 
     try:
@@ -526,6 +524,16 @@
             # XXX Whatever the new SunOS marketing name is...
             system = 'Solaris'
 
+    elif system == 'IRIX64':
+        # IRIX reports IRIX64 on platforms with 64-bit support; yet it
+        # is really a version and not a different platform, since 32-bit
+    # apps are also supported.
+        system = 'IRIX'
+        if version:
+            version = version + ' (64bit)'
+        else:
+            version = '64bit'
+
     elif system in ('win32', 'win16'):
         # In case one of the other tricks
         system = 'Windows'
@@ -690,6 +698,9 @@
     # Bits
     if '32-bit' in fileout:
         bits = '32bit'
+    elif 'N32' in fileout:
+        # On Irix only
+        bits = 'n32bit'
     elif '64-bit' in fileout:
         bits = '64bit'
 
@@ -771,7 +782,7 @@
         ):
     """
     A uname_result that's largely compatible with a
-    simple namedtuple except that 'processor' is
+    simple namedtuple except that 'processor' is
     resolved late and cached to avoid calling "uname"
     except when needed.
     """
@@ -786,25 +797,12 @@
             (self.processor,)
         )
 
-    @classmethod
-    def _make(cls, iterable):
-        # override factory to affect length check
-        num_fields = len(cls._fields)
-        result = cls.__new__(cls, *iterable)
-        if len(result) != num_fields + 1:
-            msg = f'Expected {num_fields} arguments, got {len(result)}'
-            raise TypeError(msg)
-        return result
-
     def __getitem__(self, key):
-        return tuple(self)[key]
+        return tuple(iter(self))[key]
 
     def __len__(self):
         return len(tuple(iter(self)))
 
-    def __reduce__(self):
-        return uname_result, tuple(self)[:len(self._fields)]
-
 
 _uname_cache = None
 
@@ -1245,63 +1243,6 @@
     _platform_cache[(aliased, terse)] = platform
     return platform
 
-### freedesktop.org os-release standard
-# https://www.freedesktop.org/software/systemd/man/os-release.html
-
-# NAME=value with optional quotes (' or "). The regular expression is less
-# strict than shell lexer, but that's ok.
-_os_release_line = re.compile(
-    "^(?P<name>[a-zA-Z0-9_]+)=(?P<quote>[\"\']?)(?P<value>.*)(?P=quote)$"
-)
-# unescape five special characters mentioned in the standard
-_os_release_unescape = re.compile(r"\\([\\\$\"\'`])")
-# /etc takes precedence over /usr/lib
-_os_release_candidates = ("/etc/os-release", "/usr/lib/os-release")
-_os_release_cache = None
-
-
-def _parse_os_release(lines):
-    # These fields are mandatory fields with well-known defaults
-    # in practice all Linux distributions override NAME, ID, and PRETTY_NAME.
-    info = {
-        "NAME": "Linux",
-        "ID": "linux",
-        "PRETTY_NAME": "Linux",
-    }
-
-    for line in lines:
-        mo = _os_release_line.match(line)
-        if mo is not None:
-            info[mo.group('name')] = _os_release_unescape.sub(
-                r"\1", mo.group('value')
-            )
-
-    return info
-
-
-def freedesktop_os_release():
-    """Return operation system identification from freedesktop.org os-release
-    """
-    global _os_release_cache
-
-    if _os_release_cache is None:
-        errno = None
-        for candidate in _os_release_candidates:
-            try:
-                with open(candidate, encoding="utf-8") as f:
-                    _os_release_cache = _parse_os_release(f)
-                break
-            except OSError as e:
-                errno = e.errno
-        else:
-            raise OSError(
-                errno,
-                f"Unable to read files {', '.join(_os_release_candidates)}"
-            )
-
-    return _os_release_cache.copy()
-
-
 ### Command line interface
 
 if __name__ == '__main__':
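
The freedesktop_os_release() machinery deleted above is driven by two small regexes; their effect, reproduced as a self-contained sketch (same patterns as the removed code, sample input invented for the demo):

    import re

    line_re = re.compile(
        "^(?P<name>[a-zA-Z0-9_]+)=(?P<quote>[\"\']?)(?P<value>.*)(?P=quote)$"
    )
    unescape_re = re.compile(r"\\([\\\$\"\'`])")

    def parse_os_release(lines):
        # Mandatory fields with their standard defaults.
        info = {"NAME": "Linux", "ID": "linux", "PRETTY_NAME": "Linux"}
        for line in lines:
            mo = line_re.match(line)
            if mo is not None:
                info[mo.group('name')] = unescape_re.sub(r"\1", mo.group('value'))
        return info

    print(parse_os_release(['NAME="Fedora Linux"', 'ID=fedora']))
    # {'NAME': 'Fedora Linux', 'ID': 'fedora', 'PRETTY_NAME': 'Linux'}
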
diff --git a/common/py3-stdlib/posixpath.py b/common/py3-stdlib/posixpath.py
index 1953746..ecb4e5a 100644
--- a/common/py3-stdlib/posixpath.py
+++ b/common/py3-stdlib/posixpath.py
@@ -262,9 +262,6 @@
             # password database, return the path unchanged
             return path
         userhome = pwent.pw_dir
-    # if no user home, return the path unchanged on VxWorks
-    if userhome is None and sys.platform == "vxworks":
-        return path
     if isinstance(path, bytes):
         userhome = os.fsencode(userhome)
         root = b'/'
@@ -352,7 +349,6 @@
     initial_slashes = path.startswith(sep)
     # POSIX allows one or two initial slashes, but treats three or more
     # as single slash.
-    # (see http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13)
     if (initial_slashes and
         path.startswith(sep*2) and not path.startswith(sep*3)):
         initial_slashes = 2
@@ -388,16 +384,16 @@
 # Return a canonical path (i.e. the absolute location of a file on the
 # filesystem).
 
-def realpath(filename, *, strict=False):
+def realpath(filename):
     """Return the canonical path of the specified filename, eliminating any
 symbolic links encountered in the path."""
     filename = os.fspath(filename)
-    path, ok = _joinrealpath(filename[:0], filename, strict, {})
+    path, ok = _joinrealpath(filename[:0], filename, {})
     return abspath(path)
 
 # Join two paths, normalizing and eliminating any symbolic links
 # encountered in the second path.
-def _joinrealpath(path, rest, strict, seen):
+def _joinrealpath(path, rest, seen):
     if isinstance(path, bytes):
         sep = b'/'
         curdir = b'.'
@@ -426,15 +422,7 @@
                 path = pardir
             continue
         newpath = join(path, name)
-        try:
-            st = os.lstat(newpath)
-        except OSError:
-            if strict:
-                raise
-            is_link = False
-        else:
-            is_link = stat.S_ISLNK(st.st_mode)
-        if not is_link:
+        if not islink(newpath):
             path = newpath
             continue
         # Resolve the symbolic link
@@ -445,14 +433,10 @@
                 # use cached value
                 continue
             # The symlink is not resolved, so we must have a symlink loop.
-            if strict:
-                # Raise OSError(errno.ELOOP)
-                os.stat(newpath)
-            else:
-                # Return already resolved part + rest of the path unchanged.
-                return join(newpath, rest), False
+            # Return already resolved part + rest of the path unchanged.
+            return join(newpath, rest), False
         seen[newpath] = None # not resolved symlink
-        path, ok = _joinrealpath(path, os.readlink(newpath), strict, seen)
+        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
         if not ok:
             return join(path, rest), False
         seen[newpath] = path # resolved symlink
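
Dropping the strict parameter above reverts realpath() to always resolving best-effort: missing components are normalized textually instead of raising. On an interpreter that still has the parameter, the contrast looks like this (the path is illustrative):

    import os.path

    # Non-strict (the only behavior after this revert): no filesystem check.
    print(os.path.realpath('/no/such/dir/file'))
    # Strict mode, available on 3.10+ only, raises for missing components:
    # os.path.realpath('/no/such/dir/file', strict=True)  # FileNotFoundError
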
diff --git a/common/py3-stdlib/pprint.py b/common/py3-stdlib/pprint.py
index d91421f..7c1118a 100644
--- a/common/py3-stdlib/pprint.py
+++ b/common/py3-stdlib/pprint.py
@@ -35,7 +35,6 @@
 """
 
 import collections as _collections
-import dataclasses as _dataclasses
 import re
 import sys as _sys
 import types as _types
@@ -46,20 +45,18 @@
 
 
 def pprint(object, stream=None, indent=1, width=80, depth=None, *,
-           compact=False, sort_dicts=True, underscore_numbers=False):
+           compact=False, sort_dicts=True):
     """Pretty-print a Python object to a stream [default is sys.stdout]."""
     printer = PrettyPrinter(
         stream=stream, indent=indent, width=width, depth=depth,
-        compact=compact, sort_dicts=sort_dicts,
-        underscore_numbers=underscore_numbers)
+        compact=compact, sort_dicts=sort_dicts)
     printer.pprint(object)
 
 def pformat(object, indent=1, width=80, depth=None, *,
-            compact=False, sort_dicts=True, underscore_numbers=False):
+            compact=False, sort_dicts=True):
     """Format a Python object into a pretty-printed representation."""
     return PrettyPrinter(indent=indent, width=width, depth=depth,
-                         compact=compact, sort_dicts=sort_dicts,
-                         underscore_numbers=underscore_numbers).pformat(object)
+                         compact=compact, sort_dicts=sort_dicts).pformat(object)
 
 def pp(object, *args, sort_dicts=False, **kwargs):
     """Pretty-print a Python object"""
@@ -67,15 +64,15 @@
 
 def saferepr(object):
     """Version of repr() which can handle recursive data structures."""
-    return PrettyPrinter()._safe_repr(object, {}, None, 0)[0]
+    return _safe_repr(object, {}, None, 0, True)[0]
 
 def isreadable(object):
     """Determine if saferepr(object) is readable by eval()."""
-    return PrettyPrinter()._safe_repr(object, {}, None, 0)[1]
+    return _safe_repr(object, {}, None, 0, True)[1]
 
 def isrecursive(object):
     """Determine if object requires a recursive representation."""
-    return PrettyPrinter()._safe_repr(object, {}, None, 0)[2]
+    return _safe_repr(object, {}, None, 0, True)[2]
 
 class _safe_key:
     """Helper function for key functions when sorting unorderable objects.
@@ -105,7 +102,7 @@
 
 class PrettyPrinter:
     def __init__(self, indent=1, width=80, depth=None, stream=None, *,
-                 compact=False, sort_dicts=True, underscore_numbers=False):
+                 compact=False, sort_dicts=True):
         """Handle pretty printing operations onto a stream using a set of
         configured parameters.
 
@@ -146,7 +143,6 @@
             self._stream = _sys.stdout
         self._compact = bool(compact)
         self._sort_dicts = sort_dicts
-        self._underscore_numbers = underscore_numbers
 
     def pprint(self, object):
         self._format(object, self._stream, 0, 0, {}, 0)
@@ -180,26 +176,14 @@
                 p(self, object, stream, indent, allowance, context, level + 1)
                 del context[objid]
                 return
-            elif (_dataclasses.is_dataclass(object) and
-                  not isinstance(object, type) and
-                  object.__dataclass_params__.repr and
-                  # Check dataclass has generated repr method.
-                  hasattr(object.__repr__, "__wrapped__") and
-                  "__create_fn__" in object.__repr__.__wrapped__.__qualname__):
+            elif isinstance(object, dict):
                 context[objid] = 1
-                self._pprint_dataclass(object, stream, indent, allowance, context, level + 1)
+                self._pprint_dict(object, stream, indent, allowance,
+                                  context, level + 1)
                 del context[objid]
                 return
         stream.write(rep)
 
-    def _pprint_dataclass(self, object, stream, indent, allowance, context, level):
-        cls_name = object.__class__.__name__
-        indent += len(cls_name) + 1
-        items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr]
-        stream.write(cls_name + '(')
-        self._format_namespace_items(items, stream, indent, allowance, context, level)
-        stream.write(')')
-
     _dispatch = {}
 
     def _pprint_dict(self, object, stream, indent, allowance, context, level):
@@ -366,9 +350,21 @@
         else:
             cls_name = object.__class__.__name__
         indent += len(cls_name) + 1
+        delimnl = ',\n' + ' ' * indent
         items = object.__dict__.items()
+        last_index = len(items) - 1
+
         stream.write(cls_name + '(')
-        self._format_namespace_items(items, stream, indent, allowance, context, level)
+        for i, (key, ent) in enumerate(items):
+            stream.write(key)
+            stream.write('=')
+
+            last = i == last_index
+            self._format(ent, stream, indent + len(key) + 1,
+                         allowance if last else 1,
+                         context, level)
+            if not last:
+                stream.write(delimnl)
         stream.write(')')
 
     _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
@@ -390,25 +386,6 @@
             if not last:
                 write(delimnl)
 
-    def _format_namespace_items(self, items, stream, indent, allowance, context, level):
-        write = stream.write
-        delimnl = ',\n' + ' ' * indent
-        last_index = len(items) - 1
-        for i, (key, ent) in enumerate(items):
-            last = i == last_index
-            write(key)
-            write('=')
-            if id(ent) in context:
-                # Special-case representation of recursion to match standard
-                # recursive dataclass repr.
-                write("...")
-            else:
-                self._format(ent, stream, indent + len(key) + 1,
-                             allowance if last else 1,
-                             context, level)
-            if not last:
-                write(delimnl)
-
     def _format_items(self, items, stream, indent, allowance, context, level):
         write = stream.write
         indent += self._indent_per_level
@@ -464,7 +441,7 @@
         and flags indicating whether the representation is 'readable'
         and whether the object represents a recursive construct.
         """
-        return self._safe_repr(object, context, maxlevels, level)
+        return _safe_repr(object, context, maxlevels, level, self._sort_dicts)
 
     def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
         if not len(object):
@@ -547,88 +524,79 @@
 
     _dispatch[_collections.UserString.__repr__] = _pprint_user_string
 
-    def _safe_repr(self, object, context, maxlevels, level):
-        # Return triple (repr_string, isreadable, isrecursive).
-        typ = type(object)
-        if typ in _builtin_scalars:
-            return repr(object), True, False
+# Return triple (repr_string, isreadable, isrecursive).
 
-        r = getattr(typ, "__repr__", None)
+def _safe_repr(object, context, maxlevels, level, sort_dicts):
+    typ = type(object)
+    if typ in _builtin_scalars:
+        return repr(object), True, False
 
-        if issubclass(typ, int) and r is int.__repr__:
-            if self._underscore_numbers:
-                return f"{object:_d}", True, False
-            else:
-                return repr(object), True, False
+    r = getattr(typ, "__repr__", None)
+    if issubclass(typ, dict) and r is dict.__repr__:
+        if not object:
+            return "{}", True, False
+        objid = id(object)
+        if maxlevels and level >= maxlevels:
+            return "{...}", False, objid in context
+        if objid in context:
+            return _recursion(object), False, True
+        context[objid] = 1
+        readable = True
+        recursive = False
+        components = []
+        append = components.append
+        level += 1
+        if sort_dicts:
+            items = sorted(object.items(), key=_safe_tuple)
+        else:
+            items = object.items()
+        for k, v in items:
+            krepr, kreadable, krecur = _safe_repr(k, context, maxlevels, level, sort_dicts)
+            vrepr, vreadable, vrecur = _safe_repr(v, context, maxlevels, level, sort_dicts)
+            append("%s: %s" % (krepr, vrepr))
+            readable = readable and kreadable and vreadable
+            if krecur or vrecur:
+                recursive = True
+        del context[objid]
+        return "{%s}" % ", ".join(components), readable, recursive
 
-        if issubclass(typ, dict) and r is dict.__repr__:
+    if (issubclass(typ, list) and r is list.__repr__) or \
+       (issubclass(typ, tuple) and r is tuple.__repr__):
+        if issubclass(typ, list):
             if not object:
-                return "{}", True, False
-            objid = id(object)
-            if maxlevels and level >= maxlevels:
-                return "{...}", False, objid in context
-            if objid in context:
-                return _recursion(object), False, True
-            context[objid] = 1
-            readable = True
-            recursive = False
-            components = []
-            append = components.append
-            level += 1
-            if self._sort_dicts:
-                items = sorted(object.items(), key=_safe_tuple)
-            else:
-                items = object.items()
-            for k, v in items:
-                krepr, kreadable, krecur = self.format(
-                    k, context, maxlevels, level)
-                vrepr, vreadable, vrecur = self.format(
-                    v, context, maxlevels, level)
-                append("%s: %s" % (krepr, vrepr))
-                readable = readable and kreadable and vreadable
-                if krecur or vrecur:
-                    recursive = True
-            del context[objid]
-            return "{%s}" % ", ".join(components), readable, recursive
+                return "[]", True, False
+            format = "[%s]"
+        elif len(object) == 1:
+            format = "(%s,)"
+        else:
+            if not object:
+                return "()", True, False
+            format = "(%s)"
+        objid = id(object)
+        if maxlevels and level >= maxlevels:
+            return format % "...", False, objid in context
+        if objid in context:
+            return _recursion(object), False, True
+        context[objid] = 1
+        readable = True
+        recursive = False
+        components = []
+        append = components.append
+        level += 1
+        for o in object:
+            orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level, sort_dicts)
+            append(orepr)
+            if not oreadable:
+                readable = False
+            if orecur:
+                recursive = True
+        del context[objid]
+        return format % ", ".join(components), readable, recursive
 
-        if (issubclass(typ, list) and r is list.__repr__) or \
-           (issubclass(typ, tuple) and r is tuple.__repr__):
-            if issubclass(typ, list):
-                if not object:
-                    return "[]", True, False
-                format = "[%s]"
-            elif len(object) == 1:
-                format = "(%s,)"
-            else:
-                if not object:
-                    return "()", True, False
-                format = "(%s)"
-            objid = id(object)
-            if maxlevels and level >= maxlevels:
-                return format % "...", False, objid in context
-            if objid in context:
-                return _recursion(object), False, True
-            context[objid] = 1
-            readable = True
-            recursive = False
-            components = []
-            append = components.append
-            level += 1
-            for o in object:
-                orepr, oreadable, orecur = self.format(
-                    o, context, maxlevels, level)
-                append(orepr)
-                if not oreadable:
-                    readable = False
-                if orecur:
-                    recursive = True
-            del context[objid]
-            return format % ", ".join(components), readable, recursive
+    rep = repr(object)
+    return rep, (rep and not rep.startswith('<')), False
 
-        rep = repr(object)
-        return rep, (rep and not rep.startswith('<')), False
-
-_builtin_scalars = frozenset({str, bytes, bytearray, float, complex,
+_builtin_scalars = frozenset({str, bytes, bytearray, int, float, complex,
                               bool, type(None)})
 
 def _recursion(object):
@@ -642,7 +610,7 @@
         object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
     p = PrettyPrinter()
     t1 = time.perf_counter()
-    p._safe_repr(object, {}, None, 0, True)
+    _safe_repr(object, {}, None, 0, True)
     t2 = time.perf_counter()
     p.pformat(object)
     t3 = time.perf_counter()
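
The module-level _safe_repr() restored above is the engine behind saferepr(), isreadable(), and isrecursive(); it detects cycles via the `context` dict of visited object ids. A short demo of the three entry points on a self-referencing dict:

    import pprint

    data = {'a': 1}
    data['self'] = data                 # create a reference cycle
    print(pprint.saferepr(data))        # "{'a': 1, 'self': <Recursion on dict with id=...>}"
    print(pprint.isrecursive(data))     # True
    print(pprint.isreadable(data))      # False: eval() could not rebuild it
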
diff --git a/common/py3-stdlib/profile.py b/common/py3-stdlib/profile.py
index d8599fb..5cb017e 100755
--- a/common/py3-stdlib/profile.py
+++ b/common/py3-stdlib/profile.py
@@ -595,12 +595,7 @@
                 '__package__': None,
                 '__cached__': None,
             }
-        try:
-            runctx(code, globs, None, options.outfile, options.sort)
-        except BrokenPipeError as exc:
-            # Prevent "Exception ignored" during interpreter shutdown.
-            sys.stdout = None
-            sys.exit(exc.errno)
+        runctx(code, globs, None, options.outfile, options.sort)
     else:
         parser.print_usage()
     return parser
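
The guard removed above existed to keep invocations like `python -m profile script.py | head` from dying noisily when the reader closes the pipe early; after this revert the BrokenPipeError propagates. A sketch of the dropped pattern (`emit_report` is a stand-in for the real runctx() call):

    import sys

    def emit_report():
        # Stand-in for runctx(...): anything that writes to stdout.
        print("profiling output...")

    try:
        emit_report()
    except BrokenPipeError as exc:
        sys.stdout = None       # keep interpreter shutdown from re-flushing the pipe
        sys.exit(exc.errno)
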
diff --git a/common/py3-stdlib/pty.py b/common/py3-stdlib/pty.py
index 8d8ce40..a324320 100644
--- a/common/py3-stdlib/pty.py
+++ b/common/py3-stdlib/pty.py
@@ -1,7 +1,7 @@
 """Pseudo terminal utilities."""
 
 # Bugs: No signal handling.  Doesn't set slave termios and window size.
-#       Only tested on Linux, FreeBSD, and macOS.
+#       Only tested on Linux.
 # See:  W. Richard Stevens. 1992.  Advanced Programming in the
 #       UNIX Environment.  Chapter 19.
 # Author: Steen Lumholt -- with additions by Guido.
@@ -11,11 +11,7 @@
 import sys
 import tty
 
-# names imported directly for test mocking purposes
-from os import close, waitpid
-from tty import setraw, tcgetattr, tcsetattr
-
-__all__ = ["openpty", "fork", "spawn"]
+__all__ = ["openpty","fork","spawn"]
 
 STDIN_FILENO = 0
 STDOUT_FILENO = 1
@@ -109,8 +105,8 @@
         os.dup2(slave_fd, STDIN_FILENO)
         os.dup2(slave_fd, STDOUT_FILENO)
         os.dup2(slave_fd, STDERR_FILENO)
-        if slave_fd > STDERR_FILENO:
-            os.close(slave_fd)
+        if (slave_fd > STDERR_FILENO):
+            os.close (slave_fd)
 
         # Explicitly open the tty to make it become a controlling tty.
         tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
@@ -137,22 +133,14 @@
             pty master -> standard output   (master_read)
             standard input -> pty master    (stdin_read)"""
     fds = [master_fd, STDIN_FILENO]
-    while fds:
-        rfds, _wfds, _xfds = select(fds, [], [])
-
+    while True:
+        rfds, wfds, xfds = select(fds, [], [])
         if master_fd in rfds:
-            # Some OSes signal EOF by returning an empty byte string,
-            # some throw OSErrors.
-            try:
-                data = master_read(master_fd)
-            except OSError:
-                data = b""
+            data = master_read(master_fd)
             if not data:  # Reached EOF.
-                return    # Assume the child process has exited and is
-                          # unreachable, so we clean up.
+                fds.remove(master_fd)
             else:
                 os.write(STDOUT_FILENO, data)
-
         if STDIN_FILENO in rfds:
             data = stdin_read(STDIN_FILENO)
             if not data:
@@ -165,23 +153,20 @@
     if type(argv) == type(''):
         argv = (argv,)
     sys.audit('pty.spawn', argv)
-
     pid, master_fd = fork()
     if pid == CHILD:
         os.execlp(argv[0], *argv)
-
     try:
-        mode = tcgetattr(STDIN_FILENO)
-        setraw(STDIN_FILENO)
-        restore = True
+        mode = tty.tcgetattr(STDIN_FILENO)
+        tty.setraw(STDIN_FILENO)
+        restore = 1
     except tty.error:    # This is the same as termios.error
-        restore = False
-
+        restore = 0
     try:
         _copy(master_fd, master_read, stdin_read)
-    finally:
+    except OSError:
         if restore:
-            tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
+            tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
 
-    close(master_fd)
-    return waitpid(pid, 0)[1]
+    os.close(master_fd)
+    return os.waitpid(pid, 0)[1]
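
The restored spawn() above keeps the same overall shape: fork a child onto a fresh pseudo-terminal, copy bytes both ways, then reap the child. Basic usage (POSIX-only; the command is an example):

    import pty

    # Runs the command on a new pty, echoing its output to our stdout.
    status = pty.spawn(['echo', 'hello from a pty'])
    print('waitpid status word:', status)
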
diff --git a/common/py3-stdlib/py_compile.py b/common/py3-stdlib/py_compile.py
index 388614e..a81f493 100644
--- a/common/py3-stdlib/py_compile.py
+++ b/common/py3-stdlib/py_compile.py
@@ -173,40 +173,43 @@
     return cfile
 
 
-def main():
-    import argparse
+def main(args=None):
+    """Compile several source files.
 
-    description = 'A simple command-line interface for py_compile module.'
-    parser = argparse.ArgumentParser(description=description)
-    parser.add_argument(
-        '-q', '--quiet',
-        action='store_true',
-        help='Suppress error output',
-    )
-    parser.add_argument(
-        'filenames',
-        nargs='+',
-        help='Files to compile',
-    )
-    args = parser.parse_args()
-    if args.filenames == ['-']:
-        filenames = [filename.rstrip('\n') for filename in sys.stdin.readlines()]
+    The files named in 'args' (or on the command line, if 'args' is
+    not specified) are compiled and the resulting bytecode is cached
+    in the normal manner.  This function does not search a directory
+    structure to locate source files; it only compiles files named
+    explicitly.  If '-' is the only parameter in args, the list of
+    files is taken from standard input.
+
+    """
+    if args is None:
+        args = sys.argv[1:]
+    rv = 0
+    if args == ['-']:
+        while True:
+            filename = sys.stdin.readline()
+            if not filename:
+                break
+            filename = filename.rstrip('\n')
+            try:
+                compile(filename, doraise=True)
+            except PyCompileError as error:
+                rv = 1
+                sys.stderr.write("%s\n" % error.msg)
+            except OSError as error:
+                rv = 1
+                sys.stderr.write("%s\n" % error)
     else:
-        filenames = args.filenames
-    for filename in filenames:
-        try:
-            compile(filename, doraise=True)
-        except PyCompileError as error:
-            if args.quiet:
-                parser.exit(1)
-            else:
-                parser.exit(1, error.msg)
-        except OSError as error:
-            if args.quiet:
-                parser.exit(1)
-            else:
-                parser.exit(1, str(error))
-
+        for filename in args:
+            try:
+                compile(filename, doraise=True)
+            except PyCompileError as error:
+                # return value to indicate at least one failure
+                rv = 1
+                sys.stderr.write("%s\n" % error.msg)
+    return rv
 
 if __name__ == "__main__":
-    main()
+    sys.exit(main())
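
The restored main() compiles each name in args (or reads names from stdin when the sole argument is '-') and folds failures into the return code. The same work done programmatically (`demo_mod.py` is a scratch file created just for the example):

    import py_compile

    with open('demo_mod.py', 'w') as f:     # scratch module for the demo
        f.write('x = 1\n')
    try:
        py_compile.compile('demo_mod.py', doraise=True)
    except py_compile.PyCompileError as error:
        print(error.msg)                    # syntax errors land here
    else:
        print('bytecode written under __pycache__/')
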
diff --git a/common/py3-stdlib/pyclbr.py b/common/py3-stdlib/pyclbr.py
index 37f8699..99a1734 100644
--- a/common/py3-stdlib/pyclbr.py
+++ b/common/py3-stdlib/pyclbr.py
@@ -21,14 +21,11 @@
     name    -- name of the object;
     file    -- file in which the object is defined;
     lineno  -- line in the file where the object's definition starts;
-    end_lineno -- line in the file where the object's definition ends;
     parent  -- parent of this object, if any;
     children -- nested objects contained in this object.
 The 'children' attribute is a dictionary mapping names to objects.
 
-Instances of Function describe functions with the attributes from _Object,
-plus the following:
-    is_async -- if a function is defined with an 'async' prefix
+Instances of Function describe functions with the attributes from _Object.
 
 Instances of Class describe classes with the attributes from _Object,
 plus the following:
@@ -41,9 +38,11 @@
 shouldn't happen often.
 """
 
-import ast
+import io
 import sys
 import importlib.util
+import tokenize
+from token import NAME, DEDENT, OP
 
 __all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
 
@@ -52,50 +51,48 @@
 
 class _Object:
     "Information about Python class or function."
-    def __init__(self, module, name, file, lineno, end_lineno, parent):
+    def __init__(self, module, name, file, lineno, parent):
         self.module = module
         self.name = name
         self.file = file
         self.lineno = lineno
-        self.end_lineno = end_lineno
         self.parent = parent
         self.children = {}
-        if parent is not None:
-            parent.children[name] = self
+
+    def _addchild(self, name, obj):
+        self.children[name] = obj
 
 
-# Odd Function and Class signatures are for back-compatibility.
 class Function(_Object):
     "Information about a Python function, including methods."
-    def __init__(self, module, name, file, lineno,
-                 parent=None, is_async=False, *, end_lineno=None):
-        super().__init__(module, name, file, lineno, end_lineno, parent)
-        self.is_async = is_async
-        if isinstance(parent, Class):
-            parent.methods[name] = lineno
+    def __init__(self, module, name, file, lineno, parent=None):
+        _Object.__init__(self, module, name, file, lineno, parent)
 
 
 class Class(_Object):
     "Information about a Python class."
-    def __init__(self, module, name, super_, file, lineno,
-                 parent=None, *, end_lineno=None):
-        super().__init__(module, name, file, lineno, end_lineno, parent)
-        self.super = super_ or []
+    def __init__(self, module, name, super, file, lineno, parent=None):
+        _Object.__init__(self, module, name, file, lineno, parent)
+        self.super = [] if super is None else super
         self.methods = {}
 
+    def _addmethod(self, name, lineno):
+        self.methods[name] = lineno
 
-# These 2 functions are used in these tests
-# Lib/test/test_pyclbr, Lib/idlelib/idle_test/test_browser.py
-def _nest_function(ob, func_name, lineno, end_lineno, is_async=False):
+
+def _nest_function(ob, func_name, lineno):
     "Return a Function after nesting within ob."
-    return Function(ob.module, func_name, ob.file, lineno,
-                    parent=ob, is_async=is_async, end_lineno=end_lineno)
+    newfunc = Function(ob.module, func_name, ob.file, lineno, ob)
+    ob._addchild(func_name, newfunc)
+    if isinstance(ob, Class):
+        ob._addmethod(func_name, lineno)
+    return newfunc
 
-def _nest_class(ob, class_name, lineno, end_lineno, super=None):
+def _nest_class(ob, class_name, lineno, super=None):
     "Return a Class after nesting within ob."
-    return Class(ob.module, class_name, super, ob.file, lineno,
-                 parent=ob, end_lineno=end_lineno)
-
+    newclass = Class(ob.module, class_name, super, ob.file, lineno, ob)
+    ob._addchild(class_name, newclass)
+    return newclass
 
 def readmodule(module, path=None):
     """Return Class objects for the top-level classes in module.
@@ -118,7 +115,6 @@
     """
     return _readmodule(module, path or [])
 
-
 def _readmodule(module, path, inpackage=None):
     """Do the hard work for readmodule[_ex].
 
@@ -183,93 +179,187 @@
     return _create_tree(fullmodule, path, fname, source, tree, inpackage)
 
 
-class _ModuleBrowser(ast.NodeVisitor):
-    def __init__(self, module, path, file, tree, inpackage):
-        self.path = path
-        self.tree = tree
-        self.file = file
-        self.module = module
-        self.inpackage = inpackage
-        self.stack = []
-
-    def visit_ClassDef(self, node):
-        bases = []
-        for base in node.bases:
-            name = ast.unparse(base)
-            if name in self.tree:
-                # We know this super class.
-                bases.append(self.tree[name])
-            elif len(names := name.split(".")) > 1:
-                # Super class form is module.class:
-                # look in module for class.
-                *_, module, class_ = names
-                if module in _modules:
-                    bases.append(_modules[module].get(class_, name))
-            else:
-                bases.append(name)
-
-        parent = self.stack[-1] if self.stack else None
-        class_ = Class(self.module, node.name, bases, self.file, node.lineno,
-                       parent=parent, end_lineno=node.end_lineno)
-        if parent is None:
-            self.tree[node.name] = class_
-        self.stack.append(class_)
-        self.generic_visit(node)
-        self.stack.pop()
-
-    def visit_FunctionDef(self, node, *, is_async=False):
-        parent = self.stack[-1] if self.stack else None
-        function = Function(self.module, node.name, self.file, node.lineno,
-                            parent, is_async, end_lineno=node.end_lineno)
-        if parent is None:
-            self.tree[node.name] = function
-        self.stack.append(function)
-        self.generic_visit(node)
-        self.stack.pop()
-
-    def visit_AsyncFunctionDef(self, node):
-        self.visit_FunctionDef(node, is_async=True)
-
-    def visit_Import(self, node):
-        if node.col_offset != 0:
-            return
-
-        for module in node.names:
-            try:
-                try:
-                    _readmodule(module.name, self.path, self.inpackage)
-                except ImportError:
-                    _readmodule(module.name, [])
-            except (ImportError, SyntaxError):
-                # If we can't find or parse the imported module,
-                # too bad -- don't die here.
-                continue
-
-    def visit_ImportFrom(self, node):
-        if node.col_offset != 0:
-            return
-        try:
-            module = "." * node.level
-            if node.module:
-                module += node.module
-            module = _readmodule(module, self.path, self.inpackage)
-        except (ImportError, SyntaxError):
-            return
-
-        for name in node.names:
-            if name.name in module:
-                self.tree[name.asname or name.name] = module[name.name]
-            elif name.name == "*":
-                for import_name, import_value in module.items():
-                    if import_name.startswith("_"):
-                        continue
-                    self.tree[import_name] = import_value
-
-
 def _create_tree(fullmodule, path, fname, source, tree, inpackage):
-    mbrowser = _ModuleBrowser(fullmodule, path, fname, tree, inpackage)
-    mbrowser.visit(ast.parse(source))
-    return mbrowser.tree
+    """Return the tree for a particular module.
+
+    fullmodule (full module name), inpackage+module, becomes o.module.
+    path is passed to recursive calls of _readmodule.
+    fname becomes o.file.
+    source is tokenized.  Imports cause recursive calls to _readmodule.
+    tree is {} or {'__path__': <submodule search locations>}.
+    inpackage, None or string, is passed to recursive calls of _readmodule.
+
+    The effect of recursive calls is mutation of global _modules.
+    """
+    f = io.StringIO(source)
+
+    stack = [] # Initialize stack of (class, indent) pairs.
+
+    g = tokenize.generate_tokens(f.readline)
+    try:
+        for tokentype, token, start, _end, _line in g:
+            if tokentype == DEDENT:
+                lineno, thisindent = start
+                # Close previous nested classes and defs.
+                while stack and stack[-1][1] >= thisindent:
+                    del stack[-1]
+            elif token == 'def':
+                lineno, thisindent = start
+                # Close previous nested classes and defs.
+                while stack and stack[-1][1] >= thisindent:
+                    del stack[-1]
+                tokentype, func_name, start = next(g)[0:3]
+                if tokentype != NAME:
+                    continue  # Skip def with syntax error.
+                cur_func = None
+                if stack:
+                    cur_obj = stack[-1][0]
+                    cur_func = _nest_function(cur_obj, func_name, lineno)
+                else:
+                    # It is just a function.
+                    cur_func = Function(fullmodule, func_name, fname, lineno)
+                    tree[func_name] = cur_func
+                stack.append((cur_func, thisindent))
+            elif token == 'class':
+                lineno, thisindent = start
+                # Close previous nested classes and defs.
+                while stack and stack[-1][1] >= thisindent:
+                    del stack[-1]
+                tokentype, class_name, start = next(g)[0:3]
+                if tokentype != NAME:
+                    continue # Skip class with syntax error.
+                # Parse what follows the class name.
+                tokentype, token, start = next(g)[0:3]
+                inherit = None
+                if token == '(':
+                    names = [] # Initialize list of superclasses.
+                    level = 1
+                    super = [] # Tokens making up current superclass.
+                    while True:
+                        tokentype, token, start = next(g)[0:3]
+                        if token in (')', ',') and level == 1:
+                            n = "".join(super)
+                            if n in tree:
+                                # We know this super class.
+                                n = tree[n]
+                            else:
+                                c = n.split('.')
+                                if len(c) > 1:
+                                    # Super class form is module.class:
+                                    # look in module for class.
+                                    m = c[-2]
+                                    c = c[-1]
+                                    if m in _modules:
+                                        d = _modules[m]
+                                        if c in d:
+                                            n = d[c]
+                            names.append(n)
+                            super = []
+                        if token == '(':
+                            level += 1
+                        elif token == ')':
+                            level -= 1
+                            if level == 0:
+                                break
+                        elif token == ',' and level == 1:
+                            pass
+                        # Only use NAME and OP (== dot) tokens for type name.
+                        elif tokentype in (NAME, OP) and level == 1:
+                            super.append(token)
+                        # Expressions in the base list are not supported.
+                    inherit = names
+                if stack:
+                    cur_obj = stack[-1][0]
+                    cur_class = _nest_class(
+                            cur_obj, class_name, lineno, inherit)
+                else:
+                    cur_class = Class(fullmodule, class_name, inherit,
+                                      fname, lineno)
+                    tree[class_name] = cur_class
+                stack.append((cur_class, thisindent))
+            elif token == 'import' and start[1] == 0:
+                modules = _getnamelist(g)
+                for mod, _mod2 in modules:
+                    try:
+                        # Recursively read the imported module.
+                        if inpackage is None:
+                            _readmodule(mod, path)
+                        else:
+                            try:
+                                _readmodule(mod, path, inpackage)
+                            except ImportError:
+                                _readmodule(mod, [])
+                    except:
+                        # If we can't find or parse the imported module,
+                        # too bad -- don't die here.
+                        pass
+            elif token == 'from' and start[1] == 0:
+                mod, token = _getname(g)
+                if not mod or token != "import":
+                    continue
+                names = _getnamelist(g)
+                try:
+                    # Recursively read the imported module.
+                    d = _readmodule(mod, path, inpackage)
+                except:
+                    # If we can't find or parse the imported module,
+                    # too bad -- don't die here.
+                    continue
+                # Add any classes that were defined in the imported module
+                # to our name space if they were mentioned in the list.
+                for n, n2 in names:
+                    if n in d:
+                        tree[n2 or n] = d[n]
+                    elif n == '*':
+                        # Don't add names that start with _.
+                        for n in d:
+                            if n[0] != '_':
+                                tree[n] = d[n]
+    except StopIteration:
+        pass
+
+    f.close()
+    return tree
+
+
+def _getnamelist(g):
+    """Return list of (dotted-name, as-name or None) tuples for token source g.
+
+    An as-name is the name that follows 'as' in an as clause.
+    """
+    names = []
+    while True:
+        name, token = _getname(g)
+        if not name:
+            break
+        if token == 'as':
+            name2, token = _getname(g)
+        else:
+            name2 = None
+        names.append((name, name2))
+        while token != "," and "\n" not in token:
+            token = next(g)[1]
+        if token != ",":
+            break
+    return names
+
+
+def _getname(g):
+    "Return (dotted-name or None, next-token) tuple for token source g."
+    parts = []
+    tokentype, token = next(g)[0:2]
+    if tokentype != NAME and token != '*':
+        return (None, token)
+    parts.append(token)
+    while True:
+        tokentype, token = next(g)[0:2]
+        if token != '.':
+            break
+        tokentype, token = next(g)[0:2]
+        if tokentype != NAME:
+            break
+        parts.append(token)
+    return (".".join(parts), token)
 
 
 def _main():
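For context on the pyclbr code restored above: it walks a module's token stream to record classes, functions, and superclasses without importing the module. A minimal usage sketch (the target module json.tool is just an example; any module on sys.path works):

    import pyclbr

    # Build the class/function tree for a module without executing it.
    tree = pyclbr.readmodule_ex('json.tool')
    for name, obj in tree.items():
        print(name, type(obj).__name__, 'line', obj.lineno)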
diff --git a/common/py3-stdlib/pydoc.py b/common/py3-stdlib/pydoc.py
index 4a8c10a..35ef3eb 100755
--- a/common/py3-stdlib/pydoc.py
+++ b/common/py3-stdlib/pydoc.py
@@ -23,7 +23,7 @@
 local machine.  Port number 0 can be used to get an arbitrary unused port.
 
 Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
-open a web browser to interactively browse documentation.  Combine with
+open a Web browser to interactively browse documentation.  Combine with
 the -n and -p options to control the hostname and port used.
 
 Run "pydoc -w <name>" to write out the HTML documentation for a module
@@ -504,7 +504,7 @@
               not file.startswith(os.path.join(basedir, 'site-packages')))) and
             object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
             if docloc.startswith(("http://", "https://")):
-                docloc = "{}/{}.html".format(docloc.rstrip("/"), object.__name__.lower())
+                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
             else:
                 docloc = os.path.join(docloc, object.__name__.lower() + ".html")
         else:
@@ -694,7 +694,7 @@
                 url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif pep:
-                url = 'https://www.python.org/dev/peps/pep-%04d/' % int(pep)
+                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif selfdot:
                 # Create a link for methods like 'self.method(...)'
@@ -1594,10 +1594,9 @@
 def pipepager(text, cmd):
     """Page through text by feeding it to another program."""
     import subprocess
-    proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
-                            errors='backslashreplace')
+    proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
     try:
-        with proc.stdin as pipe:
+        with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
             try:
                 pipe.write(text)
             except KeyboardInterrupt:
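The restored pipepager wraps the pager's binary stdin in io.TextIOWrapper rather than asking Popen for a text-mode pipe. A standalone sketch of the same pattern, assuming 'cat' as a stand-in pager command:

    import io
    import subprocess

    proc = subprocess.Popen('cat', shell=True, stdin=subprocess.PIPE)
    # Wrap the byte pipe so str can be written; undecodable characters
    # are backslash-escaped instead of raising UnicodeEncodeError.
    with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
        pipe.write('héllo\n')
    proc.wait()

Closing the wrapper closes the underlying pipe, which is what signals EOF to the pager.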
@@ -1618,14 +1617,13 @@
 def tempfilepager(text, cmd):
     """Page through text by invoking a program on a temporary file."""
     import tempfile
-    with tempfile.TemporaryDirectory() as tempdir:
-        filename = os.path.join(tempdir, 'pydoc.out')
-        with open(filename, 'w', errors='backslashreplace',
-                  encoding=os.device_encoding(0) if
-                  sys.platform == 'win32' else None
-                  ) as file:
-            file.write(text)
+    filename = tempfile.mktemp()
+    with open(filename, 'w', errors='backslashreplace') as file:
+        file.write(text)
+    try:
         os.system(cmd + ' "' + filename + '"')
+    finally:
+        os.unlink(filename)
 
 def _escape_stdout(text):
     # Escape non-encodable characters to avoid encoding errors later
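The restored tempfilepager reserves a name with tempfile.mktemp(), writes the text, and unlinks the file itself, instead of using a TemporaryDirectory context manager. A self-contained sketch of that shape, again with 'cat' standing in for the pager command:

    import os
    import tempfile

    filename = tempfile.mktemp()   # reserves a name only; creates nothing
    with open(filename, 'w', errors='backslashreplace') as f:
        f.write('text to page\n')
    try:
        os.system('cat "%s"' % filename)
    finally:
        os.unlink(filename)        # mktemp() performs no cleanup of its own

Note that mktemp() is racy by design (another process can claim the name first), which is why later versions moved to TemporaryDirectory.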
@@ -1819,6 +1817,7 @@
         'False': '',
         'None': '',
         'True': '',
+        '__peg_parser__': '',
         'and': 'BOOLEAN',
         'as': 'with',
         'assert': ('assert', ''),
@@ -2066,7 +2065,7 @@
 Welcome to Python {0}'s help utility!
 
 If this is your first time using Python, you should definitely check out
-the tutorial on the internet at https://docs.python.org/{0}/tutorial/.
+the tutorial on the Internet at https://docs.python.org/{0}/tutorial/.
 
 Enter the name of any module, keyword, or topic to get help on writing
 Python programs and using Python modules.  To quit this help utility and
@@ -2280,13 +2279,13 @@
         warnings.filterwarnings('ignore') # ignore problems during import
         ModuleScanner().run(callback, key, onerror=onerror)
 
-# --------------------------------------- enhanced web browser interface
+# --------------------------------------- enhanced Web browser interface
 
 def _start_server(urlhandler, hostname, port):
     """Start an HTTP server thread on a specific port.
 
     Start an HTML/text server thread, so HTML or text documents can be
-    browsed dynamically and interactively with a web browser.  Example use:
+    browsed dynamically and interactively with a Web browser.  Example use:
 
         >>> import time
         >>> import pydoc
@@ -2458,6 +2457,9 @@
 %s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
 </body></html>''' % (title, css_link, html_navbar(), contents)
 
+        def filelink(self, url, path):
+            return '<a href="getfile?key=%s">%s</a>' % (url, path)
+
 
     html = _HTMLDoc()
 
@@ -2543,6 +2545,19 @@
             'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
         return 'Search Results', contents
 
+    def html_getfile(path):
+        """Get and display a source file listing safely."""
+        path = urllib.parse.unquote(path)
+        with tokenize.open(path) as fp:
+            lines = html.escape(fp.read())
+        body = '<pre>%s</pre>' % lines
+        heading = html.heading(
+            '<big><big><strong>File Listing</strong></big></big>',
+            '#ffffff', '#7799ee')
+        contents = heading + html.bigsection(
+            'File: %s' % path, '#ffffff', '#ee77aa', body)
+        return 'getfile %s' % path, contents
+
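The filelink()/html_getfile() pair restored here lets the pydoc server render source listings: filelink() emits a getfile?key=<path> link, and html_getfile() reads the file with tokenize.open(), which honors PEP 263 coding cookies. The escaping step in isolation, using the stdlib html module (pydoc itself goes through its HTML formatter) and a hypothetical path:

    import html
    import tokenize

    path = 'some_module.py'  # hypothetical readable source file
    with tokenize.open(path) as fp:   # decodes per the file's coding cookie
        body = '<pre>%s</pre>' % html.escape(fp.read())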
     def html_topics():
         """Index of topic texts available."""
 
@@ -2634,6 +2649,8 @@
                 op, _, url = url.partition('=')
                 if op == "search?key":
                     title, content = html_search(url)
+                elif op == "getfile?key":
+                    title, content = html_getfile(url)
                 elif op == "topic?key":
                     # try topics first, then objects.
                     try:
@@ -2672,7 +2689,7 @@
 
 
 def browse(port=0, *, open_browser=True, hostname='localhost'):
-    """Start the enhanced pydoc web server and open a web browser.
+    """Start the enhanced pydoc Web server and open a Web browser.
 
     Use port '0' to start the server on an arbitrary port.
     Set open_browser to False to suppress opening a browser.
@@ -2824,7 +2841,7 @@
     number 0 can be used to get an arbitrary unused port.
 
 {cmd} -b
-    Start an HTTP server on an arbitrary unused port and open a web browser
+    Start an HTTP server on an arbitrary unused port and open a Web browser
     to interactively browse documentation.  This option can be used in
     combination with -n and/or -p.
 
diff --git a/common/py3-stdlib/pydoc_data/topics.py b/common/py3-stdlib/pydoc_data/topics.py
index ac7d16c..d8dd8c5 100644
--- a/common/py3-stdlib/pydoc_data/topics.py
+++ b/common/py3-stdlib/pydoc_data/topics.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Wed Mar 16 11:26:55 2022
+# Autogenerated by Sphinx on Mon Dec  7 15:00:07 2020
 topics = {'assert': 'The "assert" statement\n'
            '**********************\n'
            '\n'
@@ -433,9 +433,11 @@
           '\n'
           'Execution of Python coroutines can be suspended and resumed at '
           'many\n'
-          'points (see *coroutine*). "await" expressions, "async for" and '
-          '"async\n'
-          'with" can only be used in the body of a coroutine function.\n'
+          'points (see *coroutine*).  Inside the body of a coroutine '
+          'function,\n'
+          '"await" and "async" identifiers become reserved keywords; "await"\n'
+          'expressions, "async for" and "async with" can only be used in\n'
+          'coroutine function bodies.\n'
           '\n'
           'Functions defined with "async def" syntax are always coroutine\n'
           'functions, even if they do not contain "await" or "async" '
@@ -451,22 +453,19 @@
           '       do_stuff()\n'
           '       await some_coroutine()\n'
           '\n'
-          'Changed in version 3.7: "await" and "async" are now keywords;\n'
-          'previously they were only treated as such inside the body of a\n'
-          'coroutine function.\n'
-          '\n'
           '\n'
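A minimal sketch of the coroutine rules this topic describes; "await" is only legal inside an "async def" body:

    import asyncio

    async def work():
        await asyncio.sleep(0)   # 'await' only inside 'async def'
        return 42

    print(asyncio.run(work()))   # -> 42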
           'The "async for" statement\n'
           '=========================\n'
           '\n'
           '   async_for_stmt ::= "async" for_stmt\n'
           '\n'
-          'An *asynchronous iterable* provides an "__aiter__" method that\n'
-          'directly returns an *asynchronous iterator*, which can call\n'
-          'asynchronous code in its "__anext__" method.\n'
+          'An *asynchronous iterable* is able to call asynchronous code in '
+          'its\n'
+          '*iter* implementation, and an *asynchronous iterator* can call\n'
+          'asynchronous code in its *next* method.\n'
           '\n'
           'The "async for" statement allows convenient iteration over\n'
-          'asynchronous iterables.\n'
+          'asynchronous iterators.\n'
           '\n'
           'The following code:\n'
           '\n'
@@ -551,65 +550,13 @@
           'exception.\n'
           '    That new exception causes the old one to be lost.\n'
           '\n'
-          '[2] In pattern matching, a sequence is defined as one of the\n'
-          '    following:\n'
-          '\n'
-          '       * a class that inherits from "collections.abc.Sequence"\n'
-          '\n'
-          '       * a Python class that has been registered as\n'
-          '         "collections.abc.Sequence"\n'
-          '\n'
-          '       * a builtin class that has its (CPython) '
-          '"Py_TPFLAGS_SEQUENCE"\n'
-          '         bit set\n'
-          '\n'
-          '       * a class that inherits from any of the above\n'
-          '\n'
-          '    The following standard library classes are sequences:\n'
-          '\n'
-          '       * "array.array"\n'
-          '\n'
-          '       * "collections.deque"\n'
-          '\n'
-          '       * "list"\n'
-          '\n'
-          '       * "memoryview"\n'
-          '\n'
-          '       * "range"\n'
-          '\n'
-          '       * "tuple"\n'
-          '\n'
-          '    Note:\n'
-          '\n'
-          '      Subject values of type "str", "bytes", and "bytearray" do '
-          'not\n'
-          '      match sequence patterns.\n'
-          '\n'
-          '[3] In pattern matching, a mapping is defined as one of the '
-          'following:\n'
-          '\n'
-          '       * a class that inherits from "collections.abc.Mapping"\n'
-          '\n'
-          '       * a Python class that has been registered as\n'
-          '         "collections.abc.Mapping"\n'
-          '\n'
-          '       * a builtin class that has its (CPython) '
-          '"Py_TPFLAGS_MAPPING"\n'
-          '         bit set\n'
-          '\n'
-          '       * a class that inherits from any of the above\n'
-          '\n'
-          '    The standard library classes "dict" and '
-          '"types.MappingProxyType"\n'
-          '    are mappings.\n'
-          '\n'
-          '[4] A string literal appearing as the first statement in the '
+          '[2] A string literal appearing as the first statement in the '
           'function\n'
           '    body is transformed into the function’s "__doc__" attribute '
           'and\n'
           '    therefore the function’s *docstring*.\n'
           '\n'
-          '[5] A string literal appearing as the first statement in the class\n'
+          '[3] A string literal appearing as the first statement in the class\n'
           '    body is transformed into the namespace’s "__doc__" item and\n'
           '    therefore the class’s *docstring*.\n',
  'atom-identifiers': 'Identifiers (Names)\n'
@@ -936,6 +883,32 @@
                      '*instance* of the\n'
                      '   owner class.\n'
                      '\n'
+                     'object.__set_name__(self, owner, name)\n'
+                     '\n'
+                     '   Called at the time the owning class *owner* is '
+                     'created. The\n'
+                     '   descriptor has been assigned to *name*.\n'
+                     '\n'
+                     '   Note:\n'
+                     '\n'
+                     '     "__set_name__()" is only called implicitly as part '
+                     'of the "type"\n'
+                     '     constructor, so it will need to be called '
+                     'explicitly with the\n'
+                     '     appropriate parameters when a descriptor is added '
+                     'to a class\n'
+                     '     after initial creation:\n'
+                     '\n'
+                     '        class A:\n'
+                     '           pass\n'
+                     '        descr = custom_descriptor()\n'
+                     '        A.attr = descr\n'
+                     "        descr.__set_name__(A, 'attr')\n"
+                     '\n'
+                     '     See Creating the class object for more details.\n'
+                     '\n'
+                     '   New in version 3.6.\n'
+                     '\n'
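The __set_name__ hook documented in the restored text above, made concrete with a small descriptor; the manual call mirrors the snippet in the docs:

    class Named:
        def __set_name__(self, owner, name):
            self.name = name                 # remember the attribute name
        def __get__(self, obj, objtype=None):
            return '%s accessed' % self.name

    class A:
        attr = Named()                       # type calls __set_name__ here

    print(A().attr)                          # -> 'attr accessed'

    descr = Named()
    A.other = descr                          # added after class creation,
    descr.__set_name__(A, 'other')           # so the hook is called by hand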
                      'The attribute "__objclass__" is interpreted by the '
                      '"inspect" module as\n'
                      'specifying the class where this object was defined '
@@ -1007,16 +980,16 @@
                      '"super(B,\n'
                      '   obj).m()" searches "obj.__class__.__mro__" for the '
                      'base class "A"\n'
-                     '   immediately following "B" and then invokes the '
+                     '   immediately preceding "B" and then invokes the '
                      'descriptor with the\n'
                      '   call: "A.__dict__[\'m\'].__get__(obj, '
                      'obj.__class__)".\n'
                      '\n'
                      'For instance bindings, the precedence of descriptor '
                      'invocation depends\n'
-                     'on which descriptor methods are defined.  A descriptor '
-                     'can define any\n'
-                     'combination of "__get__()", "__set__()" and '
+                     'on which descriptor methods are defined.  A '
+                     'descriptor can define\n'
+                     'any combination of "__get__()", "__set__()" and '
                      '"__delete__()".  If it\n'
                      'does not define "__get__()", then accessing the '
                      'attribute will return\n'
@@ -1038,15 +1011,14 @@
                      'can be\n'
                      'overridden by instances.\n'
                      '\n'
-                     'Python methods (including those decorated with '
-                     '"@staticmethod" and\n'
-                     '"@classmethod") are implemented as non-data '
-                     'descriptors.  Accordingly,\n'
-                     'instances can redefine and override methods.  This '
-                     'allows individual\n'
-                     'instances to acquire behaviors that differ from other '
-                     'instances of the\n'
-                     'same class.\n'
+                     'Python methods (including "staticmethod()" and '
+                     '"classmethod()") are\n'
+                     'implemented as non-data descriptors.  Accordingly, '
+                     'instances can\n'
+                     'redefine and override methods.  This allows individual '
+                     'instances to\n'
+                     'acquire behaviors that differ from other instances of '
+                     'the same class.\n'
                      '\n'
                      'The "property()" function is implemented as a data '
                      'descriptor.\n'
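The precedence rules above can be checked directly: a plain method (a non-data descriptor) is shadowed by an instance attribute, while a property (a data descriptor) is not:

    class C:
        def m(self):
            return 'method'
        @property
        def p(self):
            return 'property'

    c = C()
    c.__dict__['m'] = lambda: 'instance'   # shadows the non-data descriptor
    print(c.m())                           # -> 'instance'
    c.__dict__['p'] = 'instance'
    print(c.p)                             # -> 'property' (data descriptor wins)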
@@ -1059,12 +1031,12 @@
                      '\n'
                      '*__slots__* allow us to explicitly declare data members '
                      '(like\n'
-                     'properties) and deny the creation of "__dict__" and '
+                     'properties) and deny the creation of *__dict__* and '
                      '*__weakref__*\n'
                      '(unless explicitly declared in *__slots__* or available '
                      'in a parent.)\n'
                      '\n'
-                     'The space saved over using "__dict__" can be '
+                     'The space saved over using *__dict__* can be '
                      'significant. Attribute\n'
                      'lookup speed can be significantly improved as well.\n'
                      '\n'
@@ -1076,7 +1048,7 @@
                      '*__slots__*\n'
                      '   reserves space for the declared variables and '
                      'prevents the\n'
-                     '   automatic creation of "__dict__" and *__weakref__* '
+                     '   automatic creation of *__dict__* and *__weakref__* '
                      'for each\n'
                      '   instance.\n'
                      '\n'
@@ -1085,11 +1057,11 @@
                      '--------------------------\n'
                      '\n'
                      '* When inheriting from a class without *__slots__*, the '
-                     '"__dict__" and\n'
+                     '*__dict__* and\n'
                      '  *__weakref__* attribute of the instances will always '
                      'be accessible.\n'
                      '\n'
-                     '* Without a "__dict__" variable, instances cannot be '
+                     '* Without a *__dict__* variable, instances cannot be '
                      'assigned new\n'
                      '  variables not listed in the *__slots__* definition.  '
                      'Attempts to\n'
@@ -1103,28 +1075,28 @@
                      '\n'
                      '* Without a *__weakref__* variable for each instance, '
                      'classes defining\n'
-                     '  *__slots__* do not support "weak references" to its '
-                     'instances. If\n'
-                     '  weak reference support is needed, then add '
+                     '  *__slots__* do not support weak references to its '
+                     'instances. If weak\n'
+                     '  reference support is needed, then add '
                      '"\'__weakref__\'" to the\n'
                      '  sequence of strings in the *__slots__* declaration.\n'
                      '\n'
                      '* *__slots__* are implemented at the class level by '
                      'creating\n'
-                     '  descriptors for each variable name.  As a result, '
-                     'class attributes\n'
-                     '  cannot be used to set default values for instance '
-                     'variables defined\n'
-                     '  by *__slots__*; otherwise, the class attribute would '
-                     'overwrite the\n'
-                     '  descriptor assignment.\n'
+                     '  descriptors (Implementing Descriptors) for each '
+                     'variable name.  As a\n'
+                     '  result, class attributes cannot be used to set default '
+                     'values for\n'
+                     '  instance variables defined by *__slots__*; otherwise, '
+                     'the class\n'
+                     '  attribute would overwrite the descriptor assignment.\n'
                      '\n'
                      '* The action of a *__slots__* declaration is not limited '
                      'to the class\n'
                      '  where it is defined.  *__slots__* declared in parents '
                      'are available\n'
                      '  in child classes. However, child subclasses will get a '
-                     '"__dict__"\n'
+                     '*__dict__*\n'
                      '  and *__weakref__* unless they also define *__slots__* '
                      '(which should\n'
                      '  only contain names of any *additional* slots).\n'
@@ -1144,19 +1116,13 @@
                      '  “variable-length” built-in types such as "int", '
                      '"bytes" and "tuple".\n'
                      '\n'
-                     '* Any non-string *iterable* may be assigned to '
-                     '*__slots__*.\n'
+                     '* Any non-string iterable may be assigned to '
+                     '*__slots__*. Mappings may\n'
+                     '  also be used; however, in the future, special meaning '
+                     'may be\n'
+                     '  assigned to the values corresponding to each key.\n'
                      '\n'
-                     '* If a "dictionary" is used to assign *__slots__*, the '
-                     'dictionary keys\n'
-                     '  will be used as the slot names. The values of the '
-                     'dictionary can be\n'
-                     '  used to provide per-attribute docstrings that will be '
-                     'recognised by\n'
-                     '  "inspect.getdoc()" and displayed in the output of '
-                     '"help()".\n'
-                     '\n'
-                     '* "__class__" assignment works only if both classes have '
+                     '* *__class__* assignment works only if both classes have '
                      'the same\n'
                      '  *__slots__*.\n'
                      '\n'
@@ -1168,10 +1134,10 @@
                      'violations\n'
                      '  raise "TypeError".\n'
                      '\n'
-                     '* If an *iterator* is used for *__slots__* then a '
-                     '*descriptor* is\n'
-                     '  created for each of the iterator’s values. However, '
-                     'the *__slots__*\n'
+                     '* If an iterator is used for *__slots__* then a '
+                     'descriptor is created\n'
+                     '  for each of the iterator’s values. However, the '
+                     '*__slots__*\n'
                      '  attribute will be an empty iterator.\n',
  'attribute-references': 'Attribute references\n'
                          '********************\n'
@@ -1294,10 +1260,6 @@
            'In the latter case, sequence repetition is performed; a negative\n'
            'repetition factor yields an empty sequence.\n'
            '\n'
-           'This operation can be customized using the special "__mul__()" '
-           'and\n'
-           '"__rmul__()" methods.\n'
-           '\n'
            'The "@" (at) operator is intended to be used for matrix\n'
            'multiplication.  No builtin Python types implement this operator.\n'
            '\n'
@@ -1313,10 +1275,6 @@
            'result.  Division by zero raises the "ZeroDivisionError" '
            'exception.\n'
            '\n'
-           'This operation can be customized using the special "__truediv__()" '
-           'and\n'
-           '"__floordiv__()" methods.\n'
-           '\n'
            'The "%" (modulo) operator yields the remainder from the division '
            'of\n'
            'the first argument by the second.  The numeric arguments are '
@@ -1348,10 +1306,6 @@
            'string formatting is described in the Python Library Reference,\n'
            'section printf-style String Formatting.\n'
            '\n'
-           'The *modulo* operation can be customized using the special '
-           '"__mod__()"\n'
-           'method.\n'
-           '\n'
            'The floor division operator, the modulo operator, and the '
            '"divmod()"\n'
            'function are not defined for complex numbers.  Instead, convert to '
@@ -1366,16 +1320,9 @@
            'and then added together. In the latter case, the sequences are\n'
            'concatenated.\n'
            '\n'
-           'This operation can be customized using the special "__add__()" '
-           'and\n'
-           '"__radd__()" methods.\n'
-           '\n'
            'The "-" (subtraction) operator yields the difference of its '
            'arguments.\n'
-           'The numeric arguments are first converted to a common type.\n'
-           '\n'
-           'This operation can be customized using the special "__sub__()" '
-           'method.\n',
+           'The numeric arguments are first converted to a common type.\n',
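This revision of the text drops the explicit pointers to the special methods, but the arithmetic operators are still dispatched through them; a minimal sketch with __add__ and __radd__:

    class Meters:
        def __init__(self, n):
            self.n = n
        def __add__(self, other):    # Meters + number
            return Meters(self.n + other)
        def __radd__(self, other):   # number + Meters, when int defers
            return Meters(other + self.n)

    print((Meters(2) + 3).n)         # -> 5
    print((3 + Meters(2)).n)         # -> 5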
  'bitwise': 'Binary bitwise operations\n'
             '*************************\n'
             '\n'
@@ -1388,18 +1335,14 @@
             '\n'
             'The "&" operator yields the bitwise AND of its arguments, which '
             'must\n'
-            'be integers or one of them must be a custom object overriding\n'
-            '"__and__()" or "__rand__()" special methods.\n'
+            'be integers.\n'
             '\n'
             'The "^" operator yields the bitwise XOR (exclusive OR) of its\n'
-            'arguments, which must be integers or one of them must be a '
-            'custom\n'
-            'object overriding "__xor__()" or "__rxor__()" special methods.\n'
+            'arguments, which must be integers.\n'
             '\n'
             'The "|" operator yields the bitwise (inclusive) OR of its '
             'arguments,\n'
-            'which must be integers or one of them must be a custom object\n'
-            'overriding "__or__()" or "__ror__()" special methods.\n',
+            'which must be integers.\n',
  'bltin-code-objects': 'Code Objects\n'
                        '************\n'
                        '\n'
@@ -1416,10 +1359,6 @@
                        'through their "__code__" attribute. See also the '
                        '"code" module.\n'
                        '\n'
-                       'Accessing "__code__" raises an auditing event '
-                       '"object.__getattr__"\n'
-                       'with arguments "obj" and ""__code__"".\n'
-                       '\n'
                        'A code object can be executed or evaluated by passing '
                        'it (instead of a\n'
                        'source string) to the "exec()" or "eval()"  built-in '
@@ -1764,7 +1703,7 @@
           'original global namespace. (Usually, the suite contains mostly\n'
           'function definitions.)  When the class’s suite finishes execution, '
           'its\n'
-          'execution frame is discarded but its local namespace is saved. [5] '
+          'execution frame is discarded but its local namespace is saved. [3] '
           'A\n'
           'class object is then created using the inheritance list for the '
           'base\n'
@@ -1845,11 +1784,7 @@
                 '   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n'
                 '                     | "is" ["not"] | ["not"] "in"\n'
                 '\n'
-                'Comparisons yield boolean values: "True" or "False". Custom '
-                '*rich\n'
-                'comparison methods* may return non-boolean values. In this '
-                'case Python\n'
-                'will call "bool()" on such value in boolean contexts.\n'
+                'Comparisons yield boolean values: "True" or "False".\n'
                 '\n'
                 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" '
                 'is\n'
@@ -2247,7 +2182,6 @@
              '                     | for_stmt\n'
              '                     | try_stmt\n'
              '                     | with_stmt\n'
-             '                     | match_stmt\n'
              '                     | funcdef\n'
              '                     | classdef\n'
              '                     | async_with_stmt\n'
@@ -2385,6 +2319,33 @@
              ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, '
              '2]".\n'
              '\n'
+             'Note:\n'
+             '\n'
+             '  There is a subtlety when the sequence is being modified by the '
+             'loop\n'
+             '  (this can only occur for mutable sequences, e.g. lists).  An\n'
+             '  internal counter is used to keep track of which item is used '
+             'next,\n'
+             '  and this is incremented on each iteration.  When this counter '
+             'has\n'
+             '  reached the length of the sequence the loop terminates.  This '
+             'means\n'
+             '  that if the suite deletes the current (or a previous) item '
+             'from the\n'
+             '  sequence, the next item will be skipped (since it gets the '
+             'index of\n'
+             '  the current item which has already been treated).  Likewise, '
+             'if the\n'
+             '  suite inserts an item in the sequence before the current item, '
+             'the\n'
+             '  current item will be treated again the next time through the '
+             'loop.\n'
+             '  This can lead to nasty bugs that can be avoided by making a\n'
+             '  temporary copy using a slice of the whole sequence, e.g.,\n'
+             '\n'
+             '     for x in a[:]:\n'
+             '         if x < 0: a.remove(x)\n'
+             '\n'
              '\n'
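The mutation-during-iteration subtlety restored in the note above, verified directly; iterating over a slice copy avoids the skipped item:

    a = [-2, -1, 0, 1]
    for x in a:              # mutating the list being iterated
        if x < 0:
            a.remove(x)
    print(a)                 # -> [-1, 0, 1]: the -1 was skipped

    b = [-2, -1, 0, 1]
    for x in b[:]:           # iterate over a copy instead
        if x < 0:
            b.remove(x)
    print(b)                 # -> [0, 1]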
              'The "try" statement\n'
              '===================\n'
@@ -2420,9 +2381,8 @@
              'compatible\n'
              'with an exception if it is the class or a base class of the '
              'exception\n'
-             'object, or a tuple containing an item that is the class or a '
-             'base\n'
-             'class of the exception object.\n'
+             'object or a tuple containing an item compatible with the '
+             'exception.\n'
              '\n'
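The reworded compatibility rule (class, base class, or a tuple containing a compatible item) in runnable form; KeyError is a subclass of LookupError:

    try:
        raise KeyError('k')
    except (IndexError, LookupError):   # matched via the LookupError entry
        print('handled')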
              'If no except clause matches the exception, the search for an '
              'exception\n'
@@ -2489,32 +2449,11 @@
              '(see\n'
              'section The standard type hierarchy) identifying the point in '
              'the\n'
-             'program where the exception occurred.  The details about the '
-             'exception\n'
-             'accessed via "sys.exc_info()" are restored to their previous '
-             'values\n'
-             'when leaving an exception handler:\n'
-             '\n'
-             '   >>> print(sys.exc_info())\n'
-             '   (None, None, None)\n'
-             '   >>> try:\n'
-             '   ...     raise TypeError\n'
-             '   ... except:\n'
-             '   ...     print(sys.exc_info())\n'
-             '   ...     try:\n'
-             '   ...          raise ValueError\n'
-             '   ...     except:\n'
-             '   ...         print(sys.exc_info())\n'
-             '   ...     print(sys.exc_info())\n'
-             '   ...\n'
-             "   (<class 'TypeError'>, TypeError(), <traceback object at "
-             '0x10efad080>)\n'
-             "   (<class 'ValueError'>, ValueError(), <traceback object at "
-             '0x10efad040>)\n'
-             "   (<class 'TypeError'>, TypeError(), <traceback object at "
-             '0x10efad080>)\n'
-             '   >>> print(sys.exc_info())\n'
-             '   (None, None, None)\n'
+             'program where the exception occurred.  "sys.exc_info()" values '
+             'are\n'
+             'restored to their previous values (before the call) when '
+             'returning\n'
+             'from a function that handled an exception.\n'
              '\n'
              'The optional "else" clause is executed if the control flow '
              'leaves the\n'
@@ -2599,10 +2538,8 @@
              'usage\n'
              'patterns to be encapsulated for convenient reuse.\n'
              '\n'
-             '   with_stmt          ::= "with" ( "(" with_stmt_contents ","? '
-             '")" | with_stmt_contents ) ":" suite\n'
-             '   with_stmt_contents ::= with_item ("," with_item)*\n'
-             '   with_item          ::= expression ["as" target]\n'
+             '   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
+             '   with_item ::= expression ["as" target]\n'
              '\n'
              'The execution of the "with" statement with one “item” proceeds '
              'as\n'
@@ -2694,23 +2631,9 @@
              '       with B() as b:\n'
              '           SUITE\n'
              '\n'
-             'You can also write multi-item context managers in multiple lines '
-             'if\n'
-             'the items are surrounded by parentheses. For example:\n'
-             '\n'
-             '   with (\n'
-             '       A() as a,\n'
-             '       B() as b,\n'
-             '   ):\n'
-             '       SUITE\n'
-             '\n'
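The multi-item equivalence described above, sketched with contextlib (the parenthesized multi-line form being removed here requires Python >= 3.10, so the sketch uses the one-line form):

    from contextlib import contextmanager

    @contextmanager
    def tag(name):
        print('enter', name)
        yield name
        print('exit', name)

    with tag('A') as a, tag('B') as b:   # B exits first, then A
        print('body', a, b)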
              'Changed in version 3.1: Support for multiple context '
              'expressions.\n'
              '\n'
-             'Changed in version 3.10: Support for using grouping parentheses '
-             'to\n'
-             'break the statement in multiple lines.\n'
-             '\n'
              'See also:\n'
              '\n'
              '  **PEP 343** - The “with” statement\n'
@@ -2719,746 +2642,6 @@
              '     statement.\n'
              '\n'
              '\n'
-             'The "match" statement\n'
-             '=====================\n'
-             '\n'
-             'New in version 3.10.\n'
-             '\n'
-             'The match statement is used for pattern matching.  Syntax:\n'
-             '\n'
-             '   match_stmt   ::= \'match\' subject_expr ":" NEWLINE INDENT '
-             'case_block+ DEDENT\n'
-             '   subject_expr ::= star_named_expression "," '
-             'star_named_expressions?\n'
-             '                    | named_expression\n'
-             '   case_block   ::= \'case\' patterns [guard] ":" block\n'
-             '\n'
-             'Note:\n'
-             '\n'
-             '  This section uses single quotes to denote soft keywords.\n'
-             '\n'
-             'Pattern matching takes a pattern as input (following "case") and '
-             'a\n'
-             'subject value (following "match").  The pattern (which may '
-             'contain\n'
-             'subpatterns) is matched against the subject value.  The outcomes '
-             'are:\n'
-             '\n'
-             '* A match success or failure (also termed a pattern success or\n'
-             '  failure).\n'
-             '\n'
-             '* Possible binding of matched values to a name.  The '
-             'prerequisites for\n'
-             '  this are further discussed below.\n'
-             '\n'
-             'The "match" and "case" keywords are soft keywords.\n'
-             '\n'
-             'See also:\n'
-             '\n'
-             '  * **PEP 634** – Structural Pattern Matching: Specification\n'
-             '\n'
-             '  * **PEP 636** – Structural Pattern Matching: Tutorial\n'
-             '\n'
-             '\n'
-             'Overview\n'
-             '--------\n'
-             '\n'
-             'Here’s an overview of the logical flow of a match statement:\n'
-             '\n'
-             '1. The subject expression "subject_expr" is evaluated and a '
-             'resulting\n'
-             '   subject value obtained. If the subject expression contains a '
-             'comma,\n'
-             '   a tuple is constructed using the standard rules.\n'
-             '\n'
-             '2. Each pattern in a "case_block" is attempted to match with '
-             'the\n'
-             '   subject value. The specific rules for success or failure are\n'
-             '   described below. The match attempt can also bind some or all '
-             'of the\n'
-             '   standalone names within the pattern. The precise pattern '
-             'binding\n'
-             '   rules vary per pattern type and are specified below.  **Name\n'
-             '   bindings made during a successful pattern match outlive the\n'
-             '   executed block and can be used after the match statement**.\n'
-             '\n'
-             '      Note:\n'
-             '\n'
-             '        During failed pattern matches, some subpatterns may '
-             'succeed.\n'
-             '        Do not rely on bindings being made for a failed match.\n'
-             '        Conversely, do not rely on variables remaining unchanged '
-             'after\n'
-             '        a failed match.  The exact behavior is dependent on\n'
-             '        implementation and may vary.  This is an intentional '
-             'decision\n'
-             '        made to allow different implementations to add '
-             'optimizations.\n'
-             '\n'
-             '3. If the pattern succeeds, the corresponding guard (if present) '
-             'is\n'
-             '   evaluated. In this case all name bindings are guaranteed to '
-             'have\n'
-             '   happened.\n'
-             '\n'
-             '   * If the guard evaluates as true or is missing, the "block" '
-             'inside\n'
-             '     "case_block" is executed.\n'
-             '\n'
-             '   * Otherwise, the next "case_block" is attempted as described '
-             'above.\n'
-             '\n'
-             '   * If there are no further case blocks, the match statement '
-             'is\n'
-             '     completed.\n'
-             '\n'
-             'Note:\n'
-             '\n'
-             '  Users should generally never rely on a pattern being '
-             'evaluated.\n'
-             '  Depending on implementation, the interpreter may cache values '
-             'or use\n'
-             '  other optimizations which skip repeated evaluations.\n'
-             '\n'
-             'A sample match statement:\n'
-             '\n'
-             '   >>> flag = False\n'
-             '   >>> match (100, 200):\n'
-             '   ...    case (100, 300):  # Mismatch: 200 != 300\n'
-             "   ...        print('Case 1')\n"
-             '   ...    case (100, 200) if flag:  # Successful match, but '
-             'guard fails\n'
-             "   ...        print('Case 2')\n"
-             '   ...    case (100, y):  # Matches and binds y to 200\n'
-             "   ...        print(f'Case 3, y: {y}')\n"
-             '   ...    case _:  # Pattern not attempted\n'
-             "   ...        print('Case 4, I match anything!')\n"
-             '   ...\n'
-             '   Case 3, y: 200\n'
-             '\n'
-             'In this case, "if flag" is a guard.  Read more about that in the '
-             'next\n'
-             'section.\n'
-             '\n'
-             '\n'
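The sample above only runs on Python 3.10 or later; the stdlib snapshot being restored by this revert predates the match statement, which is why the whole topic is removed. For reference, a guard-free variant:

    # Requires Python >= 3.10.
    def describe(point):
        match point:
            case (0, 0):
                return 'origin'
            case (x, 0):
                return 'on the x axis at %s' % x
            case (x, y):
                return 'at %s, %s' % (x, y)

    print(describe((3, 0)))   # -> 'on the x axis at 3'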
-             'Guards\n'
-             '------\n'
-             '\n'
-             '   guard ::= "if" named_expression\n'
-             '\n'
-             'A "guard" (which is part of the "case") must succeed for code '
-             'inside\n'
-             'the "case" block to execute.  It takes the form: "if" followed '
-             'by an\n'
-             'expression.\n'
-             '\n'
-             'The logical flow of a "case" block with a "guard" follows:\n'
-             '\n'
-             '1. Check that the pattern in the "case" block succeeded.  If '
-             'the\n'
-             '   pattern failed, the "guard" is not evaluated and the next '
-             '"case"\n'
-             '   block is checked.\n'
-             '\n'
-             '2. If the pattern succeeded, evaluate the "guard".\n'
-             '\n'
-             '   * If the "guard" condition evaluates as true, the case block '
-             'is\n'
-             '     selected.\n'
-             '\n'
-             '   * If the "guard" condition evaluates as false, the case block '
-             'is\n'
-             '     not selected.\n'
-             '\n'
-             '   * If the "guard" raises an exception during evaluation, the\n'
-             '     exception bubbles up.\n'
-             '\n'
-             'Guards are allowed to have side effects as they are '
-             'expressions.\n'
-             'Guard evaluation must proceed from the first to the last case '
-             'block,\n'
-             'one at a time, skipping case blocks whose pattern(s) don’t all\n'
-             'succeed. (I.e., guard evaluation must happen in order.) Guard\n'
-             'evaluation must stop once a case block is selected.\n'
-             '\n'
-             '\n'
-             'Irrefutable Case Blocks\n'
-             '-----------------------\n'
-             '\n'
-             'An irrefutable case block is a match-all case block.  A match\n'
-             'statement may have at most one irrefutable case block, and it '
-             'must be\n'
-             'last.\n'
-             '\n'
-             'A case block is considered irrefutable if it has no guard and '
-             'its\n'
-             'pattern is irrefutable.  A pattern is considered irrefutable if '
-             'we can\n'
-             'prove from its syntax alone that it will always succeed.  Only '
-             'the\n'
-             'following patterns are irrefutable:\n'
-             '\n'
-             '* AS Patterns whose left-hand side is irrefutable\n'
-             '\n'
-             '* OR Patterns containing at least one irrefutable pattern\n'
-             '\n'
-             '* Capture Patterns\n'
-             '\n'
-             '* Wildcard Patterns\n'
-             '\n'
-             '* parenthesized irrefutable patterns\n'
-             '\n'
-             '\n'
-             'Patterns\n'
-             '--------\n'
-             '\n'
-             'Note:\n'
-             '\n'
-             '  This section uses grammar notations beyond standard EBNF:\n'
-             '\n'
-             '  * the notation "SEP.RULE+" is shorthand for "RULE (SEP '
-             'RULE)*"\n'
-             '\n'
-             '  * the notation "!RULE" is shorthand for a negative lookahead\n'
-             '    assertion\n'
-             '\n'
-             'The top-level syntax for "patterns" is:\n'
-             '\n'
-             '   patterns       ::= open_sequence_pattern | pattern\n'
-             '   pattern        ::= as_pattern | or_pattern\n'
-             '   closed_pattern ::= | literal_pattern\n'
-             '                      | capture_pattern\n'
-             '                      | wildcard_pattern\n'
-             '                      | value_pattern\n'
-             '                      | group_pattern\n'
-             '                      | sequence_pattern\n'
-             '                      | mapping_pattern\n'
-             '                      | class_pattern\n'
-             '\n'
-             'The descriptions below will include a description “in simple '
-             'terms” of\n'
-             'what a pattern does for illustration purposes (credits to '
-             'Raymond\n'
-             'Hettinger for a document that inspired most of the '
-             'descriptions). Note\n'
-             'that these descriptions are purely for illustration purposes and '
-             '**may\n'
-             'not** reflect the underlying implementation.  Furthermore, they '
-             'do not\n'
-             'cover all valid forms.\n'
-             '\n'
-             '\n'
-             'OR Patterns\n'
-             '~~~~~~~~~~~\n'
-             '\n'
-             'An OR pattern is two or more patterns separated by vertical bars '
-             '"|".\n'
-             'Syntax:\n'
-             '\n'
-             '   or_pattern ::= "|".closed_pattern+\n'
-             '\n'
-             'Only the final subpattern may be irrefutable, and each '
-             'subpattern must\n'
-             'bind the same set of names to avoid ambiguity.\n'
-             '\n'
-             'An OR pattern matches each of its subpatterns in turn to the '
-             'subject\n'
-             'value, until one succeeds.  The OR pattern is then considered\n'
-             'successful.  Otherwise, if none of the subpatterns succeed, the '
-             'OR\n'
-             'pattern fails.\n'
-             '\n'
-             'In simple terms, "P1 | P2 | ..." will try to match "P1", if it '
-             'fails\n'
-             'it will try to match "P2", succeeding immediately if any '
-             'succeeds,\n'
-             'failing otherwise.\n'
-             '\n'
-             '\n'
-             'AS Patterns\n'
-             '~~~~~~~~~~~\n'
-             '\n'
-             'An AS pattern matches an OR pattern on the left of the "as" '
-             'keyword\n'
-             'against a subject.  Syntax:\n'
-             '\n'
-             '   as_pattern ::= or_pattern "as" capture_pattern\n'
-             '\n'
-             'If the OR pattern fails, the AS pattern fails.  Otherwise, the '
-             'AS\n'
-             'pattern binds the subject to the name on the right of the as '
-             'keyword\n'
-             'and succeeds. "capture_pattern" cannot be a "_".\n'
-             '\n'
-             'In simple terms "P as NAME" will match with "P", and on success '
-             'it\n'
-             'will set "NAME = <subject>".\n'
-             '\n'
-             '\n'
-             'Literal Patterns\n'
-             '~~~~~~~~~~~~~~~~\n'
-             '\n'
-             'A literal pattern corresponds to most literals in Python.  '
-             'Syntax:\n'
-             '\n'
-             '   literal_pattern ::= signed_number\n'
-             '                       | signed_number "+" NUMBER\n'
-             '                       | signed_number "-" NUMBER\n'
-             '                       | strings\n'
-             '                       | "None"\n'
-             '                       | "True"\n'
-             '                       | "False"\n'
-             '   signed_number   ::= NUMBER | "-" NUMBER\n'
-             '\n'
-             'The rule "strings" and the token "NUMBER" are defined in the '
-             'standard\n'
-             'Python grammar.  Triple-quoted strings are supported.  Raw '
-             'strings and\n'
-             'byte strings are supported.  Formatted string literals are not\n'
-             'supported.\n'
-             '\n'
-             'The forms "signed_number \'+\' NUMBER" and "signed_number \'-\' '
-             'NUMBER"\n'
-             'are for expressing complex numbers; they require a real number '
-             'on the\n'
-             'left and an imaginary number on the right. E.g. "3 + 4j".\n'
-             '\n'
-             'In simple terms, "LITERAL" will succeed only if "<subject> ==\n'
-             'LITERAL". For the singletons "None", "True" and "False", the '
-             '"is"\n'
-             'operator is used.\n'
-             '\n'
-             '\n'
-             'Capture Patterns\n'
-             '~~~~~~~~~~~~~~~~\n'
-             '\n'
-             'A capture pattern binds the subject value to a name. Syntax:\n'
-             '\n'
-             "   capture_pattern ::= !'_' NAME\n"
-             '\n'
-             'A single underscore "_" is not a capture pattern (this is what '
-             '"!\'_\'"\n'
-             'expresses). It is instead treated as a "wildcard_pattern".\n'
-             '\n'
-             'In a given pattern, a given name can only be bound once.  E.g. '
-             '"case\n'
-             'x, x: ..." is invalid while "case [x] | x: ..." is allowed.\n'
-             '\n'
-             'Capture patterns always succeed.  The binding follows scoping '
-             'rules\n'
-             'established by the assignment expression operator in **PEP '
-             '572**; the\n'
-             'name becomes a local variable in the closest containing function '
-             'scope\n'
-             'unless there’s an applicable "global" or "nonlocal" statement.\n'
-             '\n'
-             'In simple terms "NAME" will always succeed and it will set "NAME '
-             '=\n'
-             '<subject>".\n'
-             '\n'
-             '\n'
-             'Wildcard Patterns\n'
-             '~~~~~~~~~~~~~~~~~\n'
-             '\n'
-             'A wildcard pattern always succeeds (matches anything) and binds '
-             'no\n'
-             'name.  Syntax:\n'
-             '\n'
-             "   wildcard_pattern ::= '_'\n"
-             '\n'
-             '"_" is a soft keyword within any pattern, but only within '
-             'patterns.\n'
-             'It is an identifier, as usual, even within "match" subject\n'
-             'expressions, "guard"s, and "case" blocks.\n'
-             '\n'
-             'In simple terms, "_" will always succeed.\n'
-             '\n'
-             '\n'
-             'Value Patterns\n'
-             '~~~~~~~~~~~~~~\n'
-             '\n'
-             'A value pattern represents a named value in Python. Syntax:\n'
-             '\n'
-             '   value_pattern ::= attr\n'
-             '   attr          ::= name_or_attr "." NAME\n'
-             '   name_or_attr  ::= attr | NAME\n'
-             '\n'
-             'The dotted name in the pattern is looked up using standard '
-             'Python name\n'
-             'resolution rules.  The pattern succeeds if the value found '
-             'compares\n'
-             'equal to the subject value (using the "==" equality operator).\n'
-             '\n'
-             'In simple terms "NAME1.NAME2" will succeed only if "<subject> '
-             '==\n'
-             'NAME1.NAME2"\n'
-             '\n'
-             'Note:\n'
-             '\n'
-             '  If the same value occurs multiple times in the same match '
-             'statement,\n'
-             '  the interpreter may cache the first value found and reuse it '
-             'rather\n'
-             '  than repeat the same lookup.  This cache is strictly tied to a '
-             'given\n'
-             '  execution of a given match statement.\n'
-             '\n'
-             '\n'
-             'Group Patterns\n'
-             '~~~~~~~~~~~~~~\n'
-             '\n'
-             'A group pattern allows users to add parentheses around patterns '
-             'to\n'
-             'emphasize the intended grouping.  Otherwise, it has no '
-             'additional\n'
-             'syntax. Syntax:\n'
-             '\n'
-             '   group_pattern ::= "(" pattern ")"\n'
-             '\n'
-             'In simple terms "(P)" has the same effect as "P".\n'
-             '\n'
-             '\n'
-             'Sequence Patterns\n'
-             '~~~~~~~~~~~~~~~~~\n'
-             '\n'
-             'A sequence pattern contains several subpatterns to be matched '
-             'against\n'
-             'sequence elements. The syntax is similar to the unpacking of a '
-             'list or\n'
-             'tuple.\n'
-             '\n'
-             '   sequence_pattern       ::= "[" [maybe_sequence_pattern] "]"\n'
-             '                        | "(" [open_sequence_pattern] ")"\n'
-             '   open_sequence_pattern  ::= maybe_star_pattern "," '
-             '[maybe_sequence_pattern]\n'
-             '   maybe_sequence_pattern ::= ",".maybe_star_pattern+ ","?\n'
-             '   maybe_star_pattern     ::= star_pattern | pattern\n'
-             '   star_pattern           ::= "*" (capture_pattern | '
-             'wildcard_pattern)\n'
-             '\n'
-             'There is no difference if parentheses or square brackets are '
-             'used for\n'
-             'sequence patterns (i.e. "(...)" vs "[...]" ).\n'
-             '\n'
-             'Note:\n'
-             '\n'
-             '  A single pattern enclosed in parentheses without a trailing '
-             'comma\n'
-             '  (e.g. "(3 | 4)") is a group pattern, while a single pattern '
-             'enclosed\n'
-             '  in square brackets (e.g. "[3 | 4]") is still a sequence '
-             'pattern.\n'
-             '\n'
-             'At most one star subpattern may be in a sequence pattern.  The '
-             'star\n'
-             'subpattern may occur in any position. If no star subpattern is\n'
-             'present, the sequence pattern is a fixed-length sequence '
-             'pattern;\n'
-             'otherwise it is a variable-length sequence pattern.\n'
-             '\n'
-             'The following is the logical flow for matching a sequence '
-             'pattern\n'
-             'against a subject value:\n'
-             '\n'
-             '1. If the subject value is not a sequence [2], the sequence '
-             'pattern\n'
-             '   fails.\n'
-             '\n'
-             '2. If the subject value is an instance of "str", "bytes" or\n'
-             '   "bytearray" the sequence pattern fails.\n'
-             '\n'
-             '3. The subsequent steps depend on whether the sequence pattern '
-             'is\n'
-             '   fixed or variable-length.\n'
-             '\n'
-             '   If the sequence pattern is fixed-length:\n'
-             '\n'
-             '   1. If the length of the subject sequence is not equal to the '
-             'number\n'
-             '      of subpatterns, the sequence pattern fails\n'
-             '\n'
-             '   2. Subpatterns in the sequence pattern are matched to their\n'
-             '      corresponding items in the subject sequence from left to '
-             'right.\n'
-             '      Matching stops as soon as a subpattern fails.  If all\n'
-             '      subpatterns succeed in matching their corresponding item, '
-             'the\n'
-             '      sequence pattern succeeds.\n'
-             '\n'
-             '   Otherwise, if the sequence pattern is variable-length:\n'
-             '\n'
-             '   1. If the length of the subject sequence is less than the '
-             'number of\n'
-             '      non-star subpatterns, the sequence pattern fails.\n'
-             '\n'
-             '   2. The leading non-star subpatterns are matched to their\n'
-             '      corresponding items as for fixed-length sequences.\n'
-             '\n'
-             '   3. If the previous step succeeds, the star subpattern matches '
-             'a\n'
-             '      list formed of the remaining subject items, excluding the\n'
-             '      remaining items corresponding to non-star subpatterns '
-             'following\n'
-             '      the star subpattern.\n'
-             '\n'
-             '   4. Remaining non-star subpatterns are matched to their\n'
-             '      corresponding subject items, as for a fixed-length '
-             'sequence.\n'
-             '\n'
-             '   Note:\n'
-             '\n'
-             '     The length of the subject sequence is obtained via "len()" '
-             '(i.e.\n'
-             '     via the "__len__()" protocol).  This length may be cached '
-             'by the\n'
-             '     interpreter in a similar manner as value patterns.\n'
-             '\n'
-             'In simple terms "[P1, P2, P3," … ", P<N>]" matches only if all '
-             'the\n'
-             'following happens:\n'
-             '\n'
-             '* check "<subject>" is a sequence\n'
-             '\n'
-             '* "len(subject) == <N>"\n'
-             '\n'
-             '* "P1" matches "<subject>[0]" (note that this match can also '
-             'bind\n'
-             '  names)\n'
-             '\n'
-             '* "P2" matches "<subject>[1]" (note that this match can also '
-             'bind\n'
-             '  names)\n'
-             '\n'
-             '* … and so on for the corresponding pattern/element.\n'
-             '\n'
-             '\n'
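-             'For example (the subject "command" is illustrative):\n'
-             '\n'
-             '   match command.split():\n'
-             '       case [action]:\n'
-             '           ...\n'
-             '       case [action, obj]:\n'
-             '           ...\n'
-             '       case [action, *args]:\n'
-             '           ...\n'
-             '\n'
-             '\n'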
-             'Mapping Patterns\n'
-             '~~~~~~~~~~~~~~~~\n'
-             '\n'
-             'A mapping pattern contains one or more key-value patterns.  The '
-             'syntax\n'
-             'is similar to the construction of a dictionary. Syntax:\n'
-             '\n'
-             '   mapping_pattern     ::= "{" [items_pattern] "}"\n'
-             '   items_pattern       ::= ",".key_value_pattern+ ","?\n'
-             '   key_value_pattern   ::= (literal_pattern | value_pattern) ":" '
-             'pattern\n'
-             '                         | double_star_pattern\n'
-             '   double_star_pattern ::= "**" capture_pattern\n'
-             '\n'
-             'At most one double star pattern may be in a mapping pattern.  '
-             'The\n'
-             'double star pattern must be the last subpattern in the mapping\n'
-             'pattern.\n'
-             '\n'
-             'Duplicate keys in mapping patterns are disallowed. Duplicate '
-             'literal\n'
-             'keys will raise a "SyntaxError". Two keys that otherwise have '
-             'the same\n'
-             'value will raise a "ValueError" at runtime.\n'
-             '\n'
-             'The following is the logical flow for matching a mapping '
-             'pattern\n'
-             'against a subject value:\n'
-             '\n'
-             '1. If the subject value is not a mapping [3], the mapping '
-             'pattern\n'
-             '   fails.\n'
-             '\n'
-             '2. If every key given in the mapping pattern is present in the '
-             'subject\n'
-             '   mapping, and the pattern for each key matches the '
-             'corresponding\n'
-             '   item of the subject mapping, the mapping pattern succeeds.\n'
-             '\n'
-             '3. If duplicate keys are detected in the mapping pattern, the '
-             'pattern\n'
-             '   is considered invalid. A "SyntaxError" is raised for '
-             'duplicate\n'
-             '   literal values; or a "ValueError" for named keys of the same '
-             'value.\n'
-             '\n'
-             'Note:\n'
-             '\n'
-             '  Key-value pairs are matched using the two-argument form of '
-             'the\n'
-             '  mapping subject’s "get()" method.  Matched key-value pairs '
-             'must\n'
-             '  already be present in the mapping, and not created on-the-fly '
-             'via\n'
-             '  "__missing__()" or "__getitem__()".\n'
-             '\n'
-             'In simple terms "{KEY1: P1, KEY2: P2, ... }" matches only if all '
-             'the\n'
-             'following happens:\n'
-             '\n'
-             '* check "<subject>" is a mapping\n'
-             '\n'
-             '* "KEY1 in <subject>"\n'
-             '\n'
-             '* "P1" matches "<subject>[KEY1]"\n'
-             '\n'
-             '* … and so on for the corresponding KEY/pattern pair.\n'
-             '\n'
-             '\n'
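-             'For example (the subject "config" and the "connect()" helper '
-             'are\n'
-             'illustrative):\n'
-             '\n'
-             '   match config:\n'
-             '       case {"host": str(host), "port": int(port)}:\n'
-             '           connect(host, port)\n'
-             '       case {"host": str(host)}:\n'
-             '           connect(host, 80)\n'
-             '\n'
-             '\n'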
-             'Class Patterns\n'
-             '~~~~~~~~~~~~~~\n'
-             '\n'
-             'A class pattern represents a class and its positional and '
-             'keyword\n'
-             'arguments (if any).  Syntax:\n'
-             '\n'
-             '   class_pattern       ::= name_or_attr "(" [pattern_arguments '
-             '","?] ")"\n'
-             '   pattern_arguments   ::= positional_patterns ["," '
-             'keyword_patterns]\n'
-             '                         | keyword_patterns\n'
-             '   positional_patterns ::= ",".pattern+\n'
-             '   keyword_patterns    ::= ",".keyword_pattern+\n'
-             '   keyword_pattern     ::= NAME "=" pattern\n'
-             '\n'
-             'The same keyword should not be repeated in class patterns.\n'
-             '\n'
-             'The following is the logical flow for matching a class pattern '
-             'against\n'
-             'a subject value:\n'
-             '\n'
-             '1. If "name_or_attr" is not an instance of the builtin "type" , '
-             'raise\n'
-             '   "TypeError".\n'
-             '\n'
-             '2. If the subject value is not an instance of "name_or_attr" '
-             '(tested\n'
-             '   via "isinstance()"), the class pattern fails.\n'
-             '\n'
-             '3. If no pattern arguments are present, the pattern succeeds.\n'
-             '   Otherwise, the subsequent steps depend on whether keyword or\n'
-             '   positional argument patterns are present.\n'
-             '\n'
-             '   For a number of built-in types (specified below), a single\n'
-             '   positional subpattern is accepted which will match the '
-             'entire\n'
-             '   subject; for these types keyword patterns also work as for '
-             'other\n'
-             '   types.\n'
-             '\n'
-             '   If only keyword patterns are present, they are processed as\n'
-             '   follows, one by one:\n'
-             '\n'
-             '   I. The keyword is looked up as an attribute on the subject.\n'
-             '\n'
-             '      * If this raises an exception other than "AttributeError", '
-             'the\n'
-             '        exception bubbles up.\n'
-             '\n'
-             '      * If this raises "AttributeError", the class pattern has '
-             'failed.\n'
-             '\n'
-             '      * Else, the subpattern associated with the keyword pattern '
-             'is\n'
-             '        matched against the subject’s attribute value.  If this '
-             'fails,\n'
-             '        the class pattern fails; if this succeeds, the match '
-             'proceeds\n'
-             '        to the next keyword.\n'
-             '\n'
-             '   II. If all keyword patterns succeed, the class pattern '
-             'succeeds.\n'
-             '\n'
-             '   If any positional patterns are present, they are converted '
-             'to\n'
-             '   keyword patterns using the "__match_args__" attribute on the '
-             'class\n'
-             '   "name_or_attr" before matching:\n'
-             '\n'
-             '   I. The equivalent of "getattr(cls, "__match_args__", ())" is\n'
-             '      called.\n'
-             '\n'
-             '      * If this raises an exception, the exception bubbles up.\n'
-             '\n'
-             '      * If the returned value is not a tuple, the conversion '
-             'fails and\n'
-             '        "TypeError" is raised.\n'
-             '\n'
-             '      * If there are more positional patterns than\n'
-             '        "len(cls.__match_args__)", "TypeError" is raised.\n'
-             '\n'
-             '      * Otherwise, positional pattern "i" is converted to a '
-             'keyword\n'
-             '        pattern using "__match_args__[i]" as the keyword.\n'
-             '        "__match_args__[i]" must be a string; if not "TypeError" '
-             'is\n'
-             '        raised.\n'
-             '\n'
-             '      * If there are duplicate keywords, "TypeError" is raised.\n'
-             '\n'
-             '      See also:\n'
-             '\n'
-             '        Customizing positional arguments in class pattern '
-             'matching\n'
-             '\n'
-             '   II. Once all positional patterns have been converted to '
-             'keyword\n'
-             '      patterns, the match proceeds as if there were only '
-             'keyword\n'
-             '      patterns.\n'
-             '\n'
-             '   For the following built-in types the handling of positional\n'
-             '   subpatterns is different:\n'
-             '\n'
-             '   * "bool"\n'
-             '\n'
-             '   * "bytearray"\n'
-             '\n'
-             '   * "bytes"\n'
-             '\n'
-             '   * "dict"\n'
-             '\n'
-             '   * "float"\n'
-             '\n'
-             '   * "frozenset"\n'
-             '\n'
-             '   * "int"\n'
-             '\n'
-             '   * "list"\n'
-             '\n'
-             '   * "set"\n'
-             '\n'
-             '   * "str"\n'
-             '\n'
-             '   * "tuple"\n'
-             '\n'
-             '   These classes accept a single positional argument, and the '
-             'pattern\n'
-             '   there is matched against the whole object rather than an '
-             'attribute.\n'
-             '   For example "int(0|1)" matches the value "0", but not the '
-             'values\n'
-             '   "0.0" or "False".\n'
-             '\n'
-             'In simple terms "CLS(P1, attr=P2)" matches only if the '
-             'following\n'
-             'happens:\n'
-             '\n'
-             '* "isinstance(<subject>, CLS)"\n'
-             '\n'
-             '* convert "P1" to a keyword pattern using "CLS.__match_args__"\n'
-             '\n'
-             '* For each keyword argument "attr=P2":\n'
-             '     * "hasattr(<subject>, "attr")"\n'
-             '\n'
-             '     * "P2" matches "<subject>.attr"\n'
-             '\n'
-             '* … and so on for the corresponding keyword argument/pattern '
-             'pair.\n'
-             '\n'
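-             'For example (the "Point" class and the subject "p" are\n'
-             'illustrative):\n'
-             '\n'
-             '   class Point:\n'
-             '       __match_args__ = ("x", "y")\n'
-             '       def __init__(self, x, y):\n'
-             '           self.x, self.y = x, y\n'
-             '\n'
-             '   match p:\n'
-             '       case Point(0, y=0):\n'
-             '           print("origin")\n'
-             '\n'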
-             'See also:\n'
-             '\n'
-             '  * **PEP 634** – Structural Pattern Matching: Specification\n'
-             '\n'
-             '  * **PEP 636** – Structural Pattern Matching: Tutorial\n'
-             '\n'
-             '\n'
              'Function definitions\n'
              '====================\n'
              '\n'
@@ -3472,6 +2655,7 @@
              '   decorators                ::= decorator+\n'
              '   decorator                 ::= "@" assignment_expression '
              'NEWLINE\n'
+             '   dotted_name               ::= identifier ("." identifier)*\n'
              '   parameter_list            ::= defparameter ("," '
              'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n'
              '                        | parameter_list_no_posonly\n'
@@ -3496,7 +2680,7 @@
              '\n'
              'The function definition does not execute the function body; this '
              'gets\n'
-             'executed only when the function is called. [4]\n'
+             'executed only when the function is called. [2]\n'
              '\n'
              'A function definition may be wrapped by one or more *decorator*\n'
              'expressions. Decorator expressions are evaluated when the '
@@ -3549,17 +2733,17 @@
              '“pre-\n'
              'computed” value is used for each call.  This is especially '
              'important\n'
-             'to understand when a default parameter value is a mutable '
-             'object, such\n'
-             'as a list or a dictionary: if the function modifies the object '
-             '(e.g.\n'
-             'by appending an item to a list), the default parameter value is '
-             'in\n'
-             'effect modified.  This is generally not what was intended.  A '
-             'way\n'
-             'around this is to use "None" as the default, and explicitly test '
-             'for\n'
-             'it in the body of the function, e.g.:\n'
+             'to understand when a default parameter is a mutable object, such '
+             'as a\n'
+             'list or a dictionary: if the function modifies the object (e.g. '
+             'by\n'
+             'appending an item to a list), the default value is in effect '
+             'modified.\n'
+             'This is generally not what was intended.  A way around this is '
+             'to use\n'
+             '"None" as the default, and explicitly test for it in the body of '
+             'the\n'
+             'function, e.g.:\n'
              '\n'
              '   def whats_on_the_telly(penguin=None):\n'
              '       if penguin is None:\n'
@@ -3571,7 +2755,7 @@
              'Calls.\n'
              'A function call always assigns values to all parameters '
              'mentioned in\n'
-             'the parameter list, either from positional arguments, from '
+             'the parameter list, either from positional arguments, from '
              'keyword\n'
              'arguments, or from default values.  If the form “"*identifier"” '
              'is\n'
@@ -3583,14 +2767,8 @@
              'new\n'
              'empty mapping of the same type.  Parameters after “"*"” or\n'
              '“"*identifier"” are keyword-only parameters and may only be '
-             'passed by\n'
-             'keyword arguments.  Parameters before “"/"” are positional-only\n'
-             'parameters and may only be passed by positional arguments.\n'
-             '\n'
-             'Changed in version 3.8: The "/" function parameter syntax may be '
-             'used\n'
-             'to indicate positional-only parameters. See **PEP 570** for '
-             'details.\n'
+             'passed by\n'
+             'keyword arguments.\n'
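+             '\n'
+             'For example (the function name is illustrative):\n'
+             '\n'
+             '   def move(*, dx=0, dy=0):\n'
+             '       return (dx, dy)\n'
+             '\n'
+             '   move(dx=1)     # ok\n'
+             '   # move(1)     -> TypeError: dx and dy are keyword-only\n'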
              '\n'
              'Parameters may have an *annotation* of the form “": '
              'expression"”\n'
@@ -3701,7 +2879,7 @@
              'function definitions.)  When the class’s suite finishes '
              'execution, its\n'
              'execution frame is discarded but its local namespace is saved. '
-             '[5] A\n'
+             '[3] A\n'
              'class object is then created using the inheritance list for the '
              'base\n'
              'classes and the saved local namespace for the attribute '
@@ -3786,9 +2964,12 @@
              '\n'
              'Execution of Python coroutines can be suspended and resumed at '
              'many\n'
-             'points (see *coroutine*). "await" expressions, "async for" and '
-             '"async\n'
-             'with" can only be used in the body of a coroutine function.\n'
+             'points (see *coroutine*).  Inside the body of a coroutine '
+             'function,\n'
+             '"await" and "async" identifiers become reserved keywords; '
+             '"await"\n'
+             'expressions, "async for" and "async with" can only be used in\n'
+             'coroutine function bodies.\n'
              '\n'
              'Functions defined with "async def" syntax are always coroutine\n'
              'functions, even if they do not contain "await" or "async" '
@@ -3804,22 +2985,19 @@
              '       do_stuff()\n'
              '       await some_coroutine()\n'
              '\n'
-             'Changed in version 3.7: "await" and "async" are now keywords;\n'
-             'previously they were only treated as such inside the body of a\n'
-             'coroutine function.\n'
-             '\n'
              '\n'
              'The "async for" statement\n'
              '-------------------------\n'
              '\n'
              '   async_for_stmt ::= "async" for_stmt\n'
              '\n'
-             'An *asynchronous iterable* provides an "__aiter__" method that\n'
-             'directly returns an *asynchronous iterator*, which can call\n'
-             'asynchronous code in its "__anext__" method.\n'
+             'An *asynchronous iterable* is able to call asynchronous code in '
+             'its\n'
+             '*iter* implementation, and an *asynchronous iterator* can call\n'
+             'asynchronous code in its *next* method.\n'
              '\n'
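              'For example, a minimal asynchronous iterator (the class name '
              'is\n'
              'illustrative):\n'
              '\n'
              '   class Ticker:\n'
              '       def __init__(self, n):\n'
              '           self.n = n\n'
              '       def __aiter__(self):\n'
              '           return self\n'
              '       async def __anext__(self):\n'
              '           if self.n <= 0:\n'
              '               raise StopAsyncIteration\n'
              '           self.n -= 1\n'
              '           return self.n\n'
              '\n'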
              'The "async for" statement allows convenient iteration over\n'
-             'asynchronous iterables.\n'
+             'asynchronous iterators.\n'
              '\n'
              'The following code:\n'
              '\n'
@@ -3905,65 +3083,13 @@
              'exception.\n'
              '    That new exception causes the old one to be lost.\n'
              '\n'
-             '[2] In pattern matching, a sequence is defined as one of the\n'
-             '    following:\n'
-             '\n'
-             '       * a class that inherits from "collections.abc.Sequence"\n'
-             '\n'
-             '       * a Python class that has been registered as\n'
-             '         "collections.abc.Sequence"\n'
-             '\n'
-             '       * a builtin class that has its (CPython) '
-             '"Py_TPFLAGS_SEQUENCE"\n'
-             '         bit set\n'
-             '\n'
-             '       * a class that inherits from any of the above\n'
-             '\n'
-             '    The following standard library classes are sequences:\n'
-             '\n'
-             '       * "array.array"\n'
-             '\n'
-             '       * "collections.deque"\n'
-             '\n'
-             '       * "list"\n'
-             '\n'
-             '       * "memoryview"\n'
-             '\n'
-             '       * "range"\n'
-             '\n'
-             '       * "tuple"\n'
-             '\n'
-             '    Note:\n'
-             '\n'
-             '      Subject values of type "str", "bytes", and "bytearray" do '
-             'not\n'
-             '      match sequence patterns.\n'
-             '\n'
-             '[3] In pattern matching, a mapping is defined as one of the '
-             'following:\n'
-             '\n'
-             '       * a class that inherits from "collections.abc.Mapping"\n'
-             '\n'
-             '       * a Python class that has been registered as\n'
-             '         "collections.abc.Mapping"\n'
-             '\n'
-             '       * a builtin class that has its (CPython) '
-             '"Py_TPFLAGS_MAPPING"\n'
-             '         bit set\n'
-             '\n'
-             '       * a class that inherits from any of the above\n'
-             '\n'
-             '    The standard library classes "dict" and '
-             '"types.MappingProxyType"\n'
-             '    are mappings.\n'
-             '\n'
-             '[4] A string literal appearing as the first statement in the '
+             '[2] A string literal appearing as the first statement in the '
              'function\n'
              '    body is transformed into the function’s "__doc__" attribute '
              'and\n'
              '    therefore the function’s *docstring*.\n'
              '\n'
-             '[5] A string literal appearing as the first statement in the '
+             '[3] A string literal appearing as the first statement in the '
              'class\n'
              '    body is transformed into the namespace’s "__doc__" item and\n'
              '    therefore the class’s *docstring*.\n',
@@ -4101,13 +3227,13 @@
                   '\n'
                   '   If "__new__()" is invoked during object construction and '
                   'it returns\n'
-                  '   an instance of *cls*, then the new instance’s '
-                  '"__init__()" method\n'
-                  '   will be invoked like "__init__(self[, ...])", where '
-                  '*self* is the\n'
-                  '   new instance and the remaining arguments are the same as '
-                  'were\n'
-                  '   passed to the object constructor.\n'
+                  '   an instance or subclass of *cls*, then the new '
+                  'instance’s\n'
+                  '   "__init__()" method will be invoked like '
+                  '"__init__(self[, ...])",\n'
+                  '   where *self* is the new instance and the remaining '
+                  'arguments are\n'
+                  '   the same as were passed to the object constructor.\n'
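+                  '\n'
+                  '   For example (the class is illustrative):\n'
+                  '\n'
+                  '      class Point:\n'
+                  '          def __new__(cls, x, y):\n'
+                  '              # returning an instance of cls means\n'
+                  '              # __init__() will run next\n'
+                  '              return super().__new__(cls)\n'
+                  '\n'
+                  '          def __init__(self, x, y):\n'
+                  '              self.x, self.y = x, y\n'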
                   '\n'
                   '   If "__new__()" does not return an instance of *cls*, '
                   'then the new\n'
@@ -4602,16 +3728,17 @@
              'debugger will pause execution just before the first line of the\n'
              'module.\n'
              '\n'
-             'The typical usage to break into the debugger is to insert:\n'
+             'The typical usage to break into the debugger from a running '
+             'program is\n'
+             'to insert\n'
              '\n'
              '   import pdb; pdb.set_trace()\n'
              '\n'
-             'at the location you want to break into the debugger, and then '
-             'run the\n'
-             'program. You can then step through the code following this '
-             'statement,\n'
-             'and continue running without the debugger using the "continue"\n'
-             'command.\n'
+             'at the location you want to break into the debugger.  You can '
+             'then\n'
+             'step through the code following this statement, and continue '
+             'running\n'
+             'without the debugger using the "continue" command.\n'
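+             '\n'
+             'For example (the function is illustrative):\n'
+             '\n'
+             '   def divide(a, b):\n'
+             '       import pdb; pdb.set_trace()   # pauses here\n'
+             '       return a / b\n'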
              '\n'
              'New in version 3.7: The built-in "breakpoint()", when called '
              'with\n'
@@ -5467,32 +4594,20 @@
               'binding\n'
               'operations.\n'
               '\n'
-              'The following constructs bind names:\n'
-              '\n'
-              '* formal parameters to functions,\n'
-              '\n'
-              '* class definitions,\n'
-              '\n'
-              '* function definitions,\n'
-              '\n'
-              '* assignment expressions,\n'
-              '\n'
-              '* targets that are identifiers if occurring in an assignment:\n'
-              '\n'
-              '  * "for" loop header,\n'
-              '\n'
-              '  * after "as" in a "with" statement, "except" clause or in the '
-              'as-\n'
-              '    pattern in structural pattern matching,\n'
-              '\n'
-              '  * in a capture pattern in structural pattern matching\n'
-              '\n'
-              '* "import" statements.\n'
-              '\n'
-              'The "import" statement of the form "from ... import *" binds '
-              'all names\n'
-              'defined in the imported module, except those beginning with an\n'
-              'underscore. This form may only be used at the module level.\n'
+              'The following constructs bind names: formal parameters to '
+              'functions,\n'
+              '"import" statements, class and function definitions (these bind '
+              'the\n'
+              'class or function name in the defining block), and targets that '
+              'are\n'
+              'identifiers if occurring in an assignment, "for" loop header, '
+              'or after\n'
+              '"as" in a "with" statement or "except" clause. The "import" '
+              'statement\n'
+              'of the form "from ... import *" binds all names defined in the\n'
+              'imported module, except those beginning with an underscore.  '
+              'This form\n'
+              'may only be used at the module level.\n'
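+              '\n'
+              'For example (the names are illustrative):\n'
+              '\n'
+              '   import math                  # binds "math"\n'
+              '\n'
+              '   def area(r):                 # binds "area"; "r" is a\n'
+              '       return math.pi * r * r   # formal parameter, also bound\n'
+              '\n'
+              '   for i in range(3):           # the "for" header binds "i"\n'
+              '       pass\n'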
               '\n'
               'A target occurring in a "del" statement is also considered '
               'bound for\n'
@@ -5565,9 +4680,9 @@
               'operations.\n'
               '\n'
               'If the "global" statement occurs within a block, all uses of '
-              'the names\n'
-              'specified in the statement refer to the bindings of those names '
-              'in the\n'
+              'the name\n'
+              'specified in the statement refer to the binding of that name in '
+              'the\n'
               'top-level namespace.  Names are resolved in the top-level '
               'namespace by\n'
               'searching the global namespace, i.e. the namespace of the '
@@ -5576,10 +4691,9 @@
               'namespace\n'
               'of the module "builtins".  The global namespace is searched '
               'first.  If\n'
-              'the names are not found there, the builtins namespace is '
-              'searched.\n'
-              'The "global" statement must precede all uses of the listed '
-              'names.\n'
+              'the name is not found there, the builtins namespace is '
+              'searched.  The\n'
+              '"global" statement must precede all uses of the name.\n'
               '\n'
               'The "global" statement has the same scope as a name binding '
               'operation\n'
@@ -5873,7 +4987,30 @@
         'all by the loop.  Hint: the built-in function "range()" returns an\n'
         'iterator of integers suitable to emulate the effect of Pascal’s "for '
         'i\n'
-        ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n',
+        ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n'
+        '\n'
+        'Note:\n'
+        '\n'
+        '  There is a subtlety when the sequence is being modified by the '
+        'loop\n'
+        '  (this can only occur for mutable sequences, e.g. lists).  An\n'
+        '  internal counter is used to keep track of which item is used next,\n'
+        '  and this is incremented on each iteration.  When this counter has\n'
+        '  reached the length of the sequence the loop terminates.  This '
+        'means\n'
+        '  that if the suite deletes the current (or a previous) item from '
+        'the\n'
+        '  sequence, the next item will be skipped (since it gets the index '
+        'of\n'
+        '  the current item which has already been treated).  Likewise, if '
+        'the\n'
+        '  suite inserts an item in the sequence before the current item, the\n'
+        '  current item will be treated again the next time through the loop.\n'
+        '  This can lead to nasty bugs that can be avoided by making a\n'
+        '  temporary copy using a slice of the whole sequence, e.g.,\n'
+        '\n'
+        '     for x in a[:]:\n'
+        '         if x < 0: a.remove(x)\n',
  'formatstrings': 'Format String Syntax\n'
                   '********************\n'
                   '\n'
@@ -5883,11 +5020,9 @@
                   '"Formatter",\n'
                   'subclasses can define their own format string syntax).  The '
                   'syntax is\n'
-                  'related to that of formatted string literals, but it is '
-                  'less\n'
-                  'sophisticated and, in particular, does not support '
-                  'arbitrary\n'
-                  'expressions.\n'
+                  'related to that of formatted string literals, but there '
+                  'are\n'
+                  'differences.\n'
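+                  '\n'
+                  'For example:\n'
+                  '\n'
+                  '   >>> "The sum of 1 + 2 is {0}".format(1 + 2)\n'
+                  '   \'The sum of 1 + 2 is 3\'\n'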
                   '\n'
                   'Format strings contain “replacement fields” surrounded by '
                   'curly braces\n'
@@ -6081,7 +5216,7 @@
                   'character that can be any character and defaults to a space '
                   'if\n'
                   'omitted. It is not possible to use a literal curly brace '
-                  '(”"{"” or\n'
+                  '(“"{"” or\n'
                   '“"}"”) as the *fill* character in a formatted string '
                   'literal or when\n'
                   'using the "str.format()" method.  However, it is possible '
@@ -6119,9 +5254,9 @@
                   '   |           | in the form ‘+000000120’. This alignment '
                   'option is only    |\n'
                   '   |           | valid for numeric types.  It becomes the '
-                  'default for       |\n'
-                  '   |           | numbers when ‘0’ immediately precedes the '
-                  'field width.     |\n'
+                  'default when ‘0’  |\n'
+                  '   |           | immediately precedes the field '
+                  'width.                      |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'^\'"     | Forces the field to be centered within '
@@ -6176,19 +5311,19 @@
                   'complex\n'
                   'types. For integers, when binary, octal, or hexadecimal '
                   'output is\n'
-                  'used, this option adds the respective prefix "\'0b\'", '
-                  '"\'0o\'", "\'0x\'",\n'
-                  'or "\'0X\'" to the output value. For float and complex the '
-                  'alternate\n'
-                  'form causes the result of the conversion to always contain '
-                  'a decimal-\n'
-                  'point character, even if no digits follow it. Normally, a '
-                  'decimal-\n'
-                  'point character appears in the result of these conversions '
-                  'only if a\n'
-                  'digit follows it. In addition, for "\'g\'" and "\'G\'" '
-                  'conversions,\n'
-                  'trailing zeros are not removed from the result.\n'
+                  'used, this option adds the respective prefix "\'0b\'", '
+                  '"\'0o\'", or "\'0x\'"\n'
+                  'to the output value. For float and complex the alternate '
+                  'form causes\n'
+                  'the result of the conversion to always contain a '
+                  'decimal-point\n'
+                  'character, even if no digits follow it. Normally, a '
+                  'decimal-point\n'
+                  'character appears in the result of these conversions only '
+                  'if a digit\n'
+                  'follows it. In addition, for "\'g\'" and "\'G\'" '
+                  'conversions, trailing\n'
+                  'zeros are not removed from the result.\n'
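+                  '\n'
+                  'For example:\n'
+                  '\n'
+                  '   >>> format(255, \'#x\')\n'
+                  '   \'0xff\'\n'
+                  '   >>> format(3.0, \'#.0f\')\n'
+                  '   \'3.\'\n'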
                   '\n'
                   'The "\',\'" option signals the use of a comma for a '
                   'thousands separator.\n'
@@ -6229,23 +5364,19 @@
                   'with an\n'
                   '*alignment* type of "\'=\'".\n'
                   '\n'
-                  'Changed in version 3.10: Preceding the *width* field by '
-                  '"\'0\'" no\n'
-                  'longer affects the default alignment for strings.\n'
-                  '\n'
-                  'The *precision* is a decimal integer indicating how many '
+                  'The *precision* is a decimal number indicating how many '
                   'digits should\n'
-                  'be displayed after the decimal point for presentation types '
-                  '"\'f\'" and\n'
-                  '"\'F\'", or before and after the decimal point for '
-                  'presentation types\n'
-                  '"\'g\'" or "\'G\'".  For string presentation types the '
-                  'field indicates the\n'
-                  'maximum field size - in other words, how many characters '
-                  'will be used\n'
-                  'from the field content.  The *precision* is not allowed for '
-                  'integer\n'
-                  'presentation types.\n'
+                  'be displayed after the decimal point for a floating point '
+                  'value\n'
+                  'formatted with "\'f\'" and "\'F\'", or before and after the '
+                  'decimal point\n'
+                  'for a floating point value formatted with "\'g\'" or '
+                  '"\'G\'".  For non-\n'
+                  'number types the field indicates the maximum field size - '
+                  'in other\n'
+                  'words, how many characters will be used from the field '
+                  'content. The\n'
+                  '*precision* is not allowed for integer values.\n'
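+                  '\n'
+                  'For example:\n'
+                  '\n'
+                  '   >>> format(3.14159, \'.2f\')\n'
+                  '   \'3.14\'\n'
+                  '   >>> format(\'abcdef\', \'.3\')\n'
+                  '   \'abc\'\n'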
                   '\n'
                   'Finally, the *type* determines how the data should be '
                   'presented.\n'
@@ -6305,12 +5436,8 @@
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'X\'"     | Hex format. Outputs the number in base '
                   '16, using upper-    |\n'
-                  '   |           | case letters for the digits above 9. In '
-                  'case "\'#\'" is      |\n'
-                  '   |           | specified, the prefix "\'0x\'" will be '
-                  'upper-cased to "\'0X\'" |\n'
-                  '   |           | as '
-                  'well.                                                   |\n'
+                  '   |           | case letters for the digits above '
+                  '9.                       |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'n\'"     | Number. This is the same as "\'d\'", '
@@ -6403,51 +5530,44 @@
                   '   |           | formats the result in either fixed-point '
                   'format or in      |\n'
                   '   |           | scientific notation, depending on its '
-                  'magnitude. A         |\n'
-                  '   |           | precision of "0" is treated as equivalent '
-                  'to a precision   |\n'
-                  '   |           | of "1".  The precise rules are as follows: '
-                  'suppose that    |\n'
-                  '   |           | the result formatted with presentation '
-                  'type "\'e\'" and      |\n'
-                  '   |           | precision "p-1" would have exponent '
-                  '"exp".  Then, if "m <= |\n'
-                  '   |           | exp < p", where "m" is -4 for floats and '
-                  '-6 for            |\n'
-                  '   |           | "Decimals", the number is formatted with '
-                  'presentation type |\n'
-                  '   |           | "\'f\'" and precision "p-1-exp".  '
-                  'Otherwise, the number is   |\n'
+                  'magnitude.  The      |\n'
+                  '   |           | precise rules are as follows: suppose that '
+                  'the result      |\n'
                   '   |           | formatted with presentation type "\'e\'" '
+                  'and precision "p-1" |\n'
+                  '   |           | would have exponent "exp".  Then, if "m <= '
+                  'exp < p", where |\n'
+                  '   |           | "m" is -4 for floats and -6 for '
+                  '"Decimals", the number is  |\n'
+                  '   |           | formatted with presentation type "\'f\'" '
                   'and precision       |\n'
-                  '   |           | "p-1". In both cases insignificant '
-                  'trailing zeros are      |\n'
-                  '   |           | removed from the significand, and the '
-                  'decimal point is     |\n'
-                  '   |           | also removed if there are no remaining '
-                  'digits following    |\n'
-                  '   |           | it, unless the "\'#\'" option is used.  '
-                  'With no precision    |\n'
-                  '   |           | given, uses a precision of "6" significant '
-                  'digits for      |\n'
-                  '   |           | "float". For "Decimal", the coefficient of '
-                  'the result is   |\n'
-                  '   |           | formed from the coefficient digits of the '
-                  'value;           |\n'
-                  '   |           | scientific notation is used for values '
-                  'smaller than "1e-6" |\n'
-                  '   |           | in absolute value and values where the '
-                  'place value of the  |\n'
-                  '   |           | least significant digit is larger than 1, '
-                  'and fixed-point  |\n'
-                  '   |           | notation is used otherwise.  Positive and '
-                  'negative         |\n'
-                  '   |           | infinity, positive and negative zero, and '
-                  'nans, are        |\n'
-                  '   |           | formatted as "inf", "-inf", "0", "-0" and '
-                  '"nan"            |\n'
-                  '   |           | respectively, regardless of the '
-                  'precision.                 |\n'
+                  '   |           | "p-1-exp".  Otherwise, the number is '
+                  'formatted with        |\n'
+                  '   |           | presentation type "\'e\'" and precision '
+                  '"p-1". In both cases |\n'
+                  '   |           | insignificant trailing zeros are removed '
+                  'from the          |\n'
+                  '   |           | significand, and the decimal point is also '
+                  'removed if      |\n'
+                  '   |           | there are no remaining digits following '
+                  'it, unless the     |\n'
+                  '   |           | "\'#\'" option is used.  Positive and '
+                  'negative infinity,     |\n'
+                  '   |           | positive and negative zero, and nans, are '
+                  'formatted as     |\n'
+                  '   |           | "inf", "-inf", "0", "-0" and "nan" '
+                  'respectively,           |\n'
+                  '   |           | regardless of the precision.  A precision '
+                  'of "0" is        |\n'
+                  '   |           | treated as equivalent to a precision of '
+                  '"1". With no       |\n'
+                  '   |           | precision given, uses a precision of "6" '
+                  'significant       |\n'
+                  '   |           | digits for "float", and shows all '
+                  'coefficient digits for   |\n'
+                  '   |           | '
+                  '"Decimal".                                                 '
+                  '|\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '   | "\'G\'"     | General format. Same as "\'g\'" except '
@@ -6472,24 +5592,19 @@
                   'percent sign.          |\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
-                  '   | None      | For "float" this is the same as "\'g\'", '
-                  'except that when    |\n'
-                  '   |           | fixed-point notation is used to format the '
-                  'result, it      |\n'
-                  '   |           | always includes at least one digit past '
-                  'the decimal point. |\n'
-                  '   |           | The precision used is as large as needed '
-                  'to represent the  |\n'
-                  '   |           | given value faithfully.  For "Decimal", '
-                  'this is the same   |\n'
-                  '   |           | as either "\'g\'" or "\'G\'" depending on '
-                  'the value of         |\n'
-                  '   |           | "context.capitals" for the current decimal '
-                  'context.  The   |\n'
-                  '   |           | overall effect is to match the output of '
-                  '"str()" as        |\n'
-                  '   |           | altered by the other format '
-                  'modifiers.                     |\n'
+                  '   | None      | Similar to "\'g\'", except that '
+                  'fixed-point notation, when   |\n'
+                  '   |           | used, has at least one digit past the '
+                  'decimal point. The   |\n'
+                  '   |           | default precision is as high as needed to '
+                  'represent the    |\n'
+                  '   |           | particular value. The overall effect is to '
+                  'match the       |\n'
+                  '   |           | output of "str()" as altered by the other '
+                  'format           |\n'
+                  '   |           | '
+                  'modifiers.                                                 '
+                  '|\n'
                   '   '
                   '+-----------+------------------------------------------------------------+\n'
                   '\n'
@@ -6667,6 +5782,7 @@
              '   decorators                ::= decorator+\n'
              '   decorator                 ::= "@" assignment_expression '
              'NEWLINE\n'
+             '   dotted_name               ::= identifier ("." identifier)*\n'
              '   parameter_list            ::= defparameter ("," '
              'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n'
              '                        | parameter_list_no_posonly\n'
@@ -6691,7 +5807,7 @@
              '\n'
              'The function definition does not execute the function body; this '
              'gets\n'
-             'executed only when the function is called. [4]\n'
+             'executed only when the function is called. [2]\n'
              '\n'
              'A function definition may be wrapped by one or more *decorator*\n'
              'expressions. Decorator expressions are evaluated when the '
@@ -6744,17 +5860,17 @@
              '“pre-\n'
              'computed” value is used for each call.  This is especially '
              'important\n'
-             'to understand when a default parameter value is a mutable '
-             'object, such\n'
-             'as a list or a dictionary: if the function modifies the object '
-             '(e.g.\n'
-             'by appending an item to a list), the default parameter value is '
-             'in\n'
-             'effect modified.  This is generally not what was intended.  A '
-             'way\n'
-             'around this is to use "None" as the default, and explicitly test '
-             'for\n'
-             'it in the body of the function, e.g.:\n'
+             'to understand when a default parameter is a mutable object, such '
+             'as a\n'
+             'list or a dictionary: if the function modifies the object (e.g. '
+             'by\n'
+             'appending an item to a list), the default value is in effect '
+             'modified.\n'
+             'This is generally not what was intended.  A way around this is '
+             'to use\n'
+             '"None" as the default, and explicitly test for it in the body of '
+             'the\n'
+             'function, e.g.:\n'
              '\n'
              '   def whats_on_the_telly(penguin=None):\n'
              '       if penguin is None:\n'
@@ -6766,7 +5882,7 @@
              'Calls.\n'
              'A function call always assigns values to all parameters '
              'mentioned in\n'
-             'the parameter list, either from positional arguments, from '
+             'the parameter list, either from positional arguments, from '
              'keyword\n'
              'arguments, or from default values.  If the form “"*identifier"” '
              'is\n'
@@ -6778,14 +5894,8 @@
              'new\n'
              'empty mapping of the same type.  Parameters after “"*"” or\n'
              '“"*identifier"” are keyword-only parameters and may only be '
-             'passed by\n'
-             'keyword arguments.  Parameters before “"/"” are positional-only\n'
-             'parameters and may only be passed by positional arguments.\n'
-             '\n'
-             'Changed in version 3.8: The "/" function parameter syntax may be '
-             'used\n'
-             'to indicate positional-only parameters. See **PEP 570** for '
-             'details.\n'
+             'passed by\n'
+             'keyword arguments.\n'
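+             '\n'
+             'For example (the function name is illustrative):\n'
+             '\n'
+             '   def move(*, dx=0, dy=0):\n'
+             '       return (dx, dy)\n'
+             '\n'
+             '   move(dx=1)     # ok\n'
+             '   # move(1)     -> TypeError: dx and dy are keyword-only\n'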
              '\n'
              'Parameters may have an *annotation* of the form “": '
              'expression"”\n'
@@ -6877,10 +5987,8 @@
            '\n'
            'Names listed in a "global" statement must not be defined as '
            'formal\n'
-           'parameters, or as targets in "with" statements or "except" '
-           'clauses, or\n'
-           'in a "for" target list, "class" definition, function definition,\n'
-           '"import" statement, or variable annotation.\n'
+           'parameters or in a "for" loop control target, "class" definition,\n'
+           'function definition, "import" statement, or variable annotation.\n'
            '\n'
            '**CPython implementation detail:** The current implementation does '
            'not\n'
@@ -6912,31 +6020,22 @@
                'trailing underscore characters:\n'
                '\n'
                '"_*"\n'
-               '   Not imported by "from module import *".\n'
-               '\n'
-               '"_"\n'
-               '   In a "case" pattern within a "match" statement, "_" is a '
-               'soft\n'
-               '   keyword that denotes a wildcard.\n'
-               '\n'
-               '   Separately, the interactive interpreter makes the result of '
-               'the\n'
-               '   last evaluation available in the variable "_". (It is '
-               'stored in the\n'
-               '   "builtins" module, alongside built-in functions like '
-               '"print".)\n'
-               '\n'
-               '   Elsewhere, "_" is a regular identifier. It is often used to '
-               'name\n'
-               '   “special” items, but it is not special to Python itself.\n'
+               '   Not imported by "from module import *".  The special '
+               'identifier "_"\n'
+               '   is used in the interactive interpreter to store the result '
+               'of the\n'
+               '   last evaluation; it is stored in the "builtins" module.  '
+               'When not\n'
+               '   in interactive mode, "_" has no special meaning and is not '
+               'defined.\n'
+               '   See section The import statement.\n'
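+               '\n'
+               '   For example, at the interactive prompt:\n'
+               '\n'
+               '      >>> 2 + 2\n'
+               '      4\n'
+               '      >>> _\n'
+               '      4\n'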
                '\n'
                '   Note:\n'
                '\n'
                '     The name "_" is often used in conjunction with\n'
                '     internationalization; refer to the documentation for the\n'
                '     "gettext" module for more information on this '
-               'convention. It is\n'
-               '     also commonly used for unused variables.\n'
+               'convention.\n'
                '\n'
                '"__*__"\n'
                '   System-defined names, informally known as “dunder” names. '
@@ -7059,28 +6158,6 @@
                 '   async      elif       if         or         yield\n'
                 '\n'
                 '\n'
-                'Soft Keywords\n'
-                '=============\n'
-                '\n'
-                'New in version 3.10.\n'
-                '\n'
-                'Some identifiers are only reserved under specific contexts. '
-                'These are\n'
-                'known as *soft keywords*.  The identifiers "match", "case" '
-                'and "_" can\n'
-                'syntactically act as keywords in contexts related to the '
-                'pattern\n'
-                'matching statement, but this distinction is done at the '
-                'parser level,\n'
-                'not when tokenizing.\n'
-                '\n'
-                'As soft keywords, their use with pattern matching is possible '
-                'while\n'
-                'still preserving compatibility with existing code that uses '
-                '"match",\n'
-                '"case" and "_" as identifier names.\n'
-                '\n'
-                '\n'
                 'Reserved classes of identifiers\n'
                 '===============================\n'
                 '\n'
@@ -7091,23 +6168,15 @@
                 'trailing underscore characters:\n'
                 '\n'
                 '"_*"\n'
-                '   Not imported by "from module import *".\n'
-                '\n'
-                '"_"\n'
-                '   In a "case" pattern within a "match" statement, "_" is a '
-                'soft\n'
-                '   keyword that denotes a wildcard.\n'
-                '\n'
-                '   Separately, the interactive interpreter makes the result '
+                '   Not imported by "from module import *".  The special '
+                'identifier "_"\n'
+                '   is used in the interactive interpreter to store the result '
                 'of the\n'
-                '   last evaluation available in the variable "_". (It is '
-                'stored in the\n'
-                '   "builtins" module, alongside built-in functions like '
-                '"print".)\n'
-                '\n'
-                '   Elsewhere, "_" is a regular identifier. It is often used '
-                'to name\n'
-                '   “special” items, but it is not special to Python itself.\n'
+                '   last evaluation; it is stored in the "builtins" module.  '
+                'When not\n'
+                '   in interactive mode, "_" has no special meaning and is not '
+                'defined.\n'
+                '   See section The import statement.\n'
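+                '\n'
+                '   For example, at the interactive prompt:\n'
+                '\n'
+                '      >>> 2 + 2\n'
+                '      4\n'
+                '      >>> _\n'
+                '      4\n'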
                 '\n'
                 '   Note:\n'
                 '\n'
@@ -7115,8 +6184,7 @@
                 '     internationalization; refer to the documentation for '
                 'the\n'
                 '     "gettext" module for more information on this '
-                'convention. It is\n'
-                '     also commonly used for unused variables.\n'
+                'convention.\n'
                 '\n'
                 '"__*__"\n'
                 '   System-defined names, informally known as “dunder” names. '
@@ -7188,7 +6256,7 @@
            '                   | "from" relative_module "import" "(" '
            'identifier ["as" identifier]\n'
            '                   ("," identifier ["as" identifier])* [","] ")"\n'
-           '                   | "from" relative_module "import" "*"\n'
+           '                   | "from" module "import" "*"\n'
            '   module          ::= (identifier ".")* identifier\n'
            '   relative_module ::= "."* module | "."+\n'
            '\n'
@@ -7532,7 +6600,10 @@
  'lambda': 'Lambdas\n'
            '*******\n'
            '\n'
-           '   lambda_expr ::= "lambda" [parameter_list] ":" expression\n'
+           '   lambda_expr        ::= "lambda" [parameter_list] ":" '
+           'expression\n'
+           '   lambda_expr_nocond ::= "lambda" [parameter_list] ":" '
+           'expression_nocond\n'
            '\n'
            'Lambda expressions (sometimes called lambda forms) are used to '
            'create\n'
@@ -7577,32 +6648,20 @@
            '*Names* refer to objects.  Names are introduced by name binding\n'
            'operations.\n'
            '\n'
-           'The following constructs bind names:\n'
-           '\n'
-           '* formal parameters to functions,\n'
-           '\n'
-           '* class definitions,\n'
-           '\n'
-           '* function definitions,\n'
-           '\n'
-           '* assignment expressions,\n'
-           '\n'
-           '* targets that are identifiers if occurring in an assignment:\n'
-           '\n'
-           '  * "for" loop header,\n'
-           '\n'
-           '  * after "as" in a "with" statement, "except" clause or in the '
-           'as-\n'
-           '    pattern in structural pattern matching,\n'
-           '\n'
-           '  * in a capture pattern in structural pattern matching\n'
-           '\n'
-           '* "import" statements.\n'
-           '\n'
-           'The "import" statement of the form "from ... import *" binds all '
-           'names\n'
-           'defined in the imported module, except those beginning with an\n'
-           'underscore. This form may only be used at the module level.\n'
+           'The following constructs bind names: formal parameters to '
+           'functions,\n'
+           '"import" statements, class and function definitions (these bind '
+           'the\n'
+           'class or function name in the defining block), and targets that '
+           'are\n'
+           'identifiers if occurring in an assignment, "for" loop header, or '
+           'after\n'
+           '"as" in a "with" statement or "except" clause. The "import" '
+           'statement\n'
+           'of the form "from ... import *" binds all names defined in the\n'
+           'imported module, except those beginning with an underscore.  This '
+           'form\n'
+           'may only be used at the module level.\n'
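+           '\n'
+           'For example (the names are illustrative):\n'
+           '\n'
+           '   import math                  # binds "math"\n'
+           '\n'
+           '   def area(r):                 # binds "area"; "r" is a\n'
+           '       return math.pi * r * r   # formal parameter, also bound\n'
+           '\n'
+           '   for i in range(3):           # the "for" header binds "i"\n'
+           '       pass\n'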
            '\n'
            'A target occurring in a "del" statement is also considered bound '
            'for\n'
@@ -7672,8 +6731,8 @@
            'operations.\n'
            '\n'
            'If the "global" statement occurs within a block, all uses of the '
-           'names\n'
-           'specified in the statement refer to the bindings of those names in '
+           'name\n'
+           'specified in the statement refer to the binding of that name in '
            'the\n'
            'top-level namespace.  Names are resolved in the top-level '
            'namespace by\n'
@@ -7682,9 +6741,9 @@
            'namespace\n'
            'of the module "builtins".  The global namespace is searched '
            'first.  If\n'
-           'the names are not found there, the builtins namespace is '
-           'searched.\n'
-           'The "global" statement must precede all uses of the listed names.\n'
+           'the name is not found there, the builtins namespace is searched.  '
+           'The\n'
+           '"global" statement must precede all uses of the name.\n'
            '\n'
            'The "global" statement has the same scope as a name binding '
            'operation\n'
@@ -7824,7 +6883,7 @@
             '\n'
             'Note that numeric literals do not include a sign; a phrase like '
             '"-1"\n'
-            'is actually an expression composed of the unary operator ‘"-"’ '
+            'is actually an expression composed of the unary operator ‘"-"’ '
             'and the\n'
             'literal "1".\n',
  'numeric-types': 'Emulating numeric types\n'
@@ -7969,6 +7028,16 @@
                   'the data\n'
                   '   model.\n'
                   '\n'
+                  '   Note:\n'
+                  '\n'
+                  '     Due to a bug in the dispatching mechanism for "**=", a '
+                  'class that\n'
+                  '     defines "__ipow__()" but returns "NotImplemented" '
+                  'would fail to\n'
+                  '     fall back to "x.__pow__(y)" and "y.__rpow__(x)". This '
+                  'bug is\n'
+                  '     fixed in Python 3.10.\n'
+                  '\n'
                   'object.__neg__(self)\n'
                   'object.__pos__(self)\n'
                   'object.__abs__(self)\n'
@@ -8019,9 +7088,9 @@
                   '   of the object truncated to an "Integral" (typically an '
                   '"int").\n'
                   '\n'
-                  '   The built-in function "int()" falls back to '
-                  '"__trunc__()" if\n'
-                  '   neither "__int__()" nor "__index__()" is defined.\n',
+                  '   If "__int__()" is not defined then the built-in function '
+                  '"int()"\n'
+                  '   falls back to "__trunc__()".\n',
  'objects': 'Objects, values and types\n'
             '*************************\n'
             '\n'
@@ -8155,8 +7224,8 @@
                      '\n'
                      'The following table summarizes the operator precedence '
                      'in Python, from\n'
-                     'highest precedence (most binding) to lowest precedence '
-                     '(least\n'
+                     'lowest precedence (least binding) to highest precedence '
+                     '(most\n'
                      'binding).  Operators in the same box have the same '
                      'precedence.  Unless\n'
                      'the syntax is explicitly given, operators are binary.  '
@@ -8175,71 +7244,71 @@
                      '| Operator                                        | '
                      'Description                           |\n'
                      '|=================================================|=======================================|\n'
-                     '| "(expressions...)",  "[expressions...]", "{key: | '
-                     'Binding or parenthesized expression,  |\n'
-                     '| value...}", "{expressions...}"                  | list '
-                     'display, dictionary display, set |\n'
-                     '|                                                 | '
-                     'display                               |\n'
+                     '| ":="                                            | '
+                     'Assignment expression                 |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "x[index]", "x[index:index]",                   | '
-                     'Subscription, slicing, call,          |\n'
-                     '| "x(arguments...)", "x.attribute"                | '
-                     'attribute reference                   |\n'
+                     '| "lambda"                                        | '
+                     'Lambda expression                     |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "await" "x"                                     | '
-                     'Await expression                      |\n'
+                     '| "if" – "else"                                   | '
+                     'Conditional expression                |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "**"                                            | '
-                     'Exponentiation [5]                    |\n'
+                     '| "or"                                            | '
+                     'Boolean OR                            |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "+x", "-x", "~x"                                | '
-                     'Positive, negative, bitwise NOT       |\n'
+                     '| "and"                                           | '
+                     'Boolean AND                           |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "*", "@", "/", "//", "%"                        | '
-                     'Multiplication, matrix                |\n'
-                     '|                                                 | '
-                     'multiplication, division, floor       |\n'
-                     '|                                                 | '
-                     'division, remainder [6]               |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "+", "-"                                        | '
-                     'Addition and subtraction              |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "<<", ">>"                                      | '
-                     'Shifts                                |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "&"                                             | '
-                     'Bitwise AND                           |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "^"                                             | '
-                     'Bitwise XOR                           |\n'
-                     '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "|"                                             | '
-                     'Bitwise OR                            |\n'
+                     '| "not" "x"                                       | '
+                     'Boolean NOT                           |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
                      '| "in", "not in", "is", "is not", "<", "<=", ">", | '
                      'Comparisons, including membership     |\n'
                      '| ">=", "!=", "=="                                | '
                      'tests and identity tests              |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "not" "x"                                       | '
-                     'Boolean NOT                           |\n'
+                     '| "|"                                             | '
+                     'Bitwise OR                            |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "and"                                           | '
-                     'Boolean AND                           |\n'
+                     '| "^"                                             | '
+                     'Bitwise XOR                           |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "or"                                            | '
-                     'Boolean OR                            |\n'
+                     '| "&"                                             | '
+                     'Bitwise AND                           |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "if" – "else"                                   | '
-                     'Conditional expression                |\n'
+                     '| "<<", ">>"                                      | '
+                     'Shifts                                |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| "lambda"                                        | '
-                     'Lambda expression                     |\n'
+                     '| "+", "-"                                        | '
+                     'Addition and subtraction              |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
-                     '| ":="                                            | '
-                     'Assignment expression                 |\n'
+                     '| "*", "@", "/", "//", "%"                        | '
+                     'Multiplication, matrix                |\n'
+                     '|                                                 | '
+                     'multiplication, division, floor       |\n'
+                     '|                                                 | '
+                     'division, remainder [5]               |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "+x", "-x", "~x"                                | '
+                     'Positive, negative, bitwise NOT       |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "**"                                            | '
+                     'Exponentiation [6]                    |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "await" "x"                                     | '
+                     'Await expression                      |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "x[index]", "x[index:index]",                   | '
+                     'Subscription, slicing, call,          |\n'
+                     '| "x(arguments...)", "x.attribute"                | '
+                     'attribute reference                   |\n'
+                     '+-------------------------------------------------+---------------------------------------+\n'
+                     '| "(expressions...)",  "[expressions...]", "{key: | '
+                     'Binding or parenthesized expression,  |\n'
+                     '| value...}", "{expressions...}"                  | list '
+                     'display, dictionary display, set |\n'
+                     '|                                                 | '
+                     'display                               |\n'
                      '+-------------------------------------------------+---------------------------------------+\n'
                      '\n'
                      '-[ Footnotes ]-\n'
@@ -8320,14 +7389,14 @@
                      'Check their\n'
                      '    documentation for more info.\n'
                      '\n'
-                     '[5] The power operator "**" binds less tightly than an '
+                     '[5] The "%" operator is also used for string formatting; '
+                     'the same\n'
+                     '    precedence applies.\n'
+                     '\n'
+                     '[6] The power operator "**" binds less tightly than an '
                      'arithmetic or\n'
                      '    bitwise unary operator on its right, that is, '
-                     '"2**-1" is "0.5".\n'
-                     '\n'
-                     '[6] The "%" operator is also used for string formatting; '
-                     'the same\n'
-                     '    precedence applies.\n',
+                     '"2**-1" is "0.5".\n',
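Footnote [6] can be verified directly; "**" binds less tightly than a
unary minus on its right, but more tightly than one on its left:

    print(2 ** -1)   # 0.5 -- the right-hand unary minus binds first
    print(-2 ** 2)   # -4  -- parsed as -(2 ** 2), not (-2) ** 2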
  'pass': 'The "pass" statement\n'
          '********************\n'
          '\n'
@@ -8375,21 +7444,18 @@
           '"ZeroDivisionError".\n'
           'Raising a negative number to a fractional power results in a '
           '"complex"\n'
-          'number. (In earlier versions it raised a "ValueError".)\n'
-          '\n'
-          'This operation can be customized using the special "__pow__()" '
-          'method.\n',
+          'number. (In earlier versions it raised a "ValueError".)\n',
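A short illustration of the behaviour described above:

    print((-8) ** 0.5)   # a complex number, approximately 2.8284j
    # print(0 ** -1)     # would raise ZeroDivisionError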
  'raise': 'The "raise" statement\n'
           '*********************\n'
           '\n'
           '   raise_stmt ::= "raise" [expression ["from" expression]]\n'
           '\n'
-          'If no expressions are present, "raise" re-raises the exception that '
-          'is\n'
-          'currently being handled, which is also known as the *active\n'
-          'exception*. If there isn’t currently an active exception, a\n'
-          '"RuntimeError" exception is raised indicating that this is an '
-          'error.\n'
+          'If no expressions are present, "raise" re-raises the last '
+          'exception\n'
+          'that was active in the current scope.  If no exception is active '
+          'in\n'
+          'the current scope, a "RuntimeError" exception is raised indicating\n'
+          'that this is an error.\n'
           '\n'
           'Otherwise, "raise" evaluates the first expression as the exception\n'
           'object.  It must be either a subclass or an instance of\n'
@@ -8415,18 +7481,12 @@
           '\n'
           'The "from" clause is used for exception chaining: if given, the '
           'second\n'
-          '*expression* must be another exception class or instance. If the\n'
-          'second expression is an exception instance, it will be attached to '
-          'the\n'
-          'raised exception as the "__cause__" attribute (which is writable). '
-          'If\n'
-          'the expression is an exception class, the class will be '
-          'instantiated\n'
-          'and the resulting exception instance will be attached to the '
-          'raised\n'
-          'exception as the "__cause__" attribute. If the raised exception is '
-          'not\n'
-          'handled, both exceptions will be printed:\n'
+          '*expression* must be another exception class or instance, which '
+          'will\n'
+          'then be attached to the raised exception as the "__cause__" '
+          'attribute\n'
+          '(which is writable).  If the raised exception is not handled, both\n'
+          'exceptions will be printed:\n'
           '\n'
           '   >>> try:\n'
           '   ...     print(1 / 0)\n'
@@ -8444,14 +7504,11 @@
           '     File "<stdin>", line 4, in <module>\n'
           '   RuntimeError: Something bad happened\n'
           '\n'
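Although not quoted above, the chaining shown here can also be
suppressed explicitly by writing "from None"; a small sketch:

    try:
        {}["missing"]
    except KeyError:
        # "from None" keeps the KeyError out of the printed traceback
        raise ValueError("bad lookup") from None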
-          'A similar mechanism works implicitly if a new exception is raised '
-          'when\n'
-          'an exception is already being handled.  An exception may be '
-          'handled\n'
-          'when an "except" or "finally" clause, or a "with" statement, is '
-          'used.\n'
-          'The previous exception is then attached as the new exception’s\n'
-          '"__context__" attribute:\n'
+          'A similar mechanism works implicitly if an exception is raised '
+          'inside\n'
+          'an exception handler or a "finally" clause: the previous exception '
+          'is\n'
+          'then attached as the new exception’s "__context__" attribute:\n'
           '\n'
           '   >>> try:\n'
           '   ...     print(1 / 0)\n'
@@ -8533,62 +7590,61 @@
                    '\n'
                    'The following methods can be defined to implement '
                    'container objects.\n'
-                   'Containers usually are *sequences* (such as "lists" or '
-                   '"tuples") or\n'
-                   '*mappings* (like "dictionaries"), but can represent other '
-                   'containers\n'
-                   'as well.  The first set of methods is used either to '
-                   'emulate a\n'
-                   'sequence or to emulate a mapping; the difference is that '
-                   'for a\n'
-                   'sequence, the allowable keys should be the integers *k* '
-                   'for which "0\n'
-                   '<= k < N" where *N* is the length of the sequence, or '
-                   '"slice" objects,\n'
-                   'which define a range of items.  It is also recommended '
-                   'that mappings\n'
-                   'provide the methods "keys()", "values()", "items()", '
-                   '"get()",\n'
-                   '"clear()", "setdefault()", "pop()", "popitem()", "copy()", '
-                   'and\n'
-                   '"update()" behaving similar to those for Python’s '
-                   'standard\n'
-                   '"dictionary" objects.  The "collections.abc" module '
-                   'provides a\n'
-                   '"MutableMapping" *abstract base class* to help create '
-                   'those methods\n'
-                   'from a base set of "__getitem__()", "__setitem__()", '
-                   '"__delitem__()",\n'
-                   'and "keys()". Mutable sequences should provide methods '
-                   '"append()",\n'
-                   '"count()", "index()", "extend()", "insert()", "pop()", '
-                   '"remove()",\n'
-                   '"reverse()" and "sort()", like Python standard "list" '
-                   'objects.\n'
-                   'Finally, sequence types should implement addition '
-                   '(meaning\n'
-                   'concatenation) and multiplication (meaning repetition) by '
-                   'defining the\n'
-                   'methods "__add__()", "__radd__()", "__iadd__()", '
-                   '"__mul__()",\n'
-                   '"__rmul__()" and "__imul__()" described below; they should '
-                   'not define\n'
-                   'other numerical operators.  It is recommended that both '
-                   'mappings and\n'
-                   'sequences implement the "__contains__()" method to allow '
-                   'efficient use\n'
-                   'of the "in" operator; for mappings, "in" should search the '
-                   'mapping’s\n'
-                   'keys; for sequences, it should search through the values.  '
-                   'It is\n'
-                   'further recommended that both mappings and sequences '
-                   'implement the\n'
-                   '"__iter__()" method to allow efficient iteration through '
+                   'Containers usually are sequences (such as lists or tuples) '
+                   'or mappings\n'
+                   '(like dictionaries), but can represent other containers as '
+                   'well.  The\n'
+                   'first set of methods is used either to emulate a sequence '
+                   'or to\n'
+                   'emulate a mapping; the difference is that for a sequence, '
                    'the\n'
-                   'container; for mappings, "__iter__()" should iterate '
-                   'through the\n'
-                   'object’s keys; for sequences, it should iterate through '
-                   'the values.\n'
+                   'allowable keys should be the integers *k* for which "0 <= '
+                   'k < N" where\n'
+                   '*N* is the length of the sequence, or slice objects, which '
+                   'define a\n'
+                   'range of items.  It is also recommended that mappings '
+                   'provide the\n'
+                   'methods "keys()", "values()", "items()", "get()", '
+                   '"clear()",\n'
+                   '"setdefault()", "pop()", "popitem()", "copy()", and '
+                   '"update()"\n'
+                   'behaving similarly to those for Python’s standard dictionary '
+                   'objects.\n'
+                   'The "collections.abc" module provides a "MutableMapping" '
+                   'abstract base\n'
+                   'class to help create those methods from a base set of '
+                   '"__getitem__()",\n'
+                   '"__setitem__()", "__delitem__()", and "keys()". Mutable '
+                   'sequences\n'
+                   'should provide methods "append()", "count()", "index()", '
+                   '"extend()",\n'
+                   '"insert()", "pop()", "remove()", "reverse()" and "sort()", '
+                   'like Python\n'
+                   'standard list objects.  Finally, sequence types should '
+                   'implement\n'
+                   'addition (meaning concatenation) and multiplication '
+                   '(meaning\n'
+                   'repetition) by defining the methods "__add__()", '
+                   '"__radd__()",\n'
+                   '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" '
+                   'described\n'
+                   'below; they should not define other numerical operators.  '
+                   'It is\n'
+                   'recommended that both mappings and sequences implement '
+                   'the\n'
+                   '"__contains__()" method to allow efficient use of the "in" '
+                   'operator;\n'
+                   'for mappings, "in" should search the mapping’s keys; for '
+                   'sequences, it\n'
+                   'should search through the values.  It is further '
+                   'recommended that both\n'
+                   'mappings and sequences implement the "__iter__()" method '
+                   'to allow\n'
+                   'efficient iteration through the container; for mappings, '
+                   '"__iter__()"\n'
+                   'should iterate through the object’s keys; for sequences, '
+                   'it should\n'
+                   'iterate through the values.\n'
                    '\n'
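A sketch of the "collections.abc" helper mentioned above; "LowerDict"
is an illustrative name, and the methods shown are the abstract ones
the "MutableMapping" ABC requires, with "keys()", "get()", "items()"
and the rest supplied as mixins:

    from collections.abc import MutableMapping

    class LowerDict(MutableMapping):
        """A mapping that normalises string keys to lower case."""
        def __init__(self):
            self._data = {}
        def __getitem__(self, key):
            return self._data[key.lower()]
        def __setitem__(self, key, value):
            self._data[key.lower()] = value
        def __delitem__(self, key):
            del self._data[key.lower()]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    d = LowerDict()
    d["Key"] = 1
    print(d["KEY"], list(d.items()))   # 1 [('key', 1)]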
                    'object.__len__(self)\n'
                    '\n'
@@ -8647,24 +7703,22 @@
                    'object.__getitem__(self, key)\n'
                    '\n'
                    '   Called to implement evaluation of "self[key]". For '
-                   '*sequence*\n'
-                   '   types, the accepted keys should be integers and slice '
-                   'objects.\n'
-                   '   Note that the special interpretation of negative '
-                   'indexes (if the\n'
-                   '   class wishes to emulate a *sequence* type) is up to '
+                   'sequence types,\n'
+                   '   the accepted keys should be integers and slice '
+                   'objects.  Note that\n'
+                   '   the special interpretation of negative indexes (if the '
+                   'class wishes\n'
+                   '   to emulate a sequence type) is up to the '
+                   '"__getitem__()" method. If\n'
+                   '   *key* is of an inappropriate type, "TypeError" may be '
+                   'raised; if of\n'
+                   '   a value outside the set of indexes for the sequence '
+                   '(after any\n'
+                   '   special interpretation of negative values), '
+                   '"IndexError" should be\n'
+                   '   raised. For mapping types, if *key* is missing (not in '
                    'the\n'
-                   '   "__getitem__()" method. If *key* is of an inappropriate '
-                   'type,\n'
-                   '   "TypeError" may be raised; if of a value outside the '
-                   'set of indexes\n'
-                   '   for the sequence (after any special interpretation of '
-                   'negative\n'
-                   '   values), "IndexError" should be raised. For *mapping* '
-                   'types, if\n'
-                   '   *key* is missing (not in the container), "KeyError" '
-                   'should be\n'
-                   '   raised.\n'
+                   '   container), "KeyError" should be raised.\n'
                    '\n'
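A minimal sequence-style sketch of the contract just described (the
class is hypothetical; negative indexes and the recommended exceptions
are handled by the method itself):

    class Squares:
        def __init__(self, n):
            self._n = n
        def __len__(self):
            return self._n
        def __getitem__(self, index):
            if not isinstance(index, int):
                raise TypeError("indices must be integers")
            if index < 0:               # emulate negative indexing by hand
                index += self._n
            if not 0 <= index < self._n:
                raise IndexError("index out of range")
            return index * index

    s = Squares(5)
    print(s[2], s[-1])   # 4 16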
                    '   Note:\n'
                    '\n'
@@ -8674,15 +7728,6 @@
                    'of the\n'
                    '     sequence.\n'
                    '\n'
-                   '   Note:\n'
-                   '\n'
-                   '     When subscripting a *class*, the special class '
-                   'method\n'
-                   '     "__class_getitem__()" may be called instead of '
-                   '"__getitem__()".\n'
-                   '     See __class_getitem__ versus __getitem__ for more '
-                   'details.\n'
-                   '\n'
                    'object.__setitem__(self, key, value)\n'
                    '\n'
                    '   Called to implement assignment to "self[key]".  Same '
@@ -8718,13 +7763,19 @@
                    '\n'
                    'object.__iter__(self)\n'
                    '\n'
-                   '   This method is called when an *iterator* is required '
-                   'for a\n'
-                   '   container. This method should return a new iterator '
-                   'object that can\n'
-                   '   iterate over all the objects in the container.  For '
-                   'mappings, it\n'
-                   '   should iterate over the keys of the container.\n'
+                   '   This method is called when an iterator is required for '
+                   'a container.\n'
+                   '   This method should return a new iterator object that '
+                   'can iterate\n'
+                   '   over all the objects in the container.  For mappings, '
+                   'it should\n'
+                   '   iterate over the keys of the container.\n'
+                   '\n'
+                   '   Iterator objects also need to implement this method; '
+                   'they are\n'
+                   '   required to return themselves.  For more information on '
+                   'iterator\n'
+                   '   objects, see Iterator Types.\n'
                    '\n'
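A sketch of the requirement just described: the container's
"__iter__()" returns a fresh iterator object, and the iterator's own
"__iter__()" returns itself (names are illustrative):

    class Countdown:
        def __init__(self, start):
            self.start = start
        def __iter__(self):
            return CountdownIterator(self.start)

    class CountdownIterator:
        def __init__(self, current):
            self.current = current
        def __iter__(self):
            return self                 # iterators return themselves
        def __next__(self):
            if self.current <= 0:
                raise StopIteration
            self.current -= 1
            return self.current + 1

    print(list(Countdown(3)))   # [3, 2, 1]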
                    'object.__reversed__(self)\n'
                    '\n'
@@ -8787,10 +7838,6 @@
              'the\n'
              'second argument.\n'
              '\n'
-             'This operation can be customized using the special '
-             '"__lshift__()" and\n'
-             '"__rshift__()" methods.\n'
-             '\n'
              'A right shift by *n* bits is defined as floor division by '
              '"pow(2,n)".\n'
              'A left shift by *n* bits is defined as multiplication with '
@@ -8903,7 +7950,7 @@
                  'immediate\n'
                  '   subclasses.  This method returns a list of all those '
                  'references\n'
-                 '   still alive.  The list is in definition order.  Example:\n'
+                 '   still alive. Example:\n'
                  '\n'
                  '      >>> int.__subclasses__()\n'
                  "      [<class 'bool'>]\n"
@@ -9005,13 +8052,13 @@
                  '\n'
                  '   If "__new__()" is invoked during object construction and '
                  'it returns\n'
-                 '   an instance of *cls*, then the new instance’s '
-                 '"__init__()" method\n'
-                 '   will be invoked like "__init__(self[, ...])", where '
-                 '*self* is the\n'
-                 '   new instance and the remaining arguments are the same as '
-                 'were\n'
-                 '   passed to the object constructor.\n'
+                 '   an instance or subclass of *cls*, then the new '
+                 'instance’s\n'
+                 '   "__init__()" method will be invoked like "__init__(self[, '
+                 '...])",\n'
+                 '   where *self* is the new instance and the remaining '
+                 'arguments are\n'
+                 '   the same as were passed to the object constructor.\n'
                  '\n'
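A small sketch of this rule, subclassing an immutable type so that the
construction work has to happen in "__new__()" ("Point2D" is an
illustrative name):

    class Point2D(tuple):
        def __new__(cls, x, y):
            # returns an instance of cls, so __init__() runs afterwards
            return super().__new__(cls, (x, y))

    p = Point2D(1, 2)
    print(p, isinstance(p, tuple))   # (1, 2) True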
                  '   If "__new__()" does not return an instance of *cls*, then '
                  'the new\n'
@@ -9679,6 +8726,32 @@
                  'of the\n'
                  '   owner class.\n'
                  '\n'
+                 'object.__set_name__(self, owner, name)\n'
+                 '\n'
+                 '   Called at the time the owning class *owner* is created. '
+                 'The\n'
+                 '   descriptor has been assigned to *name*.\n'
+                 '\n'
+                 '   Note:\n'
+                 '\n'
+                 '     "__set_name__()" is only called implicitly as part of '
+                 'the "type"\n'
+                 '     constructor, so it will need to be called explicitly '
+                 'with the\n'
+                 '     appropriate parameters when a descriptor is added to a '
+                 'class\n'
+                 '     after initial creation:\n'
+                 '\n'
+                 '        class A:\n'
+                 '           pass\n'
+                 '        descr = custom_descriptor()\n'
+                 '        A.attr = descr\n'
+                 "        descr.__set_name__(A, 'attr')\n"
+                 '\n'
+                 '     See Creating the class object for more details.\n'
+                 '\n'
+                 '   New in version 3.6.\n'
+                 '\n'
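A sketch of a descriptor that uses this hook to learn which attribute
name it was assigned to ("Recorded" is a hypothetical descriptor):

    class Recorded:
        def __set_name__(self, owner, name):
            self.name = name            # runs while class A is created
        def __get__(self, obj, objtype=None):
            return obj.__dict__.get(self.name)
        def __set__(self, obj, value):
            obj.__dict__[self.name] = value

    class A:
        attr = Recorded()   # implicitly calls Recorded.__set_name__(A, 'attr')

    a = A()
    a.attr = 10
    print(a.attr)   # 10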
                  'The attribute "__objclass__" is interpreted by the "inspect" '
                  'module as\n'
                  'specifying the class where this object was defined (setting '
@@ -9749,16 +8822,16 @@
                  '"super(B,\n'
                  '   obj).m()" searches "obj.__class__.__mro__" for the base '
                  'class "A"\n'
-                 '   immediately following "B" and then invokes the descriptor '
+                 '   immediately following "B" and then invokes the descriptor '
                  'with the\n'
                  '   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n'
                  '\n'
                  'For instance bindings, the precedence of descriptor '
                  'invocation depends\n'
-                 'on which descriptor methods are defined.  A descriptor can '
-                 'define any\n'
-                 'combination of "__get__()", "__set__()" and "__delete__()".  '
-                 'If it\n'
+                 'on which descriptor methods are defined.  A descriptor '
+                 'can define\n'
+                 'any combination of "__get__()", "__set__()" and '
+                 '"__delete__()".  If it\n'
                  'does not define "__get__()", then accessing the attribute '
                  'will return\n'
                  'the descriptor object itself unless there is a value in the '
@@ -9779,14 +8852,13 @@
                  'be\n'
                  'overridden by instances.\n'
                  '\n'
-                 'Python methods (including those decorated with '
-                 '"@staticmethod" and\n'
-                 '"@classmethod") are implemented as non-data descriptors.  '
-                 'Accordingly,\n'
-                 'instances can redefine and override methods.  This allows '
-                 'individual\n'
-                 'instances to acquire behaviors that differ from other '
-                 'instances of the\n'
+                 'Python methods (including "staticmethod()" and '
+                 '"classmethod()") are\n'
+                 'implemented as non-data descriptors.  Accordingly, instances '
+                 'can\n'
+                 'redefine and override methods.  This allows individual '
+                 'instances to\n'
+                 'acquire behaviors that differ from other instances of the '
                  'same class.\n'
                  '\n'
                  'The "property()" function is implemented as a data '
@@ -9800,12 +8872,12 @@
                  '\n'
                  '*__slots__* allow us to explicitly declare data members '
                  '(like\n'
-                 'properties) and deny the creation of "__dict__" and '
+                 'properties) and deny the creation of *__dict__* and '
                  '*__weakref__*\n'
                  '(unless explicitly declared in *__slots__* or available in a '
                  'parent.)\n'
                  '\n'
-                 'The space saved over using "__dict__" can be significant. '
+                 'The space saved over using *__dict__* can be significant. '
                  'Attribute\n'
                  'lookup speed can be significantly improved as well.\n'
                  '\n'
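A compact illustration of the trade-off described here:

    class Point:
        __slots__ = ('x', 'y')   # no per-instance __dict__ is created
        def __init__(self, x, y):
            self.x = x
            self.y = y

    p = Point(1, 2)
    # p.z = 3        # would raise AttributeError: no __dict__ to put z in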
@@ -9817,7 +8889,7 @@
                  '*__slots__*\n'
                  '   reserves space for the declared variables and prevents '
                  'the\n'
-                 '   automatic creation of "__dict__" and *__weakref__* for '
+                 '   automatic creation of *__dict__* and *__weakref__* for '
                  'each\n'
                  '   instance.\n'
                  '\n'
@@ -9826,11 +8898,11 @@
                  '~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
                  '\n'
                  '* When inheriting from a class without *__slots__*, the '
-                 '"__dict__" and\n'
+                 '*__dict__* and\n'
                  '  *__weakref__* attribute of the instances will always be '
                  'accessible.\n'
                  '\n'
-                 '* Without a "__dict__" variable, instances cannot be '
+                 '* Without a *__dict__* variable, instances cannot be '
                  'assigned new\n'
                  '  variables not listed in the *__slots__* definition.  '
                  'Attempts to\n'
@@ -9843,28 +8915,28 @@
                  '\n'
                  '* Without a *__weakref__* variable for each instance, '
                  'classes defining\n'
-                 '  *__slots__* do not support "weak references" to its '
-                 'instances. If\n'
-                 '  weak reference support is needed, then add '
-                 '"\'__weakref__\'" to the\n'
+                 '  *__slots__* do not support weak references to their '
+                 'instances. If weak\n'
+                 '  reference support is needed, then add "\'__weakref__\'" to '
+                 'the\n'
                  '  sequence of strings in the *__slots__* declaration.\n'
                  '\n'
                  '* *__slots__* are implemented at the class level by '
                  'creating\n'
-                 '  descriptors for each variable name.  As a result, class '
-                 'attributes\n'
-                 '  cannot be used to set default values for instance '
-                 'variables defined\n'
-                 '  by *__slots__*; otherwise, the class attribute would '
-                 'overwrite the\n'
-                 '  descriptor assignment.\n'
+                 '  descriptors (Implementing Descriptors) for each variable '
+                 'name.  As a\n'
+                 '  result, class attributes cannot be used to set default '
+                 'values for\n'
+                 '  instance variables defined by *__slots__*; otherwise, the '
+                 'class\n'
+                 '  attribute would overwrite the descriptor assignment.\n'
                  '\n'
                  '* The action of a *__slots__* declaration is not limited to '
                  'the class\n'
                  '  where it is defined.  *__slots__* declared in parents are '
                  'available\n'
                  '  in child classes. However, child subclasses will get a '
-                 '"__dict__"\n'
+                 '*__dict__*\n'
                  '  and *__weakref__* unless they also define *__slots__* '
                  '(which should\n'
                  '  only contain names of any *additional* slots).\n'
@@ -9884,18 +8956,13 @@
                  '  “variable-length” built-in types such as "int", "bytes" '
                  'and "tuple".\n'
                  '\n'
-                 '* Any non-string *iterable* may be assigned to *__slots__*.\n'
+                 '* Any non-string iterable may be assigned to *__slots__*. '
+                 'Mappings may\n'
+                 '  also be used; however, in the future, special meaning may '
+                 'be\n'
+                 '  assigned to the values corresponding to each key.\n'
                  '\n'
-                 '* If a "dictionary" is used to assign *__slots__*, the '
-                 'dictionary keys\n'
-                 '  will be used as the slot names. The values of the '
-                 'dictionary can be\n'
-                 '  used to provide per-attribute docstrings that will be '
-                 'recognised by\n'
-                 '  "inspect.getdoc()" and displayed in the output of '
-                 '"help()".\n'
-                 '\n'
-                 '* "__class__" assignment works only if both classes have the '
+                 '* *__class__* assignment works only if both classes have the '
                  'same\n'
                  '  *__slots__*.\n'
                  '\n'
@@ -9907,9 +8974,9 @@
                  'violations\n'
                  '  raise "TypeError".\n'
                  '\n'
-                 '* If an *iterator* is used for *__slots__* then a '
-                 '*descriptor* is\n'
-                 '  created for each of the iterator’s values. However, the '
+                 '* If an iterator is used for *__slots__* then a descriptor '
+                 'is created\n'
+                 '  for each of the iterator’s values. However, the '
                  '*__slots__*\n'
                  '  attribute will be an empty iterator.\n'
                  '\n'
@@ -9918,15 +8985,15 @@
                  '==========================\n'
                  '\n'
                  'Whenever a class inherits from another class, '
-                 '"__init_subclass__()" is\n'
-                 'called on the parent class. This way, it is possible to '
-                 'write classes\n'
-                 'which change the behavior of subclasses. This is closely '
-                 'related to\n'
-                 'class decorators, but where class decorators only affect the '
-                 'specific\n'
-                 'class they’re applied to, "__init_subclass__" solely applies '
-                 'to future\n'
+                 '*__init_subclass__* is\n'
+                 'called on that class. This way, it is possible to write '
+                 'classes which\n'
+                 'change the behavior of subclasses. This is closely related '
+                 'to class\n'
+                 'decorators, but where class decorators only affect the '
+                 'specific class\n'
+                 'they’re applied to, "__init_subclass__" solely applies to '
+                 'future\n'
                  'subclasses of the class defining the method.\n'
                  '\n'
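A sketch of the subclass-registration pattern this hook (documented
just below) makes possible; the class names are illustrative:

    class PluginBase:
        plugins = []
        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__(**kwargs)
            PluginBase.plugins.append(cls)

    class MyPlugin(PluginBase):   # registered as a side effect
        pass

    print(PluginBase.plugins)     # e.g. [<class '__main__.MyPlugin'>]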
                  'classmethod object.__init_subclass__(cls)\n'
@@ -9974,38 +9041,6 @@
                  '\n'
                  '   New in version 3.6.\n'
                  '\n'
-                 'When a class is created, "type.__new__()" scans the class '
-                 'variables\n'
-                 'and makes callbacks to those with a "__set_name__()" hook.\n'
-                 '\n'
-                 'object.__set_name__(self, owner, name)\n'
-                 '\n'
-                 '   Automatically called at the time the owning class *owner* '
-                 'is\n'
-                 '   created. The object has been assigned to *name* in that '
-                 'class:\n'
-                 '\n'
-                 '      class A:\n'
-                 '          x = C()  # Automatically calls: x.__set_name__(A, '
-                 "'x')\n"
-                 '\n'
-                 '   If the class variable is assigned after the class is '
-                 'created,\n'
-                 '   "__set_name__()" will not be called automatically. If '
-                 'needed,\n'
-                 '   "__set_name__()" can be called directly:\n'
-                 '\n'
-                 '      class A:\n'
-                 '         pass\n'
-                 '\n'
-                 '      c = C()\n'
-                 '      A.x = c                  # The hook is not called\n'
-                 "      c.__set_name__(A, 'x')   # Manually invoke the hook\n"
-                 '\n'
-                 '   See Creating the class object for more details.\n'
-                 '\n'
-                 '   New in version 3.6.\n'
-                 '\n'
                  '\n'
                  'Metaclasses\n'
                  '-----------\n'
@@ -10118,10 +9153,10 @@
                  'come from\n'
                  'the class definition). The "__prepare__" method should be '
                  'implemented\n'
-                 'as a "classmethod". The namespace returned by "__prepare__" '
-                 'is passed\n'
-                 'in to "__new__", but when the final class object is created '
-                 'the\n'
+                 'as a "classmethod()". The namespace returned by '
+                 '"__prepare__" is\n'
+                 'passed in to "__new__", but when the final class object is '
+                 'created the\n'
                  'namespace is copied into a new "dict".\n'
                  '\n'
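A sketch of the hook order described above: "__prepare__" supplies the
namespace that the class body populates before "type.__new__" sees it
("Meta" is an illustrative metaclass):

    class Meta(type):
        @classmethod
        def __prepare__(mcls, name, bases, **kwds):
            print('preparing namespace for', name)
            return {}
        def __new__(mcls, name, bases, namespace, **kwds):
            print('creating', name, 'with', sorted(namespace))
            return super().__new__(mcls, name, bases, namespace)

    class C(metaclass=Meta):
        x = 1
    # preparing namespace for C
    # creating C with ['__module__', '__qualname__', 'x']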
                  'If the metaclass has no "__prepare__" attribute, then the '
@@ -10201,21 +9236,22 @@
                  'When using the default metaclass "type", or any metaclass '
                  'that\n'
                  'ultimately calls "type.__new__", the following additional\n'
-                 'customization steps are invoked after creating the class '
+                 'customisation steps are invoked after creating the class '
                  'object:\n'
                  '\n'
-                 '1. The "type.__new__" method collects all of the attributes '
-                 'in the\n'
-                 '   class namespace that define a "__set_name__()" method;\n'
+                 '* first, "type.__new__" collects all of the descriptors in '
+                 'the class\n'
+                 '  namespace that define a "__set_name__()" method;\n'
                  '\n'
-                 '2. Those "__set_name__" methods are called with the class '
-                 'being\n'
-                 '   defined and the assigned name of that particular '
-                 'attribute;\n'
+                 '* second, all of these "__set_name__" methods are called '
+                 'with the\n'
+                 '  class being defined and the assigned name of that '
+                 'particular\n'
+                 '  descriptor;\n'
                  '\n'
-                 '3. The "__init_subclass__()" hook is called on the immediate '
-                 'parent of\n'
-                 '   the new class in its method resolution order.\n'
+                 '* finally, the "__init_subclass__()" hook is called on the '
+                 'immediate\n'
+                 '  parent of the new class in its method resolution order.\n'
                  '\n'
                  'After the class object is created, it is passed to the '
                  'class\n'
@@ -10308,33 +9344,9 @@
                  'Emulating generic types\n'
                  '=======================\n'
                  '\n'
-                 'When using *type annotations*, it is often useful to '
-                 '*parameterize* a\n'
-                 '*generic type* using Python’s square-brackets notation. For '
-                 'example,\n'
-                 'the annotation "list[int]" might be used to signify a "list" '
-                 'in which\n'
-                 'all the elements are of type "int".\n'
-                 '\n'
-                 'See also:\n'
-                 '\n'
-                 '  **PEP 484** - Type Hints\n'
-                 '     Introducing Python’s framework for type annotations\n'
-                 '\n'
-                 '  Generic Alias Types\n'
-                 '     Documentation for objects representing parameterized '
-                 'generic\n'
-                 '     classes\n'
-                 '\n'
-                 '  Generics, user-defined generics and "typing.Generic"\n'
-                 '     Documentation on how to implement generic classes that '
-                 'can be\n'
-                 '     parameterized at runtime and understood by static '
-                 'type-checkers.\n'
-                 '\n'
-                 'A class can *generally* only be parameterized if it defines '
-                 'the\n'
-                 'special class method "__class_getitem__()".\n'
+                 'One can implement the generic class syntax as specified by '
+                 '**PEP 484**\n'
+                 '(for example "List[int]") by defining a special method:\n'
                  '\n'
                  'classmethod object.__class_getitem__(cls, key)\n'
                  '\n'
@@ -10342,144 +9354,18 @@
                  'generic class\n'
                  '   by type arguments found in *key*.\n'
                  '\n'
-                 '   When defined on a class, "__class_getitem__()" is '
-                 'automatically a\n'
-                 '   class method. As such, there is no need for it to be '
-                 'decorated with\n'
-                 '   "@classmethod" when it is defined.\n'
-                 '\n'
-                 '\n'
-                 'The purpose of *__class_getitem__*\n'
-                 '----------------------------------\n'
-                 '\n'
-                 'The purpose of "__class_getitem__()" is to allow runtime\n'
-                 'parameterization of standard-library generic classes in '
-                 'order to more\n'
-                 'easily apply *type hints* to these classes.\n'
-                 '\n'
-                 'To implement custom generic classes that can be '
-                 'parameterized at\n'
-                 'runtime and understood by static type-checkers, users should '
-                 'either\n'
-                 'inherit from a standard library class that already '
-                 'implements\n'
-                 '"__class_getitem__()", or inherit from "typing.Generic", '
-                 'which has its\n'
-                 'own implementation of "__class_getitem__()".\n'
-                 '\n'
-                 'Custom implementations of "__class_getitem__()" on classes '
-                 'defined\n'
-                 'outside of the standard library may not be understood by '
-                 'third-party\n'
-                 'type-checkers such as mypy. Using "__class_getitem__()" on '
-                 'any class\n'
-                 'for purposes other than type hinting is discouraged.\n'
-                 '\n'
-                 '\n'
-                 '*__class_getitem__* versus *__getitem__*\n'
-                 '----------------------------------------\n'
-                 '\n'
-                 'Usually, the subscription of an object using square brackets '
-                 'will call\n'
-                 'the "__getitem__()" instance method defined on the object’s '
-                 'class.\n'
-                 'However, if the object being subscribed is itself a class, '
-                 'the class\n'
-                 'method "__class_getitem__()" may be called instead.\n'
-                 '"__class_getitem__()" should return a GenericAlias object if '
-                 'it is\n'
-                 'properly defined.\n'
-                 '\n'
-                 'Presented with the *expression* "obj[x]", the Python '
-                 'interpreter\n'
-                 'follows something like the following process to decide '
-                 'whether\n'
-                 '"__getitem__()" or "__class_getitem__()" should be called:\n'
-                 '\n'
-                 '   from inspect import isclass\n'
-                 '\n'
-                 '   def subscribe(obj, x):\n'
-                 '       """Return the result of the expression `obj[x]`"""\n'
-                 '\n'
-                 '       class_of_obj = type(obj)\n'
-                 '\n'
-                 '       # If the class of obj defines __getitem__,\n'
-                 '       # call class_of_obj.__getitem__(obj, x)\n'
-                 "       if hasattr(class_of_obj, '__getitem__'):\n"
-                 '           return class_of_obj.__getitem__(obj, x)\n'
-                 '\n'
-                 '       # Else, if obj is a class and defines '
-                 '__class_getitem__,\n'
-                 '       # call obj.__class_getitem__(x)\n'
-                 '       elif isclass(obj) and hasattr(obj, '
-                 "'__class_getitem__'):\n"
-                 '           return obj.__class_getitem__(x)\n'
-                 '\n'
-                 '       # Else, raise an exception\n'
-                 '       else:\n'
-                 '           raise TypeError(\n'
-                 '               f"\'{class_of_obj.__name__}\' object is not '
-                 'subscriptable"\n'
-                 '           )\n'
-                 '\n'
-                 'In Python, all classes are themselves instances of other '
-                 'classes. The\n'
-                 'class of a class is known as that class’s *metaclass*, and '
-                 'most\n'
-                 'classes have the "type" class as their metaclass. "type" '
-                 'does not\n'
-                 'define "__getitem__()", meaning that expressions such as '
-                 '"list[int]",\n'
-                 '"dict[str, float]" and "tuple[str, bytes]" all result in\n'
-                 '"__class_getitem__()" being called:\n'
-                 '\n'
-                 '   >>> # list has class "type" as its metaclass, like most '
-                 'classes:\n'
-                 '   >>> type(list)\n'
-                 "   <class 'type'>\n"
-                 '   >>> type(dict) == type(list) == type(tuple) == type(str) '
-                 '== type(bytes)\n'
-                 '   True\n'
-                 '   >>> # "list[int]" calls "list.__class_getitem__(int)"\n'
-                 '   >>> list[int]\n'
-                 '   list[int]\n'
-                 '   >>> # list.__class_getitem__ returns a GenericAlias '
-                 'object:\n'
-                 '   >>> type(list[int])\n'
-                 "   <class 'types.GenericAlias'>\n"
-                 '\n'
-                 'However, if a class has a custom metaclass that defines\n'
-                 '"__getitem__()", subscribing the class may result in '
-                 'different\n'
-                 'behaviour. An example of this can be found in the "enum" '
-                 'module:\n'
-                 '\n'
-                 '   >>> from enum import Enum\n'
-                 '   >>> class Menu(Enum):\n'
-                 '   ...     """A breakfast menu"""\n'
-                 "   ...     SPAM = 'spam'\n"
-                 "   ...     BACON = 'bacon'\n"
-                 '   ...\n'
-                 '   >>> # Enum classes have a custom metaclass:\n'
-                 '   >>> type(Menu)\n'
-                 "   <class 'enum.EnumMeta'>\n"
-                 '   >>> # EnumMeta defines __getitem__,\n'
-                 '   >>> # so __class_getitem__ is not called,\n'
-                 '   >>> # and the result is not a GenericAlias object:\n'
-                 "   >>> Menu['SPAM']\n"
-                 "   <Menu.SPAM: 'spam'>\n"
-                 "   >>> type(Menu['SPAM'])\n"
-                 "   <enum 'Menu'>\n"
+                 'This method is looked up on the class object itself, and '
+                 'when defined\n'
+                 'in the class body, this method is implicitly a class '
+                 'method.  Note:\n'
+                 'this mechanism is primarily reserved for use with static '
+                 'type hints;\n'
+                 'other usage is discouraged.\n'
                  '\n'
                  'See also:\n'
                  '\n'
-                 '  **PEP 560** - Core Support for typing module and generic '
+                 '  **PEP 560** - Core support for typing module and generic '
                  'types\n'
-                 '     Introducing "__class_getitem__()", and outlining when '
-                 'a\n'
-                 '     subscription results in "__class_getitem__()" being '
-                 'called\n'
-                 '     instead of "__getitem__()"\n'
                  '\n'
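A minimal sketch of the hook as documented for this version; when
defined in the class body it is implicitly a class method ("Array" is
an illustrative name, and real generic classes return typing aliases
rather than strings):

    class Array:
        def __class_getitem__(cls, item):
            return f'{cls.__name__}[{item.__name__}]'

    print(Array[int])   # Array[int]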
                  '\n'
                  'Emulating callable objects\n'
@@ -10498,60 +9384,60 @@
                  '\n'
                  'The following methods can be defined to implement container '
                  'objects.\n'
-                 'Containers usually are *sequences* (such as "lists" or '
-                 '"tuples") or\n'
-                 '*mappings* (like "dictionaries"), but can represent other '
-                 'containers\n'
-                 'as well.  The first set of methods is used either to emulate '
-                 'a\n'
-                 'sequence or to emulate a mapping; the difference is that for '
-                 'a\n'
-                 'sequence, the allowable keys should be the integers *k* for '
-                 'which "0\n'
-                 '<= k < N" where *N* is the length of the sequence, or '
-                 '"slice" objects,\n'
-                 'which define a range of items.  It is also recommended that '
-                 'mappings\n'
-                 'provide the methods "keys()", "values()", "items()", '
-                 '"get()",\n'
-                 '"clear()", "setdefault()", "pop()", "popitem()", "copy()", '
-                 'and\n'
-                 '"update()" behaving similar to those for Python’s standard\n'
-                 '"dictionary" objects.  The "collections.abc" module provides '
-                 'a\n'
-                 '"MutableMapping" *abstract base class* to help create those '
-                 'methods\n'
-                 'from a base set of "__getitem__()", "__setitem__()", '
-                 '"__delitem__()",\n'
-                 'and "keys()". Mutable sequences should provide methods '
-                 '"append()",\n'
-                 '"count()", "index()", "extend()", "insert()", "pop()", '
-                 '"remove()",\n'
-                 '"reverse()" and "sort()", like Python standard "list" '
+                 'Containers usually are sequences (such as lists or tuples) '
+                 'or mappings\n'
+                 '(like dictionaries), but can represent other containers as '
+                 'well.  The\n'
+                 'first set of methods is used either to emulate a sequence or '
+                 'to\n'
+                 'emulate a mapping; the difference is that for a sequence, '
+                 'the\n'
+                 'allowable keys should be the integers *k* for which "0 <= k '
+                 '< N" where\n'
+                 '*N* is the length of the sequence, or slice objects, which '
+                 'define a\n'
+                 'range of items.  It is also recommended that mappings '
+                 'provide the\n'
+                 'methods "keys()", "values()", "items()", "get()", '
+                 '"clear()",\n'
+                 '"setdefault()", "pop()", "popitem()", "copy()", and '
+                 '"update()"\n'
+                 'behaving similarly to those for Python’s standard dictionary '
                  'objects.\n'
-                 'Finally, sequence types should implement addition (meaning\n'
-                 'concatenation) and multiplication (meaning repetition) by '
-                 'defining the\n'
-                 'methods "__add__()", "__radd__()", "__iadd__()", '
-                 '"__mul__()",\n'
-                 '"__rmul__()" and "__imul__()" described below; they should '
-                 'not define\n'
-                 'other numerical operators.  It is recommended that both '
-                 'mappings and\n'
-                 'sequences implement the "__contains__()" method to allow '
-                 'efficient use\n'
-                 'of the "in" operator; for mappings, "in" should search the '
-                 'mapping’s\n'
-                 'keys; for sequences, it should search through the values.  '
-                 'It is\n'
-                 'further recommended that both mappings and sequences '
-                 'implement the\n'
-                 '"__iter__()" method to allow efficient iteration through '
-                 'the\n'
-                 'container; for mappings, "__iter__()" should iterate through '
-                 'the\n'
-                 'object’s keys; for sequences, it should iterate through the '
-                 'values.\n'
+                 'The "collections.abc" module provides a "MutableMapping" '
+                 'abstract base\n'
+                 'class to help create those methods from a base set of '
+                 '"__getitem__()",\n'
+                 '"__setitem__()", "__delitem__()", and "keys()". Mutable '
+                 'sequences\n'
+                 'should provide methods "append()", "count()", "index()", '
+                 '"extend()",\n'
+                 '"insert()", "pop()", "remove()", "reverse()" and "sort()", '
+                 'like Python\n'
+                 'standard list objects.  Finally, sequence types should '
+                 'implement\n'
+                 'addition (meaning concatenation) and multiplication '
+                 '(meaning\n'
+                 'repetition) by defining the methods "__add__()", '
+                 '"__radd__()",\n'
+                 '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" '
+                 'described\n'
+                 'below; they should not define other numerical operators.  It '
+                 'is\n'
+                 'recommended that both mappings and sequences implement the\n'
+                 '"__contains__()" method to allow efficient use of the "in" '
+                 'operator;\n'
+                 'for mappings, "in" should search the mapping’s keys; for '
+                 'sequences, it\n'
+                 'should search through the values.  It is further recommended '
+                 'that both\n'
+                 'mappings and sequences implement the "__iter__()" method to '
+                 'allow\n'
+                 'efficient iteration through the container; for mappings, '
+                 '"__iter__()"\n'
+                 'should iterate through the object’s keys; for sequences, it '
+                 'should\n'
+                 'iterate through the values.\n'
                  '\n'
                  'object.__len__(self)\n'
                  '\n'
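
The container-emulation text above can be made concrete with a minimal
sketch (class and attribute names here are illustrative, not part of the
patch). "collections.abc.MutableMapping" derives "keys()", "items()",
"get()", "update()" and the rest once the base methods are supplied:

    from collections.abc import MutableMapping

    class LowerDict(MutableMapping):
        """A dict-like container that lower-cases its string keys."""
        def __init__(self):
            self._data = {}
        def __getitem__(self, key):
            return self._data[key.lower()]
        def __setitem__(self, key, value):
            self._data[key.lower()] = value
        def __delitem__(self, key):
            del self._data[key.lower()]
        def __iter__(self):            # the ABC also requires __iter__/__len__
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    d = LowerDict()
    d['Spam'] = 1
    assert d['spam'] == 1 and list(d.keys()) == ['spam']
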
@@ -10609,23 +9495,22 @@
                  'object.__getitem__(self, key)\n'
                  '\n'
                  '   Called to implement evaluation of "self[key]". For '
-                 '*sequence*\n'
-                 '   types, the accepted keys should be integers and slice '
-                 'objects.\n'
-                 '   Note that the special interpretation of negative indexes '
-                 '(if the\n'
-                 '   class wishes to emulate a *sequence* type) is up to the\n'
-                 '   "__getitem__()" method. If *key* is of an inappropriate '
-                 'type,\n'
-                 '   "TypeError" may be raised; if of a value outside the set '
-                 'of indexes\n'
-                 '   for the sequence (after any special interpretation of '
-                 'negative\n'
-                 '   values), "IndexError" should be raised. For *mapping* '
-                 'types, if\n'
-                 '   *key* is missing (not in the container), "KeyError" '
+                 'sequence types,\n'
+                 '   the accepted keys should be integers and slice objects.  '
+                 'Note that\n'
+                 '   the special interpretation of negative indexes (if the '
+                 'class wishes\n'
+                 '   to emulate a sequence type) is up to the "__getitem__()" '
+                 'method. If\n'
+                 '   *key* is of an inappropriate type, "TypeError" may be '
+                 'raised; if of\n'
+                 '   a value outside the set of indexes for the sequence '
+                 '(after any\n'
+                 '   special interpretation of negative values), "IndexError" '
                  'should be\n'
-                 '   raised.\n'
+                 '   raised. For mapping types, if *key* is missing (not in '
+                 'the\n'
+                 '   container), "KeyError" should be raised.\n'
                  '\n'
                  '   Note:\n'
                  '\n'
@@ -10635,14 +9520,6 @@
                  'the\n'
                  '     sequence.\n'
                  '\n'
-                 '   Note:\n'
-                 '\n'
-                 '     When subscripting a *class*, the special class method\n'
-                 '     "__class_getitem__()" may be called instead of '
-                 '"__getitem__()".\n'
-                 '     See __class_getitem__ versus __getitem__ for more '
-                 'details.\n'
-                 '\n'
                  'object.__setitem__(self, key, value)\n'
                  '\n'
                  '   Called to implement assignment to "self[key]".  Same note '
@@ -10678,13 +9555,19 @@
                  '\n'
                  'object.__iter__(self)\n'
                  '\n'
-                 '   This method is called when an *iterator* is required for '
-                 'a\n'
-                 '   container. This method should return a new iterator '
-                 'object that can\n'
-                 '   iterate over all the objects in the container.  For '
-                 'mappings, it\n'
-                 '   should iterate over the keys of the container.\n'
+                 '   This method is called when an iterator is required for a '
+                 'container.\n'
+                 '   This method should return a new iterator object that can '
+                 'iterate\n'
+                 '   over all the objects in the container.  For mappings, it '
+                 'should\n'
+                 '   iterate over the keys of the container.\n'
+                 '\n'
+                 '   Iterator objects also need to implement this method; they '
+                 'are\n'
+                 '   required to return themselves.  For more information on '
+                 'iterator\n'
+                 '   objects, see Iterator Types.\n'
                  '\n'
                  'object.__reversed__(self)\n'
                  '\n'
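
A minimal sketch of the "__iter__()" contract described above (names are
illustrative): containers return a fresh iterator, and iterators return
themselves.

    class Countdown:
        def __init__(self, n):
            self.n = n
        def __iter__(self):
            return _CountdownIterator(self.n)

    class _CountdownIterator:
        def __init__(self, n):
            self.n = n
        def __iter__(self):
            return self                 # iterators must return themselves
        def __next__(self):
            if self.n <= 0:
                raise StopIteration
            self.n -= 1
            return self.n + 1

    assert list(Countdown(3)) == [3, 2, 1]
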
@@ -10877,6 +9760,16 @@
                  'the data\n'
                  '   model.\n'
                  '\n'
+                 '   Note:\n'
+                 '\n'
+                 '     Due to a bug in the dispatching mechanism for "**=", a '
+                 'class that\n'
+                 '     defines "__ipow__()" but returns "NotImplemented" would '
+                 'fail to\n'
+                 '     fall back to "x.__pow__(y)" and "y.__rpow__(x)". This '
+                 'bug is\n'
+                 '     fixed in Python 3.10.\n'
+                 '\n'
                  'object.__neg__(self)\n'
                  'object.__pos__(self)\n'
                  'object.__abs__(self)\n'
@@ -10927,9 +9820,9 @@
                  '   of the object truncated to an "Integral" (typically an '
                  '"int").\n'
                  '\n'
-                 '   The built-in function "int()" falls back to "__trunc__()" '
-                 'if\n'
-                 '   neither "__int__()" nor "__index__()" is defined.\n'
+                 '   If "__int__()" is not defined then the built-in function '
+                 '"int()"\n'
+                 '   falls back to "__trunc__()".\n'
                  '\n'
                  '\n'
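
A sketch of the fallback just described, under the Python 3.9 semantics
this file documents (the class name is illustrative): with no
"__int__()" defined, "int()" uses "__trunc__()".

    class Truncatable:
        def __trunc__(self):
            return 7

    assert int(Truncatable()) == 7   # int() falls back to __trunc__()
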
                  'With Statement Context Managers\n'
@@ -10995,51 +9888,6 @@
                  '     statement.\n'
                  '\n'
                  '\n'
-                 'Customizing positional arguments in class pattern matching\n'
-                 '==========================================================\n'
-                 '\n'
-                 'When using a class name in a pattern, positional arguments '
-                 'in the\n'
-                 'pattern are not allowed by default, i.e. "case MyClass(x, '
-                 'y)" is\n'
-                 'typically invalid without special support in "MyClass". To '
-                 'be able to\n'
-                 'use that kind of patterns, the class needs to define a\n'
-                 '*__match_args__* attribute.\n'
-                 '\n'
-                 'object.__match_args__\n'
-                 '\n'
-                 '   This class variable can be assigned a tuple of strings. '
-                 'When this\n'
-                 '   class is used in a class pattern with positional '
-                 'arguments, each\n'
-                 '   positional argument will be converted into a keyword '
-                 'argument,\n'
-                 '   using the corresponding value in *__match_args__* as the '
-                 'keyword.\n'
-                 '   The absence of this attribute is equivalent to setting it '
-                 'to "()".\n'
-                 '\n'
-                 'For example, if "MyClass.__match_args__" is "("left", '
-                 '"center",\n'
-                 '"right")" that means that "case MyClass(x, y)" is equivalent '
-                 'to "case\n'
-                 'MyClass(left=x, center=y)". Note that the number of '
-                 'arguments in the\n'
-                 'pattern must be smaller than or equal to the number of '
-                 'elements in\n'
-                 '*__match_args__*; if it is larger, the pattern match attempt '
-                 'will\n'
-                 'raise a "TypeError".\n'
-                 '\n'
-                 'New in version 3.10.\n'
-                 '\n'
-                 'See also:\n'
-                 '\n'
-                 '  **PEP 634** - Structural Pattern Matching\n'
-                 '     The specification for the Python "match" statement.\n'
-                 '\n'
-                 '\n'
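
For reference, the "__match_args__" mechanism documented in the section
removed above can be sketched as follows (requires Python 3.10, the
version that introduced "match"; names are illustrative):

    class Point:
        __match_args__ = ("x", "y")
        def __init__(self, x, y):
            self.x, self.y = x, y

    def where(p):
        match p:
            case Point(0, 0):          # same as case Point(x=0, y=0)
                return "origin"
            case Point(x, y):
                return f"({x}, {y})"

    assert where(Point(0, 0)) == "origin"
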
                  'Special method lookup\n'
                  '=====================\n'
                  '\n'
@@ -11210,7 +10058,7 @@
                    '*start* and\n'
                    '   *end* are interpreted as in slice notation.\n'
                    '\n'
-                   "str.encode(encoding='utf-8', errors='strict')\n"
+                   'str.encode(encoding="utf-8", errors="strict")\n'
                    '\n'
                    '   Return an encoded version of the string as a bytes '
                    'object. Default\n'
@@ -11459,9 +10307,9 @@
                    '      >>> from keyword import iskeyword\n'
                    '\n'
                    "      >>> 'hello'.isidentifier(), iskeyword('hello')\n"
-                   '      (True, False)\n'
+                   '      True, False\n'
                    "      >>> 'def'.isidentifier(), iskeyword('def')\n"
-                   '      (True, True)\n'
+                   '      True, True\n'
                    '\n'
                    'str.islower()\n'
                    '\n'
@@ -11716,7 +10564,7 @@
                    'followed by\n'
                    '   the string itself.\n'
                    '\n'
-                   'str.rsplit(sep=None, maxsplit=- 1)\n'
+                   'str.rsplit(sep=None, maxsplit=-1)\n'
                    '\n'
                    '   Return a list of the words in the string, using *sep* '
                    'as the\n'
@@ -11757,7 +10605,7 @@
                    "      >>> 'Monty Python'.removesuffix(' Python')\n"
                    "      'Monty'\n"
                    '\n'
-                   'str.split(sep=None, maxsplit=- 1)\n'
+                   'str.split(sep=None, maxsplit=-1)\n'
                    '\n'
                    '   Return a list of the words in the string, using *sep* '
                    'as the\n'
@@ -11812,7 +10660,7 @@
                    "      >>> '   1   2   3   '.split()\n"
                    "      ['1', '2', '3']\n"
                    '\n'
-                   'str.splitlines(keepends=False)\n'
+                   'str.splitlines([keepends])\n'
                    '\n'
                    '   Return a list of the lines in the string, breaking at '
                    'line\n'
@@ -12293,86 +11141,67 @@
  'subscriptions': 'Subscriptions\n'
                   '*************\n'
                   '\n'
-                  'The subscription of an instance of a container class will '
-                  'generally\n'
-                  'select an element from the container. The subscription of a '
-                  '*generic\n'
-                  'class* will generally return a GenericAlias object.\n'
+                  'Subscription of a sequence (string, tuple or list) or '
+                  'mapping\n'
+                  '(dictionary) object usually selects an item from the '
+                  'collection:\n'
                   '\n'
                   '   subscription ::= primary "[" expression_list "]"\n'
                   '\n'
-                  'When an object is subscripted, the interpreter will '
-                  'evaluate the\n'
-                  'primary and the expression list.\n'
-                  '\n'
                   'The primary must evaluate to an object that supports '
-                  'subscription. An\n'
-                  'object may support subscription through defining one or '
-                  'both of\n'
-                  '"__getitem__()" and "__class_getitem__()". When the primary '
-                  'is\n'
-                  'subscripted, the evaluated result of the expression list '
-                  'will be\n'
-                  'passed to one of these methods. For more details on when\n'
-                  '"__class_getitem__" is called instead of "__getitem__", '
-                  'see\n'
-                  '__class_getitem__ versus __getitem__.\n'
-                  '\n'
-                  'If the expression list contains at least one comma, it will '
-                  'evaluate\n'
-                  'to a "tuple" containing the items of the expression list. '
-                  'Otherwise,\n'
-                  'the expression list will evaluate to the value of the '
-                  'list’s sole\n'
-                  'member.\n'
+                  'subscription\n'
+                  '(lists or dictionaries for example).  User-defined objects '
+                  'can support\n'
+                  'subscription by defining a "__getitem__()" method.\n'
                   '\n'
                   'For built-in objects, there are two types of objects that '
                   'support\n'
-                  'subscription via "__getitem__()":\n'
+                  'subscription:\n'
                   '\n'
-                  '1. Mappings. If the primary is a *mapping*, the expression '
-                  'list must\n'
-                  '   evaluate to an object whose value is one of the keys of '
+                  'If the primary is a mapping, the expression list must '
+                  'evaluate to an\n'
+                  'object whose value is one of the keys of the mapping, and '
                   'the\n'
-                  '   mapping, and the subscription selects the value in the '
-                  'mapping that\n'
-                  '   corresponds to that key. An example of a builtin mapping '
-                  'class is\n'
-                  '   the "dict" class.\n'
+                  'subscription selects the value in the mapping that '
+                  'corresponds to that\n'
+                  'key.  (The expression list is a tuple except if it has '
+                  'exactly one\n'
+                  'item.)\n'
                   '\n'
-                  '2. Sequences. If the primary is a *sequence*, the '
-                  'expression list must\n'
-                  '   evaluate to an "int" or a "slice" (as discussed in the '
-                  'following\n'
-                  '   section). Examples of builtin sequence classes include '
-                  'the "str",\n'
-                  '   "list" and "tuple" classes.\n'
+                  'If the primary is a sequence, the expression list must '
+                  'evaluate to an\n'
+                  'integer or a slice (as discussed in the following '
+                  'section).\n'
                   '\n'
                   'The formal syntax makes no special provision for negative '
                   'indices in\n'
-                  '*sequences*. However, built-in sequences all provide a '
+                  'sequences; however, built-in sequences all provide a '
                   '"__getitem__()"\n'
                   'method that interprets negative indices by adding the '
                   'length of the\n'
-                  'sequence to the index so that, for example, "x[-1]" selects '
-                  'the last\n'
-                  'item of "x". The resulting value must be a nonnegative '
-                  'integer less\n'
-                  'than the number of items in the sequence, and the '
-                  'subscription selects\n'
-                  'the item whose index is that value (counting from zero). '
-                  'Since the\n'
-                  'support for negative indices and slicing occurs in the '
-                  'object’s\n'
-                  '"__getitem__()" method, subclasses overriding this method '
-                  'will need to\n'
-                  'explicitly add that support.\n'
+                  'sequence to the index (so that "x[-1]" selects the last '
+                  'item of "x").\n'
+                  'The resulting value must be a nonnegative integer less than '
+                  'the number\n'
+                  'of items in the sequence, and the subscription selects the '
+                  'item whose\n'
+                  'index is that value (counting from zero). Since the support '
+                  'for\n'
+                  'negative indices and slicing occurs in the object’s '
+                  '"__getitem__()"\n'
+                  'method, subclasses overriding this method will need to '
+                  'explicitly add\n'
+                  'that support.\n'
                   '\n'
-                  'A "string" is a special kind of sequence whose items are '
-                  '*characters*.\n'
-                  'A character is not a separate data type but a string of '
-                  'exactly one\n'
-                  'character.\n',
+                  'A string’s items are characters.  A character is not a '
+                  'separate data\n'
+                  'type but a string of exactly one character.\n'
+                  '\n'
+                  'Subscription of certain *classes* or *types* creates a '
+                  'generic alias.\n'
+                  'In this case, user-defined classes can support subscription '
+                  'by\n'
+                  'providing a "__class_getitem__()" classmethod.\n',
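
The two subscription paths mentioned at the end of this topic can be
sketched side by side (class name illustrative): instances dispatch to
"__getitem__()", classes to "__class_getitem__()".

    class Squares:
        def __getitem__(self, i):
            return i * i
        def __class_getitem__(cls, item):
            return f"{cls.__name__}[{item!r}]"

    assert Squares()[4] == 16                          # instance subscription
    assert Squares[int] == "Squares[<class 'int'>]"    # class subscription
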
  'truth': 'Truth Value Testing\n'
           '*******************\n'
           '\n'
@@ -12430,8 +11259,7 @@
         'object is “compatible” with the exception.  An object is compatible\n'
         'with an exception if it is the class or a base class of the '
         'exception\n'
-        'object, or a tuple containing an item that is the class or a base\n'
-        'class of the exception object.\n'
+        'object or a tuple containing an item compatible with the exception.\n'
         '\n'
         'If no except clause matches the exception, the search for an '
         'exception\n'
@@ -12486,31 +11314,9 @@
         'the\n'
         'exception class, the exception instance and a traceback object (see\n'
         'section The standard type hierarchy) identifying the point in the\n'
-        'program where the exception occurred.  The details about the '
-        'exception\n'
-        'accessed via "sys.exc_info()" are restored to their previous values\n'
-        'when leaving an exception handler:\n'
-        '\n'
-        '   >>> print(sys.exc_info())\n'
-        '   (None, None, None)\n'
-        '   >>> try:\n'
-        '   ...     raise TypeError\n'
-        '   ... except:\n'
-        '   ...     print(sys.exc_info())\n'
-        '   ...     try:\n'
-        '   ...          raise ValueError\n'
-        '   ...     except:\n'
-        '   ...         print(sys.exc_info())\n'
-        '   ...     print(sys.exc_info())\n'
-        '   ...\n'
-        "   (<class 'TypeError'>, TypeError(), <traceback object at "
-        '0x10efad080>)\n'
-        "   (<class 'ValueError'>, ValueError(), <traceback object at "
-        '0x10efad040>)\n'
-        "   (<class 'TypeError'>, TypeError(), <traceback object at "
-        '0x10efad080>)\n'
-        '   >>> print(sys.exc_info())\n'
-        '   (None, None, None)\n'
+        'program where the exception occurred.  "sys.exc_info()" values are\n'
+        'restored to their previous values (before the call) when returning\n'
+        'from a function that handled an exception.\n'
         '\n'
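
The compatibility rule above in one illustrative check: an except clause
matches the exception's class, a base class, or a tuple containing
either.

    try:
        raise KeyError('missing')
    except (LookupError, ValueError) as exc:   # KeyError subclasses LookupError
        handled = type(exc).__name__

    assert handled == 'KeyError'
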
         'The optional "else" clause is executed if the control flow leaves '
         'the\n'
@@ -12674,6 +11480,7 @@
           '      There are two types of integers:\n'
           '\n'
           '      Integers ("int")\n'
+          '\n'
           '         These represent numbers in an unlimited range, subject to\n'
           '         available (virtual) memory only.  For the purpose of '
           'shift\n'
@@ -12770,7 +11577,7 @@
           '         points. All the code points in the range "U+0000 - '
           'U+10FFFF"\n'
           '         can be represented in a string.  Python doesn’t have a '
-          '*char*\n'
+          '"char"\n'
           '         type; instead, every code point in the string is '
           'represented\n'
           '         as a string object with length "1".  The built-in '
@@ -13030,13 +11837,7 @@
           '|             |\n'
           '      |                           | and "\'return\'" for the '
           'return   |             |\n'
-          '      |                           | annotation, if provided.  For   '
-          '|             |\n'
-          '      |                           | more information on working     '
-          '|             |\n'
-          '      |                           | with this attribute, see        '
-          '|             |\n'
-          '      |                           | Annotations Best Practices.     '
+          '      |                           | annotation, if provided.        '
           '|             |\n'
           '      '
           '+---------------------------+---------------------------------+-------------+\n'
@@ -13157,18 +11958,20 @@
           '      A function or method which uses the "yield" statement (see\n'
           '      section The yield statement) is called a *generator '
           'function*.\n'
-          '      Such a function, when called, always returns an *iterator*\n'
-          '      object which can be used to execute the body of the '
-          'function:\n'
-          '      calling the iterator’s "iterator.__next__()" method will '
-          'cause\n'
-          '      the function to execute until it provides a value using the\n'
-          '      "yield" statement.  When the function executes a "return"\n'
-          '      statement or falls off the end, a "StopIteration" exception '
-          'is\n'
-          '      raised and the iterator will have reached the end of the set '
-          'of\n'
-          '      values to be returned.\n'
+          '      Such a function, when called, always returns an iterator '
+          'object\n'
+          '      which can be used to execute the body of the function:  '
+          'calling\n'
+          '      the iterator’s "iterator.__next__()" method will cause the\n'
+          '      function to execute until it provides a value using the '
+          '"yield"\n'
+          '      statement.  When the function executes a "return" statement '
+          'or\n'
+          '      falls off the end, a "StopIteration" exception is raised and '
+          'the\n'
+          '      iterator will have reached the end of the set of values to '
+          'be\n'
+          '      returned.\n'
           '\n'
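
A minimal generator-function sketch matching the paragraph above: each
"__next__()" call runs the body to the next "yield", and falling off the
end raises "StopIteration".

    def countdown(n):
        while n > 0:
            yield n
            n -= 1

    it = countdown(2)
    assert next(it) == 2 and next(it) == 1
    try:
        next(it)
    except StopIteration:
        pass                                   # end of the set of values
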
           '   Coroutine functions\n'
           '      A function or method which is defined using "async def" is\n'
@@ -13184,18 +11987,18 @@
          '      which uses the "yield" statement is called an *asynchronous\n'
           '      generator function*.  Such a function, when called, returns '
           'an\n'
-          '      *asynchronous iterator* object which can be used in an '
-          '"async\n'
-          '      for" statement to execute the body of the function.\n'
+          '      asynchronous iterator object which can be used in an "async '
+          'for"\n'
+          '      statement to execute the body of the function.\n'
           '\n'
-          '      Calling the asynchronous iterator’s "aiterator.__anext__" '
-          'method\n'
-          '      will return an *awaitable* which when awaited will execute '
-          'until\n'
-          '      it provides a value using the "yield" expression.  When the\n'
-          '      function executes an empty "return" statement or falls off '
+          '      Calling the asynchronous iterator’s "aiterator.__anext__()"\n'
+          '      method will return an *awaitable* which when awaited will\n'
+          '      execute until it provides a value using the "yield" '
+          'expression.\n'
+          '      When the function executes an empty "return" statement or '
+          'falls\n'
+          '      off the end, a "StopAsyncIteration" exception is raised and '
           'the\n'
-          '      end, a "StopAsyncIteration" exception is raised and the\n'
           '      asynchronous iterator will have reached the end of the set '
           'of\n'
           '      values to be yielded.\n'
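
An illustrative asynchronous generator per the paragraph above; "async
for" drives "__anext__()" until "StopAsyncIteration" is raised.

    import asyncio

    async def ticks(n):
        for i in range(n):
            yield i

    async def main():
        assert [i async for i in ticks(3)] == [0, 1, 2]

    asyncio.run(main())
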
@@ -13259,34 +12062,20 @@
           '   Attribute assignment updates the module’s namespace dictionary,\n'
           '   e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n'
           '\n'
-          '   Predefined (writable) attributes:\n'
-          '\n'
-          '      "__name__"\n'
-          '         The module’s name.\n'
-          '\n'
-          '      "__doc__"\n'
-          '         The module’s documentation string, or "None" if '
-          'unavailable.\n'
-          '\n'
-          '      "__file__"\n'
-          '         The pathname of the file from which the module was loaded, '
-          'if\n'
-          '         it was loaded from a file. The "__file__" attribute may '
-          'be\n'
-          '         missing for certain types of modules, such as C modules '
-          'that\n'
-          '         are statically linked into the interpreter.  For '
-          'extension\n'
-          '         modules loaded dynamically from a shared library, it’s '
+          '   Predefined (writable) attributes: "__name__" is the module’s '
+          'name;\n'
+          '   "__doc__" is the module’s documentation string, or "None" if\n'
+          '   unavailable; "__annotations__" (optional) is a dictionary\n'
+          '   containing *variable annotations* collected during module body\n'
+          '   execution; "__file__" is the pathname of the file from which '
           'the\n'
-          '         pathname of the shared library file.\n'
-          '\n'
-          '      "__annotations__"\n'
-          '         A dictionary containing *variable annotations* collected\n'
-          '         during module body execution.  For best practices on '
-          'working\n'
-          '         with "__annotations__", please see Annotations Best\n'
-          '         Practices.\n'
+          '   module was loaded, if it was loaded from a file. The "__file__"\n'
+          '   attribute may be missing for certain types of modules, such as '
+          'C\n'
+          '   modules that are statically linked into the interpreter; for\n'
+          '   extension modules loaded dynamically from a shared library, it '
+          'is\n'
+          '   the pathname of the shared library file.\n'
           '\n'
           '   Special read-only attribute: "__dict__" is the module’s '
           'namespace\n'
@@ -13344,31 +12133,20 @@
           'instance\n'
           '   (see below).\n'
           '\n'
-          '   Special attributes:\n'
-          '\n'
-          '      "__name__"\n'
-          '         The class name.\n'
-          '\n'
-          '      "__module__"\n'
-          '         The name of the module in which the class was defined.\n'
-          '\n'
-          '      "__dict__"\n'
-          '         The dictionary containing the class’s namespace.\n'
-          '\n'
-          '      "__bases__"\n'
-          '         A tuple containing the base classes, in the order of '
-          'their\n'
-          '         occurrence in the base class list.\n'
-          '\n'
-          '      "__doc__"\n'
-          '         The class’s documentation string, or "None" if undefined.\n'
-          '\n'
-          '      "__annotations__"\n'
-          '         A dictionary containing *variable annotations* collected\n'
-          '         during class body execution.  For best practices on '
-          'working\n'
-          '         with "__annotations__", please see Annotations Best\n'
-          '         Practices.\n'
+          '   Special attributes: "__name__" is the class name; "__module__" '
+          'is\n'
+          '   the module name in which the class was defined; "__dict__" is '
+          'the\n'
+          '   dictionary containing the class’s namespace; "__bases__" is a '
+          'tuple\n'
+          '   containing the base classes, in the order of their occurrence '
+          'in\n'
+          '   the base class list; "__doc__" is the class’s documentation '
+          'string,\n'
+          '   or "None" if undefined; "__annotations__" (optional) is a\n'
+          '   dictionary containing *variable annotations* collected during '
+          'class\n'
+          '   body execution.\n'
           '\n'
           'Class instances\n'
           '   A class instance is created by calling a class object (see '
@@ -13529,10 +12307,6 @@
           '      gives the precise instruction (this is an index into the\n'
           '      bytecode string of the code object).\n'
           '\n'
-          '      Accessing "f_code" raises an auditing event '
-          '"object.__getattr__"\n'
-          '      with arguments "obj" and ""f_code"".\n'
-          '\n'
           '      Special writable attributes: "f_trace", if not "None", is a\n'
           '      function called for various events during code execution '
           '(this\n'
@@ -13616,9 +12390,6 @@
           '      the exception occurred in a "try" statement with no matching\n'
           '      except clause or with a finally clause.\n'
           '\n'
-          '      Accessing "tb_frame" raises an auditing event\n'
-          '      "object.__getattr__" with arguments "obj" and ""tb_frame"".\n'
-          '\n'
           '      Special writable attribute: "tb_next" is the next level in '
           'the\n'
           '      stack trace (towards the frame where the exception occurred), '
@@ -13669,8 +12440,9 @@
           '      object actually returned is the wrapped object, which is not\n'
           '      subject to any further transformation. Static method objects '
           'are\n'
-          '      also callable. Static method objects are created by the '
-          'built-in\n'
+          '      not themselves callable, although the objects they wrap '
+          'usually\n'
+          '      are. Static method objects are created by the built-in\n'
           '      "staticmethod()" constructor.\n'
           '\n'
           '   Class method objects\n'
@@ -13739,9 +12511,9 @@
                  '"dict"\n'
                  'constructor.\n'
                  '\n'
-                 'class dict(**kwargs)\n'
-                 'class dict(mapping, **kwargs)\n'
-                 'class dict(iterable, **kwargs)\n'
+                 'class dict(**kwarg)\n'
+                 'class dict(mapping, **kwarg)\n'
+                 'class dict(iterable, **kwarg)\n'
                  '\n'
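
The three constructor signatures above produce equal dictionaries for
equal contents, e.g.:

    a = dict(one=1, two=2)
    b = dict({'one': 1, 'two': 2})
    c = dict([('one', 1), ('two', 2)])
    assert a == b == c
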
                  '   Return a new dictionary initialized from an optional '
                  'positional\n'
@@ -14135,14 +12907,6 @@
                  '   Changed in version 3.8: Dictionary views are now '
                  'reversible.\n'
                  '\n'
-                 'dictview.mapping\n'
-                 '\n'
-                 '   Return a "types.MappingProxyType" that wraps the '
-                 'original\n'
-                 '   dictionary to which the view refers.\n'
-                 '\n'
-                 '   New in version 3.10.\n'
-                 '\n'
                  'Keys views are set-like since their entries are unique and '
                  'hashable.\n'
                  'If all values are hashable, so that "(key, value)" pairs are '
@@ -14188,15 +12952,7 @@
                  "   >>> keys & {'eggs', 'bacon', 'salad'}\n"
                  "   {'bacon'}\n"
                  "   >>> keys ^ {'sausage', 'juice'}\n"
-                 "   {'juice', 'sausage', 'bacon', 'spam'}\n"
-                 '\n'
-                 '   >>> # get back a read-only proxy for the original '
-                 'dictionary\n'
-                 '   >>> values.mapping\n'
-                 "   mappingproxy({'eggs': 2, 'sausage': 1, 'bacon': 1, "
-                 "'spam': 500})\n"
-                 "   >>> values.mapping['spam']\n"
-                 '   500\n',
+                 "   {'juice', 'sausage', 'bacon', 'spam'}\n",
  'typesmethods': 'Methods\n'
                  '*******\n'
                  '\n'
@@ -14391,14 +13147,6 @@
              'Comparisons in\n'
              'the language reference.)\n'
              '\n'
-             'Forward and reversed iterators over mutable sequences access '
-             'values\n'
-             'using an index.  That index will continue to march forward (or\n'
-             'backward) even if the underlying sequence is mutated.  The '
-             'iterator\n'
-             'terminates only when an "IndexError" or a "StopIteration" is\n'
-             'encountered (or when the index drops below zero).\n'
-             '\n'
              'Notes:\n'
              '\n'
              '1. While the "in" and "not in" operations are used only for '
@@ -14630,7 +13378,7 @@
              '|                                | "s[i:i] = '
              '[x]")                  |                       |\n'
              '+--------------------------------+----------------------------------+-----------------------+\n'
-             '| "s.pop()" or "s.pop(i)"        | retrieves the item at *i* '
+             '| "s.pop([i])"                   | retrieves the item at *i* '
              'and    | (2)                   |\n'
              '|                                | also removes it from '
              '*s*         |                       |\n'
@@ -14870,8 +13618,7 @@
              '\n'
              '   The arguments to the range constructor must be integers '
              '(either\n'
-             '   built-in "int" or any object that implements the '
-             '"__index__()"\n'
+             '   built-in "int" or any object that implements the "__index__"\n'
              '   special method).  If the *step* argument is omitted, it '
              'defaults to\n'
              '   "1". If the *start* argument is omitted, it defaults to "0". '
@@ -15094,7 +13841,7 @@
                      '|                                | "s[i:i] = '
                      '[x]")                  |                       |\n'
                      '+--------------------------------+----------------------------------+-----------------------+\n'
-                     '| "s.pop()" or "s.pop(i)"        | retrieves the item at '
+                     '| "s.pop([i])"                   | retrieves the item at '
                      '*i* and    | (2)                   |\n'
                      '|                                | also removes it from '
                      '*s*         |                       |\n'
@@ -15159,21 +13906,15 @@
           '   u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n'
           '\n'
           'The unary "-" (minus) operator yields the negation of its numeric\n'
-          'argument; the operation can be overridden with the "__neg__()" '
-          'special\n'
-          'method.\n'
+          'argument.\n'
           '\n'
           'The unary "+" (plus) operator yields its numeric argument '
-          'unchanged;\n'
-          'the operation can be overridden with the "__pos__()" special '
-          'method.\n'
+          'unchanged.\n'
           '\n'
           'The unary "~" (invert) operator yields the bitwise inversion of '
           'its\n'
           'integer argument.  The bitwise inversion of "x" is defined as\n'
-          '"-(x+1)".  It only applies to integral numbers or to custom '
-          'objects\n'
-          'that override the "__invert__()" special method.\n'
+          '"-(x+1)".  It only applies to integral numbers.\n'
           '\n'
           'In all three cases, if the argument does not have the proper type, '
           'a\n'
@@ -15211,10 +13952,8 @@
          'usage\n'
          'patterns to be encapsulated for convenient reuse.\n'
          '\n'
-         '   with_stmt          ::= "with" ( "(" with_stmt_contents ","? ")" | '
-         'with_stmt_contents ) ":" suite\n'
-         '   with_stmt_contents ::= with_item ("," with_item)*\n'
-         '   with_item          ::= expression ["as" target]\n'
+         '   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
+         '   with_item ::= expression ["as" target]\n'
          '\n'
          'The execution of the "with" statement with one “item” proceeds as\n'
          'follows:\n'
@@ -15300,20 +14039,8 @@
          '       with B() as b:\n'
          '           SUITE\n'
          '\n'
-         'You can also write multi-item context managers in multiple lines if\n'
-         'the items are surrounded by parentheses. For example:\n'
-         '\n'
-         '   with (\n'
-         '       A() as a,\n'
-         '       B() as b,\n'
-         '   ):\n'
-         '       SUITE\n'
-         '\n'
          'Changed in version 3.1: Support for multiple context expressions.\n'
          '\n'
-         'Changed in version 3.10: Support for using grouping parentheses to\n'
-         'break the statement in multiple lines.\n'
-         '\n'
          'See also:\n'
          '\n'
          '  **PEP 343** - The “with” statement\n'
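
A sketch of the protocol this section describes (class name
illustrative): "__enter__()" supplies the value bound by "as", and
"__exit__()" runs on the way out, even on error; multiple items nest
left to right.

    class Tag:
        def __init__(self, name):
            self.name = name
        def __enter__(self):
            print('<%s>' % self.name)
            return self.name
        def __exit__(self, exc_type, exc, tb):
            print('</%s>' % self.name)
            return False                # do not suppress exceptions

    with Tag('a') as outer, Tag('b') as inner:
        print(outer, inner)             # prints: <a> <b> a b </b> </a>
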
diff --git a/common/py3-stdlib/random.py b/common/py3-stdlib/random.py
index 1310a2d..a6454f5 100644
--- a/common/py3-stdlib/random.py
+++ b/common/py3-stdlib/random.py
@@ -48,10 +48,9 @@
 from warnings import warn as _warn
 from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
 from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
-from math import tau as TWOPI, floor as _floor, isfinite as _isfinite
+from math import tau as TWOPI, floor as _floor
 from os import urandom as _urandom
 from _collections_abc import Set as _Set, Sequence as _Sequence
-from operator import index as _index
 from itertools import accumulate as _accumulate, repeat as _repeat
 from bisect import bisect as _bisect
 import os as _os
@@ -78,7 +77,6 @@
     "lognormvariate",
     "normalvariate",
     "paretovariate",
-    "randbytes",
     "randint",
     "random",
     "randrange",
@@ -97,7 +95,6 @@
 SG_MAGICCONST = 1.0 + _log(4.5)
 BPF = 53        # Number of bits in a float
 RECIP_BPF = 2 ** -BPF
-_ONE = 1
 
 
 class Random(_random.Random):
@@ -154,7 +151,8 @@
         elif version == 2 and isinstance(a, (str, bytes, bytearray)):
             if isinstance(a, str):
                 a = a.encode()
-            a = int.from_bytes(a + _sha512(a).digest(), 'big')
+            a += _sha512(a).digest()
+            a = int.from_bytes(a, 'big')
 
         elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)):
             _warn('Seeding based on hashing is deprecated\n'
@@ -289,7 +287,7 @@
 
     ## -------------------- integer methods  -------------------
 
-    def randrange(self, start, stop=None, step=_ONE):
+    def randrange(self, start, stop=None, step=1):
         """Choose a random item from range(start, stop[, step]).
 
         This fixes the problem with randint() which includes the
@@ -299,68 +297,38 @@
 
         # This code is a bit messy to make it fast for the
         # common case while still doing adequate error checking.
-        try:
-            istart = _index(start)
-        except TypeError:
-            istart = int(start)
-            if istart != start:
-                _warn('randrange() will raise TypeError in the future',
-                      DeprecationWarning, 2)
-                raise ValueError("non-integer arg 1 for randrange()")
-            _warn('non-integer arguments to randrange() have been deprecated '
-                  'since Python 3.10 and will be removed in a subsequent '
-                  'version',
-                  DeprecationWarning, 2)
+        istart = int(start)
+        if istart != start:
+            raise ValueError("non-integer arg 1 for randrange()")
         if stop is None:
-            # We don't check for "step != 1" because it hasn't been
-            # type checked and converted to an integer yet.
-            if step is not _ONE:
-                raise TypeError('Missing a non-None stop argument')
             if istart > 0:
                 return self._randbelow(istart)
             raise ValueError("empty range for randrange()")
 
         # stop argument supplied.
-        try:
-            istop = _index(stop)
-        except TypeError:
-            istop = int(stop)
-            if istop != stop:
-                _warn('randrange() will raise TypeError in the future',
-                      DeprecationWarning, 2)
-                raise ValueError("non-integer stop for randrange()")
-            _warn('non-integer arguments to randrange() have been deprecated '
-                  'since Python 3.10 and will be removed in a subsequent '
-                  'version',
-                  DeprecationWarning, 2)
+        istop = int(stop)
+        if istop != stop:
+            raise ValueError("non-integer stop for randrange()")
         width = istop - istart
-        try:
-            istep = _index(step)
-        except TypeError:
-            istep = int(step)
-            if istep != step:
-                _warn('randrange() will raise TypeError in the future',
-                      DeprecationWarning, 2)
-                raise ValueError("non-integer step for randrange()")
-            _warn('non-integer arguments to randrange() have been deprecated '
-                  'since Python 3.10 and will be removed in a subsequent '
-                  'version',
-                  DeprecationWarning, 2)
-        # Fast path.
-        if istep == 1:
-            if width > 0:
-                return istart + self._randbelow(width)
+        if step == 1 and width > 0:
+            return istart + self._randbelow(width)
+        if step == 1:
             raise ValueError("empty range for randrange() (%d, %d, %d)" % (istart, istop, width))
 
         # Non-unit step argument supplied.
+        istep = int(step)
+        if istep != step:
+            raise ValueError("non-integer step for randrange()")
         if istep > 0:
             n = (width + istep - 1) // istep
         elif istep < 0:
             n = (width + istep + 1) // istep
         else:
             raise ValueError("zero step for randrange()")
+
         if n <= 0:
             raise ValueError("empty range for randrange()")
+
         return istart + istep * self._randbelow(n)
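
Illustrative behavior of the reverted randrange() above: non-integral
arguments raise ValueError outright (no deprecation path), and a
non-unit step selects from the stepped range.

    from random import Random

    r = Random(42)
    assert r.randrange(1, 10, 2) in (1, 3, 5, 7, 9)
    # With this version, r.randrange(10.5) raises
    # ValueError('non-integer arg 1 for randrange()').
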
 
     def randint(self, a, b):
@@ -456,14 +424,13 @@
         # too many calls to _randbelow(), making them slower and
         # causing them to eat more entropy than necessary.
 
+        if isinstance(population, _Set):
+            _warn('Sampling from a set deprecated\n'
+                  'since Python 3.9 and will be removed in a subsequent version.',
+                  DeprecationWarning, 2)
+            population = tuple(population)
         if not isinstance(population, _Sequence):
-            if isinstance(population, _Set):
-                _warn('Sampling from a set deprecated\n'
-                      'since Python 3.9 and will be removed in a subsequent version.',
-                      DeprecationWarning, 2)
-                population = tuple(population)
-            else:
-                raise TypeError("Population must be a sequence.  For dicts or sets, use sorted(d).")
+            raise TypeError("Population must be a sequence.  For dicts or sets, use sorted(d).")
         n = len(population)
         if counts is not None:
             cum_counts = list(_accumulate(counts))
@@ -474,7 +441,7 @@
                 raise TypeError('Counts must be integers')
             if total <= 0:
                 raise ValueError('Total of counts must be greater than zero')
-            selections = self.sample(range(total), k=k)
+            selections = sample(range(total), k=k)
             bisect = _bisect
             return [population[bisect(cum_counts, s)] for s in selections]
         randbelow = self._randbelow
@@ -517,15 +484,7 @@
                 floor = _floor
                 n += 0.0    # convert to float for a small speed improvement
                 return [population[floor(random() * n)] for i in _repeat(None, k)]
-            try:
-                cum_weights = list(_accumulate(weights))
-            except TypeError:
-                if not isinstance(weights, int):
-                    raise
-                k = weights
-                raise TypeError(
-                    f'The number of choices must be a keyword argument: {k=}'
-                ) from None
+            cum_weights = list(_accumulate(weights))
         elif weights is not None:
             raise TypeError('Cannot specify both weights and cumulative weights')
         if len(cum_weights) != n:
@@ -533,8 +492,6 @@
         total = cum_weights[-1] + 0.0   # convert to float
         if total <= 0.0:
             raise ValueError('Total of weights must be greater than zero')
-        if not _isfinite(total):
-            raise ValueError('Total of weights must be finite')
         bisect = _bisect
         hi = n - 1
         return [population[bisect(cum_weights, random() * total, 0, hi)]
@@ -725,7 +682,7 @@
             bbb = alpha - LOG4
             ccc = alpha + ainv
 
-            while True:
+            while 1:
                 u1 = random()
                 if not 1e-7 < u1 < 0.9999999:
                     continue
@@ -792,7 +749,7 @@
         # Jain, pg. 495
 
         u = 1.0 - self.random()
-        return u ** (-1.0 / alpha)
+        return 1.0 / u ** (1.0 / alpha)
 
     def weibullvariate(self, alpha, beta):
         """Weibull distribution.
@@ -888,7 +845,7 @@
     from time import perf_counter
 
     t0 = perf_counter()
-    data = [func(*args) for i in _repeat(None, n)]
+    data = [func(*args) for i in range(n)]
     t1 = perf_counter()
 
     xbar = mean(data)
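
The paretovariate() change above is algebraic only: for u in (0, 1],
1.0 / u ** (1.0 / alpha) equals u ** (-1.0 / alpha), the inverse CDF of
the Pareto distribution. A quick check:

    u, alpha = 0.25, 2.0
    assert abs(1.0 / u ** (1.0 / alpha) - u ** (-1.0 / alpha)) < 1e-12
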
diff --git a/common/py3-stdlib/re.py b/common/py3-stdlib/re.py
index 1d82b50..bfb7b1c 100644
--- a/common/py3-stdlib/re.py
+++ b/common/py3-stdlib/re.py
@@ -176,6 +176,7 @@
                 res = f'~{res}'
         return res
     __str__ = object.__str__
+
 globals().update(RegexFlag.__members__)
 
 # sre exception
diff --git a/common/py3-stdlib/rlcompleter.py b/common/py3-stdlib/rlcompleter.py
index 98b7930..bca4a7b 100644
--- a/common/py3-stdlib/rlcompleter.py
+++ b/common/py3-stdlib/rlcompleter.py
@@ -31,7 +31,6 @@
 
 import atexit
 import builtins
-import inspect
 import __main__
 
 __all__ = ["Completer"]
@@ -97,13 +96,7 @@
 
     def _callable_postfix(self, val, word):
         if callable(val):
-            word += "("
-            try:
-                if not inspect.signature(val).parameters:
-                    word += ")"
-            except ValueError:
-                pass
-
+            word = word + "("
         return word
 
     def global_matches(self, text):
@@ -176,20 +169,13 @@
                 if (word[:n] == attr and
                     not (noprefix and word[:n+1] == noprefix)):
                     match = "%s.%s" % (expr, word)
-                    if isinstance(getattr(type(thisobject), word, None),
-                                  property):
-                        # bpo-44752: thisobject.word is a method decorated by
-                        # `@property`. What follows applies a postfix if
-                        # thisobject.word is callable, but now we know that
-                        # this is not callable (because it is a property).
-                        # Also, getattr(thisobject, word) will evaluate the
-                        # property method, which is not desirable.
-                        matches.append(match)
-                        continue
-                    if (value := getattr(thisobject, word, None)) is not None:
-                        matches.append(self._callable_postfix(value, match))
+                    try:
+                        val = getattr(thisobject, word)
+                    except Exception:
+                        pass  # Include even if attribute not set
                     else:
-                        matches.append(match)
+                        match = self._callable_postfix(val, match)
+                    matches.append(match)
             if matches or not noprefix:
                 break
             if noprefix == '_':
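
With the reverted _callable_postfix() above, every callable completion
simply gains a trailing "(" (no signature inspection). An illustrative
check:

    import rlcompleter

    completer = rlcompleter.Completer({'spam': str})
    assert completer.complete('spam.low', 0) == 'spam.lower('
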
diff --git a/common/py3-stdlib/runpy.py b/common/py3-stdlib/runpy.py
index caba121..7e1e1ac 100644
--- a/common/py3-stdlib/runpy.py
+++ b/common/py3-stdlib/runpy.py
@@ -16,6 +16,7 @@
 import io
 import types
 import os
+from pkgutil import read_code, get_importer
 
 __all__ = [
     "run_module", "run_path",
@@ -232,7 +233,6 @@
 
 def _get_code_from_file(run_name, fname):
     # Check for a compiled file first
-    from pkgutil import read_code
     decoded_path = os.path.abspath(os.fsdecode(fname))
     with io.open_code(decoded_path) as f:
         code = read_code(f)
@@ -255,7 +255,6 @@
     if run_name is None:
         run_name = "<run_path>"
     pkg_name = run_name.rpartition(".")[0]
-    from pkgutil import get_importer
     importer = get_importer(path_name)
     # Trying to avoid importing imp so as to not consume the deprecation warning.
     is_NullImporter = False
diff --git a/common/py3-stdlib/sched.py b/common/py3-stdlib/sched.py
index 14613cf..ff87874 100644
--- a/common/py3-stdlib/sched.py
+++ b/common/py3-stdlib/sched.py
@@ -26,19 +26,23 @@
 import time
 import heapq
 from collections import namedtuple
-from itertools import count
 import threading
 from time import monotonic as _time
 
 __all__ = ["scheduler"]
 
-Event = namedtuple('Event', 'time, priority, sequence, action, argument, kwargs')
+class Event(namedtuple('Event', 'time, priority, action, argument, kwargs')):
+    __slots__ = []
+    def __eq__(s, o): return (s.time, s.priority) == (o.time, o.priority)
+    def __lt__(s, o): return (s.time, s.priority) <  (o.time, o.priority)
+    def __le__(s, o): return (s.time, s.priority) <= (o.time, o.priority)
+    def __gt__(s, o): return (s.time, s.priority) >  (o.time, o.priority)
+    def __ge__(s, o): return (s.time, s.priority) >= (o.time, o.priority)
+
 Event.time.__doc__ = ('''Numeric type compatible with the return value of the
 timefunc function passed to the constructor.''')
 Event.priority.__doc__ = ('''Events scheduled for the same time will be executed
 in the order of their priority.''')
-Event.sequence.__doc__ = ('''A continually increasing sequence number that
-    separates events if time and priority are equal.''')
 Event.action.__doc__ = ('''Executing the event means executing
 action(*argument, **kwargs)''')
 Event.argument.__doc__ = ('''argument is a sequence holding the positional
@@ -57,7 +61,6 @@
         self._lock = threading.RLock()
         self.timefunc = timefunc
         self.delayfunc = delayfunc
-        self._sequence_generator = count()
 
     def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel):
         """Enter a new event in the queue at an absolute time.
@@ -68,10 +71,8 @@
         """
         if kwargs is _sentinel:
             kwargs = {}
-
+        event = Event(time, priority, action, argument, kwargs)
         with self._lock:
-            event = Event(time, priority, next(self._sequence_generator),
-                          action, argument, kwargs)
             heapq.heappush(self._queue, event)
         return event # The ID
 
@@ -135,8 +136,7 @@
             with lock:
                 if not q:
                     break
-                (time, priority, sequence, action,
-                 argument, kwargs) = q[0]
+                time, priority, action, argument, kwargs = q[0]
                 now = timefunc()
                 if time > now:
                     delay = True
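
With the reverted Event class above, ordering compares only
(time, priority); events that tie on both compare equal regardless of
action or arguments. An illustrative check:

    import sched

    e1 = sched.Event(5, 1, print, (), {})
    e2 = sched.Event(5, 1, repr, (), {})
    assert e1 == e2 and not e1 < e2
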
diff --git a/common/py3-stdlib/shelve.py b/common/py3-stdlib/shelve.py
index e053c39..5d443a0 100644
--- a/common/py3-stdlib/shelve.py
+++ b/common/py3-stdlib/shelve.py
@@ -56,7 +56,7 @@
 the persistent dictionary on disk, if feasible).
 """
 
-from pickle import DEFAULT_PROTOCOL, Pickler, Unpickler
+from pickle import Pickler, Unpickler
 from io import BytesIO
 
 import collections.abc
@@ -85,7 +85,7 @@
                  keyencoding="utf-8"):
         self.dict = dict
         if protocol is None:
-            protocol = DEFAULT_PROTOCOL
+            protocol = 3
         self._protocol = protocol
         self.writeback = writeback
         self.cache = {}
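
Usage sketch for the reverted Shelf default above (the path is
illustrative): omitting protocol pickles entries with protocol 3 rather
than pickle.DEFAULT_PROTOCOL.

    import shelve

    with shelve.open('/tmp/demo_shelf') as db:
        db['key'] = [1, 2, 3]
        # With this version db._protocol is 3; the replaced code used
        # pickle.DEFAULT_PROTOCOL instead.
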
diff --git a/common/py3-stdlib/shutil.py b/common/py3-stdlib/shutil.py
index 37bf98d..f0e833d 100644
--- a/common/py3-stdlib/shutil.py
+++ b/common/py3-stdlib/shutil.py
@@ -32,6 +32,16 @@
 except ImportError:
     _LZMA_SUPPORTED = False
 
+try:
+    from pwd import getpwnam
+except ImportError:
+    getpwnam = None
+
+try:
+    from grp import getgrnam
+except ImportError:
+    getgrnam = None
+
 _WINDOWS = os.name == 'nt'
 posix = nt = None
 if os.name == 'posix':
@@ -251,37 +261,28 @@
     if not follow_symlinks and _islink(src):
         os.symlink(os.readlink(src), dst)
     else:
-        with open(src, 'rb') as fsrc:
-            try:
-                with open(dst, 'wb') as fdst:
-                    # macOS
-                    if _HAS_FCOPYFILE:
-                        try:
-                            _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA)
-                            return dst
-                        except _GiveupOnFastCopy:
-                            pass
-                    # Linux
-                    elif _USE_CP_SENDFILE:
-                        try:
-                            _fastcopy_sendfile(fsrc, fdst)
-                            return dst
-                        except _GiveupOnFastCopy:
-                            pass
-                    # Windows, see:
-                    # https://github.com/python/cpython/pull/7160#discussion_r195405230
-                    elif _WINDOWS and file_size > 0:
-                        _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE))
-                        return dst
+        with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
+            # macOS
+            if _HAS_FCOPYFILE:
+                try:
+                    _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA)
+                    return dst
+                except _GiveupOnFastCopy:
+                    pass
+            # Linux
+            elif _USE_CP_SENDFILE:
+                try:
+                    _fastcopy_sendfile(fsrc, fdst)
+                    return dst
+                except _GiveupOnFastCopy:
+                    pass
+            # Windows, see:
+            # https://github.com/python/cpython/pull/7160#discussion_r195405230
+            elif _WINDOWS and file_size > 0:
+                _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE))
+                return dst
 
-                    copyfileobj(fsrc, fdst)
-
-            # Issue 43219, raise a less confusing exception
-            except IsADirectoryError as e:
-                if not os.path.exists(dst):
-                    raise FileNotFoundError(f'Directory does not exist: {dst}') from e
-                else:
-                    raise
+            copyfileobj(fsrc, fdst)
 
     return dst
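
Whichever fast path the dispatch above takes (fcopyfile on macOS,
sendfile on Linux, readinto on Windows, or plain copyfileobj()), the
caller-visible contract is a byte-for-byte copy with dst returned. A
small round-trip check (paths are temporary and illustrative):

    import os, shutil, tempfile

    fd, src = tempfile.mkstemp()
    os.write(fd, b'data')
    os.close(fd)
    dst = src + '.copy'
    assert shutil.copyfile(src, dst) == dst
    with open(dst, 'rb') as f:
        assert f.read() == b'data'
    os.unlink(src); os.unlink(dst)
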
 
@@ -646,7 +647,6 @@
         if is_dir:
             try:
                 dirfd = os.open(entry.name, os.O_RDONLY, dir_fd=topfd)
-                dirfd_closed = False
             except OSError:
                 onerror(os.open, fullname, sys.exc_info())
             else:
@@ -654,8 +654,6 @@
                     if os.path.samestat(orig_st, os.fstat(dirfd)):
                         _rmtree_safe_fd(dirfd, fullname, onerror)
                         try:
-                            os.close(dirfd)
-                            dirfd_closed = True
                             os.rmdir(entry.name, dir_fd=topfd)
                         except OSError:
                             onerror(os.rmdir, fullname, sys.exc_info())
@@ -669,8 +667,7 @@
                         except OSError:
                             onerror(os.path.islink, fullname, sys.exc_info())
                 finally:
-                    if not dirfd_closed:
-                        os.close(dirfd)
+                    os.close(dirfd)
         else:
             try:
                 os.unlink(entry.name, dir_fd=topfd)
@@ -713,7 +710,6 @@
             return
         try:
             fd = os.open(path, os.O_RDONLY)
-            fd_closed = False
         except Exception:
             onerror(os.open, path, sys.exc_info())
             return
@@ -721,8 +717,6 @@
             if os.path.samestat(orig_st, os.fstat(fd)):
                 _rmtree_safe_fd(fd, path, onerror)
                 try:
-                    os.close(fd)
-                    fd_closed = True
                     os.rmdir(path)
                 except OSError:
                     onerror(os.rmdir, path, sys.exc_info())
@@ -733,8 +727,7 @@
                 except OSError:
                     onerror(os.path.islink, path, sys.exc_info())
         finally:
-            if not fd_closed:
-                os.close(fd)
+            os.close(fd)
     else:
         try:
             if _rmtree_islink(path):
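Both versions of _rmtree_safe_fd() rely on the same fd-based defense: open each directory with an explicit file descriptor, re-check via fstat()/samestat() that it is still the entry scandir() reported, and close the fd in a finally block. A condensed sketch of the technique, assuming a platform with dir_fd support; remove_tree_fd is an illustrative helper, not shutil API:

    import os
    import stat

    def remove_tree_fd(topfd, name):
        st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
        if stat.S_ISDIR(st.st_mode):
            dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
            try:
                # Guard against the entry being swapped for a symlink.
                if os.path.samestat(st, os.fstat(dirfd)):
                    with os.scandir(dirfd) as it:
                        for entry in it:
                            remove_tree_fd(dirfd, entry.name)
                    os.rmdir(name, dir_fd=topfd)
            finally:
                os.close(dirfd)
        else:
            os.unlink(name, dir_fd=topfd)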
@@ -820,12 +813,6 @@
             if _destinsrc(src, dst):
                 raise Error("Cannot move a directory '%s' into itself"
                             " '%s'." % (src, dst))
-            if (_is_immutable(src)
-                    or (not os.access(src, os.W_OK) and os.listdir(src)
-                        and sys.platform == 'darwin')):
-                raise PermissionError("Cannot move the non-empty directory "
-                                      "'%s': Lacking write permission to '%s'."
-                                      % (src, src))
             copytree(src, real_dst, copy_function=copy_function,
                      symlinks=True)
             rmtree(src)
@@ -843,21 +830,10 @@
         dst += os.path.sep
     return dst.startswith(src)
 
-def _is_immutable(src):
-    st = _stat(src)
-    immutable_states = [stat.UF_IMMUTABLE, stat.SF_IMMUTABLE]
-    return hasattr(st, 'st_flags') and st.st_flags in immutable_states
-
 def _get_gid(name):
     """Returns a gid, given a group name."""
-    if name is None:
+    if getgrnam is None or name is None:
         return None
-
-    try:
-        from grp import getgrnam
-    except ImportError:
-        return None
-
     try:
         result = getgrnam(name)
     except KeyError:
@@ -868,14 +844,8 @@
 
 def _get_uid(name):
     """Returns an uid, given a user name."""
-    if name is None:
+    if getpwnam is None or name is None:
         return None
-
-    try:
-        from pwd import getpwnam
-    except ImportError:
-        return None
-
     try:
         result = getpwnam(name)
     except KeyError:
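The revert moves the pwd/grp imports back to module level (see the first hunk of this file's diff) and has the lookup helpers bail out when the module is unavailable. A sketch of that guarded-lookup pattern, with uid_for as a hypothetical stand-in for _get_uid():

    try:
        from pwd import getpwnam
    except ImportError:      # e.g. on Windows, where pwd does not exist
        getpwnam = None

    def uid_for(name):
        if getpwnam is None or name is None:
            return None
        try:
            return getpwnam(name).pw_uid
        except KeyError:
            return None      # unknown user name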
@@ -1178,16 +1148,20 @@
             if name.startswith('/') or '..' in name:
                 continue
 
-            targetpath = os.path.join(extract_dir, *name.split('/'))
-            if not targetpath:
+            target = os.path.join(extract_dir, *name.split('/'))
+            if not target:
                 continue
 
-            _ensure_directory(targetpath)
+            _ensure_directory(target)
             if not name.endswith('/'):
                 # file
-                with zip.open(name, 'r') as source, \
-                        open(targetpath, 'wb') as target:
-                    copyfileobj(source, target)
+                data = zip.read(info.filename)
+                f = open(target, 'wb')
+                try:
+                    f.write(data)
+                finally:
+                    f.close()
+                    del data
     finally:
         zip.close()
 
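The replacement _unpack_zipfile() body reads each member fully into memory with zip.read(); the removed lines streamed it instead. A hedged sketch of the streaming variant with the same path filtering; unpack_zip is illustrative, and the real function does more bookkeeping:

    import os
    import shutil
    import zipfile

    def unpack_zip(filename, extract_dir):
        with zipfile.ZipFile(filename) as zf:
            for name in zf.namelist():
                # Reject names that would escape extract_dir.
                if name.startswith('/') or '..' in name:
                    continue
                target = os.path.join(extract_dir, *name.split('/'))
                os.makedirs(os.path.dirname(target), exist_ok=True)
                if not name.endswith('/'):   # directory entries end with '/'
                    with zf.open(name) as src, open(target, 'wb') as dst:
                        shutil.copyfileobj(src, dst)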
diff --git a/common/py3-stdlib/signal.py b/common/py3-stdlib/signal.py
index 50b215b..d4a6d6f 100644
--- a/common/py3-stdlib/signal.py
+++ b/common/py3-stdlib/signal.py
@@ -1,5 +1,6 @@
 import _signal
 from _signal import *
+from functools import wraps as _wraps
 from enum import IntEnum as _IntEnum
 
 _globals = globals()
@@ -41,16 +42,6 @@
         return value
 
 
-# Similar to functools.wraps(), but only assign __doc__.
-# __module__ should be preserved,
-# __name__ and __qualname__ are already fine,
-# __annotations__ is not set.
-def _wraps(wrapped):
-    def decorator(wrapper):
-        wrapper.__doc__ = wrapped.__doc__
-        return wrapper
-    return decorator
-
 @_wraps(_signal.signal)
 def signal(signalnum, handler):
     handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))
@@ -68,6 +59,7 @@
     def pthread_sigmask(how, mask):
         sigs_set = _signal.pthread_sigmask(how, mask)
         return set(_int_to_enum(x, Signals) for x in sigs_set)
+    pthread_sigmask.__doc__ = _signal.pthread_sigmask.__doc__
 
 
 if 'sigpending' in _globals:
@@ -81,6 +73,7 @@
     def sigwait(sigset):
         retsig = _signal.sigwait(sigset)
         return _int_to_enum(retsig, Signals)
+    sigwait.__doc__ = _signal.sigwait.__doc__
 
 
 if 'valid_signals' in _globals:
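The revert swaps the module's doc-only _wraps() for functools.wraps and copies docstrings by hand where the wrapped object is a C function. The difference between the two approaches is easy to demonstrate; doc_only_wraps mirrors the removed helper:

    import functools

    def doc_only_wraps(wrapped):
        # Copy only __doc__, like the removed signal._wraps().
        def decorator(wrapper):
            wrapper.__doc__ = wrapped.__doc__
            return wrapper
        return decorator

    def original(x):
        "Return x doubled."
        return 2 * x

    @doc_only_wraps(original)
    def via_doc_only(x):
        return original(x)

    @functools.wraps(original)
    def via_functools(x):
        return original(x)

    print(via_doc_only.__doc__)    # 'Return x doubled.'
    print(via_doc_only.__name__)   # 'via_doc_only' -- name untouched
    print(via_functools.__name__)  # 'original'     -- wraps() rebinds it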
diff --git a/common/py3-stdlib/site.py b/common/py3-stdlib/site.py
index 939893e..9e617af 100644
--- a/common/py3-stdlib/site.py
+++ b/common/py3-stdlib/site.py
@@ -88,11 +88,6 @@
 USER_BASE = None
 
 
-def _trace(message):
-    if sys.flags.verbose:
-        print(message, file=sys.stderr)
-
-
 def makepath(*paths):
     dir = os.path.join(*paths)
     try:
@@ -105,15 +100,8 @@
 def abs_paths():
     """Set all module __file__ and __cached__ attributes to an absolute path"""
     for m in set(sys.modules.values()):
-        loader_module = None
-        try:
-            loader_module = m.__loader__.__module__
-        except AttributeError:
-            try:
-                loader_module = m.__spec__.loader.__module__
-            except AttributeError:
-                pass
-        if loader_module not in {'_frozen_importlib', '_frozen_importlib_external'}:
+        if (getattr(getattr(m, '__loader__', None), '__module__', None) not in
+                ('_frozen_importlib', '_frozen_importlib_external')):
             continue   # don't mess with a PEP 302-supplied __file__
         try:
             m.__file__ = os.path.abspath(m.__file__)
@@ -168,19 +156,14 @@
     else:
         reset = False
     fullname = os.path.join(sitedir, name)
-    _trace(f"Processing .pth file: {fullname!r}")
     try:
-        # locale encoding is not ideal especially on Windows. But we have used
-        # it for a long time. setuptools uses the locale encoding too.
-        f = io.TextIOWrapper(io.open_code(fullname), encoding="locale")
+        f = io.TextIOWrapper(io.open_code(fullname))
     except OSError:
         return
     with f:
         for n, line in enumerate(f):
             if line.startswith("#"):
                 continue
-            if line.strip() == "":
-                continue
             try:
                 if line.startswith(("import ", "import\t")):
                     exec(line)
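Either way, addpackage() interprets .pth lines the same way: comments are skipped, lines beginning with "import " are executed, and anything else is treated as a path relative to the site directory. A condensed sketch of that per-line logic; process_pth_line is a hypothetical helper:

    import os
    import sys

    def process_pth_line(sitedir, line, known_paths):
        line = line.rstrip('\n')
        if not line or line.startswith('#'):
            return
        if line.startswith(('import ', 'import\t')):
            exec(line)                       # executable .pth hook
            return
        path = os.path.join(sitedir, line)
        if os.path.exists(path) and path not in known_paths:
            sys.path.append(path)            # extend the module search path
            known_paths.add(path)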
@@ -207,7 +190,6 @@
 def addsitedir(sitedir, known_paths=None):
     """Add 'sitedir' argument to sys.path if missing and handle .pth files in
     'sitedir'"""
-    _trace(f"Adding directory: {sitedir!r}")
     if known_paths is None:
         known_paths = _init_pathinfo()
         reset = True
@@ -266,10 +248,6 @@
     if env_base:
         return env_base
 
-    # VxWorks has no home directories
-    if sys.platform == "vxworks":
-        return None
-
     def joinuser(*args):
         return os.path.expanduser(os.path.join(*args))
 
@@ -289,8 +267,7 @@
     version = sys.version_info
 
     if os.name == 'nt':
-        ver_nodot = sys.winver.replace('.', '')
-        return f'{userbase}\\Python{ver_nodot}\\site-packages'
+        return f'{userbase}\\Python{version[0]}{version[1]}\\site-packages'
 
     if sys.platform == 'darwin' and sys._framework:
         return f'{userbase}/lib/python/site-packages'
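_get_path() picks the user site-packages layout per platform; the revert derives the Windows directory from sys.version_info rather than sys.winver. A sketch of the resulting logic, using getattr defensively in case sys._framework is absent:

    import os
    import sys

    def user_site_path(userbase):
        v = sys.version_info
        if os.name == 'nt':
            return f'{userbase}\\Python{v[0]}{v[1]}\\site-packages'
        if sys.platform == 'darwin' and getattr(sys, '_framework', ''):
            return f'{userbase}/lib/python/site-packages'
        return f'{userbase}/lib/python{v[0]}.{v[1]}/site-packages'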
@@ -317,14 +294,11 @@
     If the global variable ``USER_SITE`` is not initialized yet, this
     function will also set it.
     """
-    global USER_SITE, ENABLE_USER_SITE
+    global USER_SITE
     userbase = getuserbase() # this will also set USER_BASE
 
     if USER_SITE is None:
-        if userbase is None:
-            ENABLE_USER_SITE = False # disable user site and return None
-        else:
-            USER_SITE = _get_path(userbase)
+        USER_SITE = _get_path(userbase)
 
     return USER_SITE
 
@@ -336,7 +310,6 @@
     """
     # get the per user site-package path
     # this call will also make sure USER_BASE and USER_SITE are set
-    _trace("Processing user site-packages")
     user_site = getusersitepackages()
 
     if ENABLE_USER_SITE and os.path.isdir(user_site):
@@ -381,7 +354,6 @@
 
 def addsitepackages(known_paths, prefixes=None):
     """Add site-packages to sys.path"""
-    _trace("Processing global site-packages")
     for sitedir in getsitepackages(prefixes):
         if os.path.isdir(sitedir):
             addsitedir(sitedir, known_paths)
@@ -639,14 +611,11 @@
         for dir in sys.path:
             print("    %r," % (dir,))
         print("]")
-        def exists(path):
-            if path is not None and os.path.isdir(path):
-                return "exists"
-            else:
-                return "doesn't exist"
-        print(f"USER_BASE: {user_base!r} ({exists(user_base)})")
-        print(f"USER_SITE: {user_site!r} ({exists(user_site)})")
-        print(f"ENABLE_USER_SITE: {ENABLE_USER_SITE!r}")
+        print("USER_BASE: %r (%s)" % (user_base,
+            "exists" if os.path.isdir(user_base) else "doesn't exist"))
+        print("USER_SITE: %r (%s)" % (user_site,
+            "exists" if os.path.isdir(user_site) else "doesn't exist"))
+        print("ENABLE_USER_SITE: %r" %  ENABLE_USER_SITE)
         sys.exit(0)
 
     buffer = []
diff --git a/common/py3-stdlib/smtpd.py b/common/py3-stdlib/smtpd.py
index bc43331..8f1a22e 100755
--- a/common/py3-stdlib/smtpd.py
+++ b/common/py3-stdlib/smtpd.py
@@ -83,6 +83,8 @@
 import getopt
 import time
 import socket
+import asyncore
+import asynchat
 import collections
 from warnings import warn
 from email._header_value_parser import get_addr_spec, get_angle_addr
@@ -92,19 +94,6 @@
     "MailmanProxy",
 ]
 
-warn(
-    'The smtpd module is deprecated and unmaintained.  Please see aiosmtpd '
-    '(https://aiosmtpd.readthedocs.io/) for the recommended replacement.',
-    DeprecationWarning,
-    stacklevel=2)
-
-
-# These are imported after the above warning so that users get the correct
-# deprecation warning.
-import asyncore
-import asynchat
-
-
 program = sys.argv[0]
 __version__ = 'Python SMTP proxy version 0.3'
 
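The removed lines emitted the module-level DeprecationWarning before importing asyncore/asynchat, so the warning is attributed to whoever imported smtpd rather than to a nested import. A tiny sketch of why stacklevel matters here:

    import warnings

    def old_api():
        # stacklevel=2 points the warning at old_api()'s caller.
        warnings.warn("old_api() is deprecated", DeprecationWarning, stacklevel=2)

    old_api()  # the warning names this call site, not the warn() line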
@@ -174,7 +163,7 @@
             # a race condition  may occur if the other end is closing
             # before we can get the peername
             self.close()
-            if err.errno != errno.ENOTCONN:
+            if err.args[0] != errno.ENOTCONN:
                 raise
             return
         print('Peer:', repr(self.peer), file=DEBUGSTREAM)
diff --git a/common/py3-stdlib/smtplib.py b/common/py3-stdlib/smtplib.py
index 324a1c1..7808ba0 100755
--- a/common/py3-stdlib/smtplib.py
+++ b/common/py3-stdlib/smtplib.py
@@ -64,7 +64,6 @@
 CRLF = "\r\n"
 bCRLF = b"\r\n"
 _MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
-_MAXCHALLENGE = 5  # Maximum number of AUTH challenges sent
 
 OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
 
@@ -168,7 +167,7 @@
     """Quote data for email.
 
     Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
-    internet CRLF end-of-line.
+    Internet CRLF end-of-line.
     """
     return re.sub(r'(?m)^\.', '..',
         re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
@@ -223,7 +222,7 @@
     helo_resp = None
     ehlo_msg = "ehlo"
     ehlo_resp = None
-    does_esmtp = False
+    does_esmtp = 0
     default_port = SMTP_PORT
 
     def __init__(self, host='', port=0, local_hostname=None,
@@ -231,8 +230,8 @@
                  source_address=None):
         """Initialize a new instance.
 
-        If specified, `host` is the name of the remote host to which to
-        connect.  If specified, `port` specifies the port to which to connect.
+        If specified, `host' is the name of the remote host to which to
+        connect.  If specified, `port' specifies the port to which to connect.
         By default, smtplib.SMTP_PORT is used.  If a host is specified the
         connect method is called, and if it returns anything other than a
         success code an SMTPConnectError is raised.  If specified,
@@ -249,7 +248,6 @@
         self.esmtp_features = {}
         self.command_encoding = 'ascii'
         self.source_address = source_address
-        self._auth_challenge_count = 0
 
         if host:
             (code, msg) = self.connect(host, port)
@@ -367,15 +365,10 @@
     def putcmd(self, cmd, args=""):
         """Send a command to the server."""
         if args == "":
-            s = cmd
+            str = '%s%s' % (cmd, CRLF)
         else:
-            s = f'{cmd} {args}'
-        if '\r' in s or '\n' in s:
-            s = s.replace('\n', '\\n').replace('\r', '\\r')
-            raise ValueError(
-                f'command and arguments contain prohibited newline characters: {s}'
-            )
-        self.send(f'{s}{CRLF}')
+            str = '%s %s%s' % (cmd, args, CRLF)
+        self.send(str)
 
     def getreply(self):
         """Get a reply from the server.
@@ -459,7 +452,7 @@
         self.ehlo_resp = msg
         if code != 250:
             return (code, msg)
-        self.does_esmtp = True
+        self.does_esmtp = 1
         #parse the ehlo response -ddm
         assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
         resp = self.ehlo_resp.decode("latin-1").split('\n')
@@ -640,23 +633,14 @@
         if initial_response is not None:
             response = encode_base64(initial_response.encode('ascii'), eol='')
             (code, resp) = self.docmd("AUTH", mechanism + " " + response)
-            self._auth_challenge_count = 1
         else:
             (code, resp) = self.docmd("AUTH", mechanism)
-            self._auth_challenge_count = 0
         # If server responds with a challenge, send the response.
-        while code == 334:
-            self._auth_challenge_count += 1
+        if code == 334:
             challenge = base64.decodebytes(resp)
             response = encode_base64(
                 authobject(challenge).encode('ascii'), eol='')
             (code, resp) = self.docmd(response)
-            # If server keeps sending challenges, something is wrong.
-            if self._auth_challenge_count > _MAXCHALLENGE:
-                raise SMTPException(
-                    "Server AUTH mechanism infinite loop. Last response: "
-                    + repr((code, resp))
-                )
         if code in (235, 503):
             return (code, resp)
         raise SMTPAuthenticationError(code, resp)
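Both versions of auth() answer a 334 reply the same way: the server's challenge arrives base64-encoded, and the client sends back a base64-encoded response computed by the authobject. A minimal sketch of one round trip, assuming an authobject that maps bytes to str as smtplib's auth_* methods do:

    import base64

    def answer_challenge(authobject, resp):
        challenge = base64.decodebytes(resp)          # 334 payload is base64
        return base64.b64encode(authobject(challenge).encode('ascii'))

    # e.g. a CRAM-MD5 authobject HMACs the challenge with the password.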
@@ -678,7 +662,7 @@
     def auth_login(self, challenge=None):
         """ Authobject to use with LOGIN authentication. Requires self.user and
         self.password to be set."""
-        if challenge is None or self._auth_challenge_count < 2:
+        if challenge is None:
             return self.user
         else:
             return self.password
@@ -797,7 +781,7 @@
             self.helo_resp = None
             self.ehlo_resp = None
             self.esmtp_features = {}
-            self.does_esmtp = False
+            self.does_esmtp = 0
         else:
             # RFC 3207:
             # 501 Syntax error (no parameters allowed)
@@ -1098,8 +1082,7 @@
         # Handle Unix-domain sockets.
         try:
             self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
-                self.sock.settimeout(self.timeout)
+            self.sock.settimeout(self.timeout)
             self.file = None
             self.sock.connect(host)
         except OSError:
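The removed guard avoided calling settimeout() with the module's global-default sentinel, which is an object rather than a number. A sketch of the sentinel pattern, assuming a POSIX platform with AF_UNIX (note that _GLOBAL_DEFAULT_TIMEOUT is a private socket attribute):

    import socket

    def make_unix_socket(timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # Only apply a timeout when the caller passed a real value.
        if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
            sock.settimeout(timeout)
        return sock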
diff --git a/common/py3-stdlib/socket.py b/common/py3-stdlib/socket.py
index 63ba0ac..cafa573 100755
--- a/common/py3-stdlib/socket.py
+++ b/common/py3-stdlib/socket.py
@@ -337,7 +337,6 @@
             buffer = io.BufferedWriter(raw, buffering)
         if binary:
             return buffer
-        encoding = io.text_encoding(encoding)
         text = io.TextIOWrapper(buffer, encoding, errors, newline)
         text.mode = mode
         return text
@@ -378,7 +377,7 @@
             try:
                 while True:
                     if timeout and not selector_select(timeout):
-                        raise TimeoutError('timed out')
+                        raise _socket.timeout('timed out')
                     if count:
                         blocksize = count - total_sent
                         if blocksize <= 0:
@@ -707,7 +706,7 @@
                 self._timeout_occurred = True
                 raise
             except error as e:
-                if e.errno in _blocking_errnos:
+                if e.args[0] in _blocking_errnos:
                     return None
                 raise
 
@@ -723,7 +722,7 @@
             return self._sock.send(b)
         except error as e:
             # XXX what about EINTR?
-            if e.errno in _blocking_errnos:
+            if e.args[0] in _blocking_errnos:
                 return None
             raise
 
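Whether spelled e.errno or e.args[0], the pattern above maps would-block errors in non-blocking mode to a None return, the RawIOBase convention for "no progress yet". A standalone sketch; write_nonblocking is illustrative:

    import errno

    _would_block = {errno.EAGAIN, errno.EWOULDBLOCK}

    def write_nonblocking(sock, data):
        try:
            return sock.send(data)
        except OSError as e:
            if e.errno in _would_block:
                return None        # tell BufferedWriter to retry later
            raise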
@@ -782,9 +781,8 @@
     An empty argument is interpreted as meaning the local host.
 
     First the hostname returned by gethostbyaddr() is checked, then
-    possibly existing aliases. In case no FQDN is available and `name`
-    was given, it is returned unchanged. If `name` was empty or '0.0.0.0',
-    hostname from gethostname() is returned.
+    possibly existing aliases. In case no FQDN is available, hostname
+    from gethostname() is returned.
     """
     name = name.strip()
     if not name or name == '0.0.0.0':
diff --git a/common/py3-stdlib/socketserver.py b/common/py3-stdlib/socketserver.py
index 0d9583d..57c1ae6 100644
--- a/common/py3-stdlib/socketserver.py
+++ b/common/py3-stdlib/socketserver.py
@@ -628,39 +628,6 @@
             self.collect_children(blocking=self.block_on_close)
 
 
-class _Threads(list):
-    """
-    Joinable list of all non-daemon threads.
-    """
-    def append(self, thread):
-        self.reap()
-        if thread.daemon:
-            return
-        super().append(thread)
-
-    def pop_all(self):
-        self[:], result = [], self[:]
-        return result
-
-    def join(self):
-        for thread in self.pop_all():
-            thread.join()
-
-    def reap(self):
-        self[:] = (thread for thread in self if thread.is_alive())
-
-
-class _NoThreads:
-    """
-    Degenerate version of _Threads.
-    """
-    def append(self, thread):
-        pass
-
-    def join(self):
-        pass
-
-
 class ThreadingMixIn:
     """Mix-in class to handle each request in a new thread."""
 
@@ -669,9 +636,9 @@
     daemon_threads = False
     # If true, server_close() waits until all non-daemonic threads terminate.
     block_on_close = True
-    # Threads object
+    # For non-daemonic threads, list of threading.Thread objects
     # used by server_close() to wait for all threads completion.
-    _threads = _NoThreads()
+    _threads = None
 
     def process_request_thread(self, request, client_address):
         """Same as in BaseServer but as a thread.
@@ -688,17 +655,23 @@
 
     def process_request(self, request, client_address):
         """Start a new thread to process the request."""
-        if self.block_on_close:
-            vars(self).setdefault('_threads', _Threads())
         t = threading.Thread(target = self.process_request_thread,
                              args = (request, client_address))
         t.daemon = self.daemon_threads
-        self._threads.append(t)
+        if not t.daemon and self.block_on_close:
+            if self._threads is None:
+                self._threads = []
+            self._threads.append(t)
         t.start()
 
     def server_close(self):
         super().server_close()
-        self._threads.join()
+        if self.block_on_close:
+            threads = self._threads
+            self._threads = None
+            if threads:
+                for thread in threads:
+                    thread.join()
 
 
 if hasattr(os, "fork"):
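The deleted _Threads class gave ThreadingMixIn a joinable list that reaps finished threads as it goes; the reverted code keeps an optional plain list instead. A condensed sketch of the joinable-list idea; JoinableThreads is illustrative:

    import threading

    class JoinableThreads(list):
        def append(self, thread):
            self[:] = [t for t in self if t.is_alive()]   # reap dead threads
            if not thread.daemon:
                super().append(thread)                    # track non-daemon only

        def join_all(self):
            threads, self[:] = self[:], []
            for t in threads:
                t.join()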
diff --git a/common/py3-stdlib/sqlite3/__init__.py b/common/py3-stdlib/sqlite3/__init__.py
index 0dedf18..6c91df2 100644
--- a/common/py3-stdlib/sqlite3/__init__.py
+++ b/common/py3-stdlib/sqlite3/__init__.py
@@ -20,52 +20,4 @@
 #    misrepresented as being the original software.
 # 3. This notice may not be removed or altered from any source distribution.
 
-"""
-The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant
-interface to the SQLite library, and requires SQLite 3.7.15 or newer.
-
-To use the module, start by creating a database Connection object:
-
-    import sqlite3
-    cx = sqlite3.connect("test.db")  # test.db will be created or opened
-
-The special path name ":memory:" can be provided to connect to a transient
-in-memory database:
-
-    cx = sqlite3.connect(":memory:")  # connect to a database in RAM
-
-Once a connection has been established, create a Cursor object and call
-its execute() method to perform SQL queries:
-
-    cu = cx.cursor()
-
-    # create a table
-    cu.execute("create table lang(name, first_appeared)")
-
-    # insert values into a table
-    cu.execute("insert into lang values (?, ?)", ("C", 1972))
-
-    # execute a query and iterate over the result
-    for row in cu.execute("select * from lang"):
-        print(row)
-
-    cx.close()
-
-The sqlite3 module is written by Gerhard Häring <gh@ghaering.de>.
-"""
-
 from sqlite3.dbapi2 import *
-
-
-# bpo-42264: OptimizedUnicode was deprecated in Python 3.10.  It's scheduled
-# for removal in Python 3.12.
-def __getattr__(name):
-    if name == "OptimizedUnicode":
-        import warnings
-        msg = ("""
-            OptimizedUnicode is deprecated and will be removed in Python 3.12.
-            Since Python 3.3 it has simply been an alias for 'str'.
-        """)
-        warnings.warn(msg, DeprecationWarning, stacklevel=2)
-        return str
-    raise AttributeError(f"module 'sqlite3' has no attribute '{name}'")
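The deleted __getattr__ uses PEP 562 module-level attribute hooks: accessing sqlite3.OptimizedUnicode triggers the warning lazily instead of at import time. A minimal sketch of the mechanism in an arbitrary module:

    # mymodule.py -- illustrative, not sqlite3 itself
    import warnings

    def __getattr__(name):
        if name == "OldName":
            warnings.warn("OldName is deprecated", DeprecationWarning, stacklevel=2)
            return str                   # the modern replacement
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")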
diff --git a/common/py3-stdlib/sqlite3/dbapi2.py b/common/py3-stdlib/sqlite3/dbapi2.py
index cfe6225..991682c 100644
--- a/common/py3-stdlib/sqlite3/dbapi2.py
+++ b/common/py3-stdlib/sqlite3/dbapi2.py
@@ -84,20 +84,6 @@
 
 register_adapters_and_converters()
 
-# bpo-24464: enable_shared_cache was deprecated in Python 3.10.  It's
-# scheduled for removal in Python 3.12.
-def enable_shared_cache(enable):
-    from _sqlite3 import enable_shared_cache as _old_enable_shared_cache
-    import warnings
-    msg = (
-        "enable_shared_cache is deprecated and will be removed in Python 3.12. "
-        "Shared cache is strongly discouraged by the SQLite 3 documentation. "
-        "If shared cache must be used, open the database in URI mode using"
-        "the cache=shared query parameter."
-    )
-    warnings.warn(msg, DeprecationWarning, stacklevel=2)
-    return _old_enable_shared_cache(enable)
-
 # Clean up namespace
 
 del(register_adapters_and_converters)
diff --git a/common/py3-stdlib/sqlite3/test/backup.py b/common/py3-stdlib/sqlite3/test/backup.py
index 4e30594..ad1da97 100644
--- a/common/py3-stdlib/sqlite3/test/backup.py
+++ b/common/py3-stdlib/sqlite3/test/backup.py
@@ -2,6 +2,7 @@
 import unittest
 
 
+@unittest.skipIf(sqlite.sqlite_version_info < (3, 6, 11), "Backup API not supported")
 class BackupTests(unittest.TestCase):
     def setUp(self):
         cx = self.cx = sqlite.connect(":memory:")
@@ -17,11 +18,9 @@
         self.assertEqual(result[0][0], 3)
         self.assertEqual(result[1][0], 4)
 
-    def test_bad_target(self):
+    def test_bad_target_none(self):
         with self.assertRaises(TypeError):
             self.cx.backup(None)
-        with self.assertRaises(TypeError):
-            self.cx.backup()
 
     def test_bad_target_filename(self):
         with self.assertRaises(TypeError):
@@ -149,7 +148,10 @@
         with self.assertRaises(sqlite.OperationalError) as cm:
             with sqlite.connect(':memory:') as bck:
                 self.cx.backup(bck, name='non-existing')
-        self.assertIn("unknown database", str(cm.exception))
+        self.assertIn(
+            str(cm.exception),
+            ['SQL logic error', 'SQL logic error or missing database']
+        )
 
         self.cx.execute("ATTACH DATABASE ':memory:' AS attached_db")
         self.cx.execute('CREATE TABLE attached_db.foo (key INTEGER)')
@@ -161,7 +163,7 @@
 
 
 def suite():
-    return unittest.TestLoader().loadTestsFromTestCase(BackupTests)
+    return unittest.makeSuite(BackupTests)
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/common/py3-stdlib/sqlite3/test/dbapi.py b/common/py3-stdlib/sqlite3/test/dbapi.py
index e332184..ad9c9f0 100644
--- a/common/py3-stdlib/sqlite3/test/dbapi.py
+++ b/common/py3-stdlib/sqlite3/test/dbapi.py
@@ -1,6 +1,7 @@
+#-*- coding: iso-8859-1 -*-
 # pysqlite2/test/dbapi.py: tests for DB-API compliance
 #
-# Copyright (C) 2004-2010 Gerhard Häring <gh@ghaering.de>
+# Copyright (C) 2004-2010 Gerhard Häring <gh@ghaering.de>
 #
 # This file is part of pysqlite.
 #
@@ -20,85 +21,68 @@
 #    misrepresented as being the original software.
 # 3. This notice may not be removed or altered from any source distribution.
 
-import subprocess
 import threading
 import unittest
 import sqlite3 as sqlite
-import sys
 
-from test.support import check_disallow_instantiation, SHORT_TIMEOUT
-from test.support.os_helper import TESTFN, unlink
+from test.support import TESTFN, unlink
 
 
 class ModuleTests(unittest.TestCase):
-    def test_api_level(self):
+    def CheckAPILevel(self):
         self.assertEqual(sqlite.apilevel, "2.0",
                          "apilevel is %s, should be 2.0" % sqlite.apilevel)
 
-    def test_thread_safety(self):
+    def CheckThreadSafety(self):
         self.assertEqual(sqlite.threadsafety, 1,
                          "threadsafety is %d, should be 1" % sqlite.threadsafety)
 
-    def test_param_style(self):
+    def CheckParamStyle(self):
         self.assertEqual(sqlite.paramstyle, "qmark",
                          "paramstyle is '%s', should be 'qmark'" %
                          sqlite.paramstyle)
 
-    def test_warning(self):
+    def CheckWarning(self):
         self.assertTrue(issubclass(sqlite.Warning, Exception),
                      "Warning is not a subclass of Exception")
 
-    def test_error(self):
+    def CheckError(self):
         self.assertTrue(issubclass(sqlite.Error, Exception),
                         "Error is not a subclass of Exception")
 
-    def test_interface_error(self):
+    def CheckInterfaceError(self):
         self.assertTrue(issubclass(sqlite.InterfaceError, sqlite.Error),
                         "InterfaceError is not a subclass of Error")
 
-    def test_database_error(self):
+    def CheckDatabaseError(self):
         self.assertTrue(issubclass(sqlite.DatabaseError, sqlite.Error),
                         "DatabaseError is not a subclass of Error")
 
-    def test_data_error(self):
+    def CheckDataError(self):
         self.assertTrue(issubclass(sqlite.DataError, sqlite.DatabaseError),
                         "DataError is not a subclass of DatabaseError")
 
-    def test_operational_error(self):
+    def CheckOperationalError(self):
         self.assertTrue(issubclass(sqlite.OperationalError, sqlite.DatabaseError),
                         "OperationalError is not a subclass of DatabaseError")
 
-    def test_integrity_error(self):
+    def CheckIntegrityError(self):
         self.assertTrue(issubclass(sqlite.IntegrityError, sqlite.DatabaseError),
                         "IntegrityError is not a subclass of DatabaseError")
 
-    def test_internal_error(self):
+    def CheckInternalError(self):
         self.assertTrue(issubclass(sqlite.InternalError, sqlite.DatabaseError),
                         "InternalError is not a subclass of DatabaseError")
 
-    def test_programming_error(self):
+    def CheckProgrammingError(self):
         self.assertTrue(issubclass(sqlite.ProgrammingError, sqlite.DatabaseError),
                         "ProgrammingError is not a subclass of DatabaseError")
 
-    def test_not_supported_error(self):
+    def CheckNotSupportedError(self):
         self.assertTrue(issubclass(sqlite.NotSupportedError,
                                    sqlite.DatabaseError),
                         "NotSupportedError is not a subclass of DatabaseError")
 
-    # sqlite3_enable_shared_cache() is deprecated on macOS and calling it may raise
-    # OperationalError on some buildbots.
-    @unittest.skipIf(sys.platform == "darwin", "shared cache is deprecated on macOS")
-    def test_shared_cache_deprecated(self):
-        for enable in (True, False):
-            with self.assertWarns(DeprecationWarning) as cm:
-                sqlite.enable_shared_cache(enable)
-            self.assertIn("dbapi.py", cm.filename)
-
-    def test_disallow_instantiation(self):
-        cx = sqlite.connect(":memory:")
-        check_disallow_instantiation(self, type(cx("select 1")))
-
-
 class ConnectionTests(unittest.TestCase):
 
     def setUp(self):
@@ -110,38 +94,38 @@
     def tearDown(self):
         self.cx.close()
 
-    def test_commit(self):
+    def CheckCommit(self):
         self.cx.commit()
 
-    def test_commit_after_no_changes(self):
+    def CheckCommitAfterNoChanges(self):
         """
         A commit should also work when no changes were made to the database.
         """
         self.cx.commit()
         self.cx.commit()
 
-    def test_rollback(self):
+    def CheckRollback(self):
         self.cx.rollback()
 
-    def test_rollback_after_no_changes(self):
+    def CheckRollbackAfterNoChanges(self):
         """
         A rollback should also work when no changes were made to the database.
         """
         self.cx.rollback()
         self.cx.rollback()
 
-    def test_cursor(self):
+    def CheckCursor(self):
         cu = self.cx.cursor()
 
-    def test_failed_open(self):
+    def CheckFailedOpen(self):
         YOU_CANNOT_OPEN_THIS = "/foo/bar/bla/23534/mydb.db"
         with self.assertRaises(sqlite.OperationalError):
             con = sqlite.connect(YOU_CANNOT_OPEN_THIS)
 
-    def test_close(self):
+    def CheckClose(self):
         self.cx.close()
 
-    def test_exceptions(self):
+    def CheckExceptions(self):
         # Optional DB-API extension.
         self.assertEqual(self.cx.Warning, sqlite.Warning)
         self.assertEqual(self.cx.Error, sqlite.Error)
@@ -154,7 +138,7 @@
         self.assertEqual(self.cx.ProgrammingError, sqlite.ProgrammingError)
         self.assertEqual(self.cx.NotSupportedError, sqlite.NotSupportedError)
 
-    def test_in_transaction(self):
+    def CheckInTransaction(self):
         # Can't use db from setUp because we want to test initial state.
         cx = sqlite.connect(":memory:")
         cu = cx.cursor()
@@ -172,11 +156,11 @@
         row = cu.fetchone()
         self.assertEqual(cx.in_transaction, False)
 
-    def test_in_transaction_ro(self):
+    def CheckInTransactionRO(self):
         with self.assertRaises(AttributeError):
             self.cx.in_transaction = True
 
-    def test_open_with_path_like_object(self):
+    def CheckOpenWithPathLikeObject(self):
         """ Checks that we can successfully connect to a database using an object that
             is PathLike, i.e. has __fspath__(). """
         self.addCleanup(unlink, TESTFN)
@@ -187,7 +171,11 @@
         with sqlite.connect(path) as cx:
             cx.execute('create table test(id integer)')
 
-    def test_open_uri(self):
+    def CheckOpenUri(self):
+        if sqlite.sqlite_version_info < (3, 7, 7):
+            with self.assertRaises(sqlite.NotSupportedError):
+                sqlite.connect(':memory:', uri=True)
+            return
         self.addCleanup(unlink, TESTFN)
         with sqlite.connect(TESTFN) as cx:
             cx.execute('create table test(id integer)')
@@ -197,26 +185,12 @@
             with self.assertRaises(sqlite.OperationalError):
                 cx.execute('insert into test(id) values(1)')
 
-
-class UninitialisedConnectionTests(unittest.TestCase):
-    def setUp(self):
-        self.cx = sqlite.Connection.__new__(sqlite.Connection)
-
-    def test_uninit_operations(self):
-        funcs = (
-            lambda: self.cx.isolation_level,
-            lambda: self.cx.total_changes,
-            lambda: self.cx.in_transaction,
-            lambda: self.cx.iterdump(),
-            lambda: self.cx.cursor(),
-            lambda: self.cx.close(),
-        )
-        for func in funcs:
-            with self.subTest(func=func):
-                self.assertRaisesRegex(sqlite.ProgrammingError,
-                                       "Base Connection.__init__ not called",
-                                       func)
-
+    @unittest.skipIf(sqlite.sqlite_version_info >= (3, 3, 1),
+                     'needs sqlite versions older than 3.3.1')
+    def CheckSameThreadErrorOnOldVersion(self):
+        with self.assertRaises(sqlite.NotSupportedError) as cm:
+            sqlite.connect(':memory:', check_same_thread=False)
+        self.assertEqual(str(cm.exception), 'shared connections not available')
 
 class CursorTests(unittest.TestCase):
     def setUp(self):
@@ -232,21 +206,21 @@
         self.cu.close()
         self.cx.close()
 
-    def test_execute_no_args(self):
+    def CheckExecuteNoArgs(self):
         self.cu.execute("delete from test")
 
-    def test_execute_illegal_sql(self):
+    def CheckExecuteIllegalSql(self):
         with self.assertRaises(sqlite.OperationalError):
             self.cu.execute("select asdf")
 
-    def test_execute_too_much_sql(self):
+    def CheckExecuteTooMuchSql(self):
         with self.assertRaises(sqlite.Warning):
             self.cu.execute("select 5+4; select 4+5")
 
-    def test_execute_too_much_sql2(self):
+    def CheckExecuteTooMuchSql2(self):
         self.cu.execute("select 5+4; -- foo bar")
 
-    def test_execute_too_much_sql3(self):
+    def CheckExecuteTooMuchSql3(self):
         self.cu.execute("""
             select 5+4;
 
@@ -255,53 +229,53 @@
             */
             """)
 
-    def test_execute_wrong_sql_arg(self):
+    def CheckExecuteWrongSqlArg(self):
         with self.assertRaises(TypeError):
             self.cu.execute(42)
 
-    def test_execute_arg_int(self):
+    def CheckExecuteArgInt(self):
         self.cu.execute("insert into test(id) values (?)", (42,))
 
-    def test_execute_arg_float(self):
+    def CheckExecuteArgFloat(self):
         self.cu.execute("insert into test(income) values (?)", (2500.32,))
 
-    def test_execute_arg_string(self):
+    def CheckExecuteArgString(self):
         self.cu.execute("insert into test(name) values (?)", ("Hugo",))
 
-    def test_execute_arg_string_with_zero_byte(self):
+    def CheckExecuteArgStringWithZeroByte(self):
         self.cu.execute("insert into test(name) values (?)", ("Hu\x00go",))
 
         self.cu.execute("select name from test where id=?", (self.cu.lastrowid,))
         row = self.cu.fetchone()
         self.assertEqual(row[0], "Hu\x00go")
 
-    def test_execute_non_iterable(self):
+    def CheckExecuteNonIterable(self):
         with self.assertRaises(ValueError) as cm:
             self.cu.execute("insert into test(id) values (?)", 42)
         self.assertEqual(str(cm.exception), 'parameters are of unsupported type')
 
-    def test_execute_wrong_no_of_args1(self):
+    def CheckExecuteWrongNoOfArgs1(self):
         # too many parameters
         with self.assertRaises(sqlite.ProgrammingError):
             self.cu.execute("insert into test(id) values (?)", (17, "Egon"))
 
-    def test_execute_wrong_no_of_args2(self):
+    def CheckExecuteWrongNoOfArgs2(self):
         # too few parameters
         with self.assertRaises(sqlite.ProgrammingError):
             self.cu.execute("insert into test(id) values (?)")
 
-    def test_execute_wrong_no_of_args3(self):
+    def CheckExecuteWrongNoOfArgs3(self):
         # no parameters, parameters are needed
         with self.assertRaises(sqlite.ProgrammingError):
             self.cu.execute("insert into test(id) values (?)")
 
-    def test_execute_param_list(self):
+    def CheckExecuteParamList(self):
         self.cu.execute("insert into test(name) values ('foo')")
         self.cu.execute("select name from test where name=?", ["foo"])
         row = self.cu.fetchone()
         self.assertEqual(row[0], "foo")
 
-    def test_execute_param_sequence(self):
+    def CheckExecuteParamSequence(self):
         class L:
             def __len__(self):
                 return 1
@@ -314,7 +288,7 @@
         row = self.cu.fetchone()
         self.assertEqual(row[0], "foo")
 
-    def test_execute_param_sequence_bad_len(self):
+    def CheckExecuteParamSequenceBadLen(self):
         # Issue41662: Error in __len__() was overridden with ProgrammingError.
         class L:
             def __len__(self):
@@ -326,13 +300,13 @@
         with self.assertRaises(ZeroDivisionError):
             self.cu.execute("select name from test where name=?", L())
 
-    def test_execute_dict_mapping(self):
+    def CheckExecuteDictMapping(self):
         self.cu.execute("insert into test(name) values ('foo')")
         self.cu.execute("select name from test where name=:name", {"name": "foo"})
         row = self.cu.fetchone()
         self.assertEqual(row[0], "foo")
 
-    def test_execute_dict_mapping_mapping(self):
+    def CheckExecuteDictMapping_Mapping(self):
         class D(dict):
             def __missing__(self, key):
                 return "foo"
@@ -342,32 +316,32 @@
         row = self.cu.fetchone()
         self.assertEqual(row[0], "foo")
 
-    def test_execute_dict_mapping_too_little_args(self):
+    def CheckExecuteDictMappingTooLittleArgs(self):
         self.cu.execute("insert into test(name) values ('foo')")
         with self.assertRaises(sqlite.ProgrammingError):
             self.cu.execute("select name from test where name=:name and id=:id", {"name": "foo"})
 
-    def test_execute_dict_mapping_no_args(self):
+    def CheckExecuteDictMappingNoArgs(self):
         self.cu.execute("insert into test(name) values ('foo')")
         with self.assertRaises(sqlite.ProgrammingError):
             self.cu.execute("select name from test where name=:name")
 
-    def test_execute_dict_mapping_unnamed(self):
+    def CheckExecuteDictMappingUnnamed(self):
         self.cu.execute("insert into test(name) values ('foo')")
         with self.assertRaises(sqlite.ProgrammingError):
             self.cu.execute("select name from test where name=?", {"name": "foo"})
 
-    def test_close(self):
+    def CheckClose(self):
         self.cu.close()
 
-    def test_rowcount_execute(self):
+    def CheckRowcountExecute(self):
         self.cu.execute("delete from test")
         self.cu.execute("insert into test(name) values ('foo')")
         self.cu.execute("insert into test(name) values ('foo')")
         self.cu.execute("update test set name='bar'")
         self.assertEqual(self.cu.rowcount, 2)
 
-    def test_rowcount_select(self):
+    def CheckRowcountSelect(self):
         """
         pysqlite does not know the rowcount of SELECT statements, because we
         don't fetch all rows after executing the select statement. The rowcount
@@ -376,12 +350,12 @@
         self.cu.execute("select 5 union select 6")
         self.assertEqual(self.cu.rowcount, -1)
 
-    def test_rowcount_executemany(self):
+    def CheckRowcountExecutemany(self):
         self.cu.execute("delete from test")
         self.cu.executemany("insert into test(name) values (?)", [(1,), (2,), (3,)])
         self.assertEqual(self.cu.rowcount, 3)
 
-    def test_total_changes(self):
+    def CheckTotalChanges(self):
         self.cu.execute("insert into test(name) values ('foo')")
         self.cu.execute("insert into test(name) values ('foo')")
         self.assertLess(2, self.cx.total_changes, msg='total changes reported wrong value')
@@ -390,17 +364,14 @@
     # Sequences are required by the DB-API; iterator
     # support is a pysqlite enhancement.
 
-    def test_execute_many_sequence(self):
+    def CheckExecuteManySequence(self):
         self.cu.executemany("insert into test(income) values (?)", [(x,) for x in range(100, 110)])
 
-    def test_execute_many_iterator(self):
+    def CheckExecuteManyIterator(self):
         class MyIter:
             def __init__(self):
                 self.value = 5
 
-            def __iter__(self):
-                return self
-
             def __next__(self):
                 if self.value == 10:
                     raise StopIteration
@@ -410,26 +381,26 @@
 
         self.cu.executemany("insert into test(income) values (?)", MyIter())
 
-    def test_execute_many_generator(self):
+    def CheckExecuteManyGenerator(self):
         def mygen():
             for i in range(5):
                 yield (i,)
 
         self.cu.executemany("insert into test(income) values (?)", mygen())
 
-    def test_execute_many_wrong_sql_arg(self):
+    def CheckExecuteManyWrongSqlArg(self):
         with self.assertRaises(TypeError):
             self.cu.executemany(42, [(3,)])
 
-    def test_execute_many_select(self):
+    def CheckExecuteManySelect(self):
         with self.assertRaises(sqlite.ProgrammingError):
             self.cu.executemany("select ?", [(3,)])
 
-    def test_execute_many_not_iterable(self):
+    def CheckExecuteManyNotIterable(self):
         with self.assertRaises(TypeError):
             self.cu.executemany("insert into test(income) values (?)", 42)
 
-    def test_fetch_iter(self):
+    def CheckFetchIter(self):
         # Optional DB-API extension.
         self.cu.execute("delete from test")
         self.cu.execute("insert into test(id) values (?)", (5,))
@@ -441,20 +412,20 @@
         self.assertEqual(lst[0], 5)
         self.assertEqual(lst[1], 6)
 
-    def test_fetchone(self):
+    def CheckFetchone(self):
         self.cu.execute("select name from test")
         row = self.cu.fetchone()
         self.assertEqual(row[0], "foo")
         row = self.cu.fetchone()
         self.assertEqual(row, None)
 
-    def test_fetchone_no_statement(self):
+    def CheckFetchoneNoStatement(self):
         cur = self.cx.cursor()
         row = cur.fetchone()
         self.assertEqual(row, None)
 
-    def test_array_size(self):
-        # must default to 1
+    def CheckArraySize(self):
+        # must default to 1
         self.assertEqual(self.cu.arraysize, 1)
 
         # now set to 2
@@ -470,51 +441,51 @@
 
         self.assertEqual(len(res), 2)
 
-    def test_fetchmany(self):
+    def CheckFetchmany(self):
         self.cu.execute("select name from test")
         res = self.cu.fetchmany(100)
         self.assertEqual(len(res), 1)
         res = self.cu.fetchmany(100)
         self.assertEqual(res, [])
 
-    def test_fetchmany_kw_arg(self):
+    def CheckFetchmanyKwArg(self):
         """Checks if fetchmany works with keyword arguments"""
         self.cu.execute("select name from test")
         res = self.cu.fetchmany(size=100)
         self.assertEqual(len(res), 1)
 
-    def test_fetchall(self):
+    def CheckFetchall(self):
         self.cu.execute("select name from test")
         res = self.cu.fetchall()
         self.assertEqual(len(res), 1)
         res = self.cu.fetchall()
         self.assertEqual(res, [])
 
-    def test_setinputsizes(self):
+    def CheckSetinputsizes(self):
         self.cu.setinputsizes([3, 4, 5])
 
-    def test_setoutputsize(self):
+    def CheckSetoutputsize(self):
         self.cu.setoutputsize(5, 0)
 
-    def test_setoutputsize_no_column(self):
+    def CheckSetoutputsizeNoColumn(self):
         self.cu.setoutputsize(42)
 
-    def test_cursor_connection(self):
+    def CheckCursorConnection(self):
         # Optional DB-API extension.
         self.assertEqual(self.cu.connection, self.cx)
 
-    def test_wrong_cursor_callable(self):
+    def CheckWrongCursorCallable(self):
         with self.assertRaises(TypeError):
             def f(): pass
             cur = self.cx.cursor(f)
 
-    def test_cursor_wrong_class(self):
+    def CheckCursorWrongClass(self):
         class Foo: pass
         foo = Foo()
         with self.assertRaises(TypeError):
             cur = sqlite.Cursor(foo)
 
-    def test_last_row_id_on_replace(self):
+    def CheckLastRowIDOnReplace(self):
         """
         INSERT OR REPLACE and REPLACE INTO should produce the same behavior.
         """
@@ -524,7 +495,7 @@
                 self.cu.execute(sql.format(statement), (1, 'foo'))
                 self.assertEqual(self.cu.lastrowid, 1)
 
-    def test_last_row_id_on_ignore(self):
+    def CheckLastRowIDOnIgnore(self):
         self.cu.execute(
             "insert or ignore into test(unique_test) values (?)",
             ('test',))
@@ -534,7 +505,7 @@
             ('test',))
         self.assertEqual(self.cu.lastrowid, 2)
 
-    def test_last_row_id_insert_o_r(self):
+    def CheckLastRowIDInsertOR(self):
         results = []
         for statement in ('FAIL', 'ABORT', 'ROLLBACK'):
             sql = 'INSERT OR {} INTO test(unique_test) VALUES (?)'
@@ -562,7 +533,7 @@
         self.cur.close()
         self.con.close()
 
-    def test_con_cursor(self):
+    def CheckConCursor(self):
         def run(con, errors):
             try:
                 cur = con.cursor()
@@ -580,7 +551,7 @@
         if len(errors) > 0:
             self.fail("\n".join(errors))
 
-    def test_con_commit(self):
+    def CheckConCommit(self):
         def run(con, errors):
             try:
                 con.commit()
@@ -598,7 +569,7 @@
         if len(errors) > 0:
             self.fail("\n".join(errors))
 
-    def test_con_rollback(self):
+    def CheckConRollback(self):
         def run(con, errors):
             try:
                 con.rollback()
@@ -616,7 +587,7 @@
         if len(errors) > 0:
             self.fail("\n".join(errors))
 
-    def test_con_close(self):
+    def CheckConClose(self):
         def run(con, errors):
             try:
                 con.close()
@@ -634,7 +605,7 @@
         if len(errors) > 0:
             self.fail("\n".join(errors))
 
-    def test_cur_implicit_begin(self):
+    def CheckCurImplicitBegin(self):
         def run(cur, errors):
             try:
                 cur.execute("insert into test(name) values ('a')")
@@ -652,7 +623,7 @@
         if len(errors) > 0:
             self.fail("\n".join(errors))
 
-    def test_cur_close(self):
+    def CheckCurClose(self):
         def run(cur, errors):
             try:
                 cur.close()
@@ -670,7 +641,7 @@
         if len(errors) > 0:
             self.fail("\n".join(errors))
 
-    def test_cur_execute(self):
+    def CheckCurExecute(self):
         def run(cur, errors):
             try:
                 cur.execute("select name from test")
@@ -689,7 +660,7 @@
         if len(errors) > 0:
             self.fail("\n".join(errors))
 
-    def test_cur_iter_next(self):
+    def CheckCurIterNext(self):
         def run(cur, errors):
             try:
                 row = cur.fetchone()
@@ -710,29 +681,29 @@
             self.fail("\n".join(errors))
 
 class ConstructorTests(unittest.TestCase):
-    def test_date(self):
+    def CheckDate(self):
         d = sqlite.Date(2004, 10, 28)
 
-    def test_time(self):
+    def CheckTime(self):
         t = sqlite.Time(12, 39, 35)
 
-    def test_timestamp(self):
+    def CheckTimestamp(self):
         ts = sqlite.Timestamp(2004, 10, 28, 12, 39, 35)
 
-    def test_date_from_ticks(self):
+    def CheckDateFromTicks(self):
         d = sqlite.DateFromTicks(42)
 
-    def test_time_from_ticks(self):
+    def CheckTimeFromTicks(self):
         t = sqlite.TimeFromTicks(42)
 
-    def test_timestamp_from_ticks(self):
+    def CheckTimestampFromTicks(self):
         ts = sqlite.TimestampFromTicks(42)
 
-    def test_binary(self):
+    def CheckBinary(self):
         b = sqlite.Binary(b"\0'")
 
 class ExtensionTests(unittest.TestCase):
-    def test_script_string_sql(self):
+    def CheckScriptStringSql(self):
         con = sqlite.connect(":memory:")
         cur = con.cursor()
         cur.executescript("""
@@ -745,31 +716,31 @@
         res = cur.fetchone()[0]
         self.assertEqual(res, 5)
 
-    def test_script_syntax_error(self):
+    def CheckScriptSyntaxError(self):
         con = sqlite.connect(":memory:")
         cur = con.cursor()
         with self.assertRaises(sqlite.OperationalError):
             cur.executescript("create table test(x); asdf; create table test2(x)")
 
-    def test_script_error_normal(self):
+    def CheckScriptErrorNormal(self):
         con = sqlite.connect(":memory:")
         cur = con.cursor()
         with self.assertRaises(sqlite.OperationalError):
             cur.executescript("create table test(sadfsadfdsa); select foo from hurz;")
 
-    def test_cursor_executescript_as_bytes(self):
+    def CheckCursorExecutescriptAsBytes(self):
         con = sqlite.connect(":memory:")
         cur = con.cursor()
         with self.assertRaises(ValueError) as cm:
             cur.executescript(b"create table test(foo); insert into test(foo) values (5);")
         self.assertEqual(str(cm.exception), 'script argument must be unicode.')
 
-    def test_connection_execute(self):
+    def CheckConnectionExecute(self):
         con = sqlite.connect(":memory:")
         result = con.execute("select 5").fetchone()[0]
         self.assertEqual(result, 5, "Basic test of Connection.execute")
 
-    def test_connection_executemany(self):
+    def CheckConnectionExecutemany(self):
         con = sqlite.connect(":memory:")
         con.execute("create table test(foo)")
         con.executemany("insert into test(foo) values (?)", [(3,), (4,)])
@@ -777,46 +748,46 @@
         self.assertEqual(result[0][0], 3, "Basic test of Connection.executemany")
         self.assertEqual(result[1][0], 4, "Basic test of Connection.executemany")
 
-    def test_connection_executescript(self):
+    def CheckConnectionExecutescript(self):
         con = sqlite.connect(":memory:")
         con.executescript("create table test(foo); insert into test(foo) values (5);")
         result = con.execute("select foo from test").fetchone()[0]
         self.assertEqual(result, 5, "Basic test of Connection.executescript")
 
 class ClosedConTests(unittest.TestCase):
-    def test_closed_con_cursor(self):
+    def CheckClosedConCursor(self):
         con = sqlite.connect(":memory:")
         con.close()
         with self.assertRaises(sqlite.ProgrammingError):
             cur = con.cursor()
 
-    def test_closed_con_commit(self):
+    def CheckClosedConCommit(self):
         con = sqlite.connect(":memory:")
         con.close()
         with self.assertRaises(sqlite.ProgrammingError):
             con.commit()
 
-    def test_closed_con_rollback(self):
+    def CheckClosedConRollback(self):
         con = sqlite.connect(":memory:")
         con.close()
         with self.assertRaises(sqlite.ProgrammingError):
             con.rollback()
 
-    def test_closed_cur_execute(self):
+    def CheckClosedCurExecute(self):
         con = sqlite.connect(":memory:")
         cur = con.cursor()
         con.close()
         with self.assertRaises(sqlite.ProgrammingError):
             cur.execute("select 4")
 
-    def test_closed_create_function(self):
+    def CheckClosedCreateFunction(self):
         con = sqlite.connect(":memory:")
         con.close()
         def f(x): return 17
         with self.assertRaises(sqlite.ProgrammingError):
             con.create_function("foo", 1, f)
 
-    def test_closed_create_aggregate(self):
+    def CheckClosedCreateAggregate(self):
         con = sqlite.connect(":memory:")
         con.close()
         class Agg:
@@ -829,7 +800,7 @@
         with self.assertRaises(sqlite.ProgrammingError):
             con.create_aggregate("foo", 1, Agg)
 
-    def test_closed_set_authorizer(self):
+    def CheckClosedSetAuthorizer(self):
         con = sqlite.connect(":memory:")
         con.close()
         def authorizer(*args):
@@ -837,21 +808,21 @@
         with self.assertRaises(sqlite.ProgrammingError):
             con.set_authorizer(authorizer)
 
-    def test_closed_set_progress_callback(self):
+    def CheckClosedSetProgressCallback(self):
         con = sqlite.connect(":memory:")
         con.close()
         def progress(): pass
         with self.assertRaises(sqlite.ProgrammingError):
             con.set_progress_handler(progress, 100)
 
-    def test_closed_call(self):
+    def CheckClosedCall(self):
         con = sqlite.connect(":memory:")
         con.close()
         with self.assertRaises(sqlite.ProgrammingError):
             con()
 
 class ClosedCurTests(unittest.TestCase):
-    def test_closed(self):
+    def CheckClosed(self):
         con = sqlite.connect(":memory:")
         cur = con.cursor()
         cur.close()
@@ -889,7 +860,7 @@
         self.cu.close()
         self.cx.close()
 
-    def test_on_conflict_rollback_with_explicit_transaction(self):
+    def CheckOnConflictRollbackWithExplicitTransaction(self):
         self.cx.isolation_level = None  # autocommit mode
         self.cu = self.cx.cursor()
         # Start an explicit transaction.
@@ -904,7 +875,7 @@
         # Transaction should have rolled back and nothing should be in table.
         self.assertEqual(self.cu.fetchall(), [])
 
-    def test_on_conflict_abort_raises_with_explicit_transactions(self):
+    def CheckOnConflictAbortRaisesWithExplicitTransactions(self):
         # Abort cancels the current sql statement but doesn't change anything
         # about the current transaction.
         self.cx.isolation_level = None  # autocommit mode
@@ -920,7 +891,7 @@
         # Expect the first two inserts to work, third to do nothing.
         self.assertEqual(self.cu.fetchall(), [('abort_test', None), (None, 'foo',)])
 
-    def test_on_conflict_rollback_without_transaction(self):
+    def CheckOnConflictRollbackWithoutTransaction(self):
         # Start of implicit transaction
         self.cu.execute("INSERT INTO test(name) VALUES ('abort_test')")
         self.cu.execute("INSERT OR ROLLBACK INTO test(unique_name) VALUES ('foo')")
@@ -930,7 +901,7 @@
         # Implicit transaction is rolled back on error.
         self.assertEqual(self.cu.fetchall(), [])
 
-    def test_on_conflict_abort_raises_without_transactions(self):
+    def CheckOnConflictAbortRaisesWithoutTransactions(self):
         # Abort cancels the current sql statement but doesn't change anything
         # about the current transaction.
         self.cu.execute("INSERT INTO test(name) VALUES ('abort_test')")
@@ -941,20 +912,20 @@
         self.cu.execute("SELECT name, unique_name FROM test")
         self.assertEqual(self.cu.fetchall(), [('abort_test', None), (None, 'foo',)])
 
-    def test_on_conflict_fail(self):
+    def CheckOnConflictFail(self):
         self.cu.execute("INSERT OR FAIL INTO test(unique_name) VALUES ('foo')")
         with self.assertRaises(sqlite.IntegrityError):
             self.cu.execute("INSERT OR FAIL INTO test(unique_name) VALUES ('foo')")
         self.assertEqual(self.cu.fetchall(), [])
 
-    def test_on_conflict_ignore(self):
+    def CheckOnConflictIgnore(self):
         self.cu.execute("INSERT OR IGNORE INTO test(unique_name) VALUES ('foo')")
         # Nothing should happen.
         self.cu.execute("INSERT OR IGNORE INTO test(unique_name) VALUES ('foo')")
         self.cu.execute("SELECT unique_name FROM test")
         self.assertEqual(self.cu.fetchall(), [('foo',)])
 
-    def test_on_conflict_replace(self):
+    def CheckOnConflictReplace(self):
         self.cu.execute("INSERT OR REPLACE INTO test(name, unique_name) VALUES ('Data!', 'foo')")
         # There shouldn't be an IntegrityError exception.
         self.cu.execute("INSERT OR REPLACE INTO test(name, unique_name) VALUES ('Very different data!', 'foo')")
@@ -962,94 +933,21 @@
         self.assertEqual(self.cu.fetchall(), [('Very different data!', 'foo')])
 
 
-class MultiprocessTests(unittest.TestCase):
-    CONNECTION_TIMEOUT = SHORT_TIMEOUT / 1000.  # Defaults to 30 ms
-
-    def tearDown(self):
-        unlink(TESTFN)
-
-    def test_ctx_mgr_rollback_if_commit_failed(self):
-        # bpo-27334: ctx manager does not rollback if commit fails
-        SCRIPT = f"""if 1:
-            import sqlite3
-            def wait():
-                print("started")
-                assert "database is locked" in input()
-
-            cx = sqlite3.connect("{TESTFN}", timeout={self.CONNECTION_TIMEOUT})
-            cx.create_function("wait", 0, wait)
-            with cx:
-                cx.execute("create table t(t)")
-            try:
-                # execute two transactions; both will try to lock the db
-                cx.executescript('''
-                    -- start a transaction and wait for parent
-                    begin transaction;
-                    select * from t;
-                    select wait();
-                    rollback;
-
-                    -- start a new transaction; would fail if parent holds lock
-                    begin transaction;
-                    select * from t;
-                    rollback;
-                ''')
-            finally:
-                cx.close()
-        """
-
-        # spawn child process
-        proc = subprocess.Popen(
-            [sys.executable, "-c", SCRIPT],
-            encoding="utf-8",
-            bufsize=0,
-            stdin=subprocess.PIPE,
-            stdout=subprocess.PIPE,
-        )
-        self.addCleanup(proc.communicate)
-
-        # wait for child process to start
-        self.assertEqual("started", proc.stdout.readline().strip())
-
-        cx = sqlite.connect(TESTFN, timeout=self.CONNECTION_TIMEOUT)
-        try:  # context manager should correctly release the db lock
-            with cx:
-                cx.execute("insert into t values('test')")
-        except sqlite.OperationalError as exc:
-            proc.stdin.write(str(exc))
-        else:
-            proc.stdin.write("no error")
-        finally:
-            cx.close()
-
-        # terminate child process
-        self.assertIsNone(proc.returncode)
-        try:
-            proc.communicate(input="end", timeout=SHORT_TIMEOUT)
-        except subprocess.TimeoutExpired:
-            proc.kill()
-            proc.communicate()
-            raise
-        self.assertEqual(proc.returncode, 0)
-
-
 def suite():
-    tests = [
-        ClosedConTests,
-        ClosedCurTests,
-        ConnectionTests,
-        ConstructorTests,
-        CursorTests,
-        ExtensionTests,
-        ModuleTests,
-        MultiprocessTests,
-        SqliteOnConflictTests,
-        ThreadTests,
-        UninitialisedConnectionTests,
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    module_suite = unittest.makeSuite(ModuleTests, "Check")
+    connection_suite = unittest.makeSuite(ConnectionTests, "Check")
+    cursor_suite = unittest.makeSuite(CursorTests, "Check")
+    thread_suite = unittest.makeSuite(ThreadTests, "Check")
+    constructor_suite = unittest.makeSuite(ConstructorTests, "Check")
+    ext_suite = unittest.makeSuite(ExtensionTests, "Check")
+    closed_con_suite = unittest.makeSuite(ClosedConTests, "Check")
+    closed_cur_suite = unittest.makeSuite(ClosedCurTests, "Check")
+    on_conflict_suite = unittest.makeSuite(SqliteOnConflictTests, "Check")
+    return unittest.TestSuite((
+        module_suite, connection_suite, cursor_suite, thread_suite,
+        constructor_suite, ext_suite, closed_con_suite, closed_cur_suite,
+        on_conflict_suite,
+    ))
 
 def test():
     runner = unittest.TextTestRunner()
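The suite() bodies restored above depend on the since-deprecated
unittest.makeSuite(cls, prefix) helper, which collects every method of cls whose name
starts with the given prefix; that is why the test methods throughout this revert are
renamed from test_* back to Check*. A minimal standalone sketch of the same mechanism
(names are illustrative, not from this tree):

    import unittest

    class ExampleTests(unittest.TestCase):
        def CheckTruth(self):      # collected: name starts with "Check"
            self.assertTrue(True)

        def helper(self):          # ignored: wrong prefix
            pass

    # makeSuite(ExampleTests, "Check") is roughly equivalent to:
    loader = unittest.TestLoader()
    loader.testMethodPrefix = "Check"
    suite = loader.loadTestsFromTestCase(ExampleTests)
    unittest.TextTestRunner().run(suite)
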
diff --git a/common/py3-stdlib/sqlite3/test/dump.py b/common/py3-stdlib/sqlite3/test/dump.py
index 618a7fd..a1f45a4 100644
--- a/common/py3-stdlib/sqlite3/test/dump.py
+++ b/common/py3-stdlib/sqlite3/test/dump.py
@@ -11,7 +11,7 @@
     def tearDown(self):
         self.cx.close()
 
-    def test_table_dump(self):
+    def CheckTableDump(self):
         expected_sqls = [
                 """CREATE TABLE "index"("index" blob);"""
                 ,
@@ -49,7 +49,7 @@
         [self.assertEqual(expected_sqls[i], actual_sqls[i])
             for i in range(len(expected_sqls))]
 
-    def test_unorderable_row(self):
+    def CheckUnorderableRow(self):
         # iterdump() should be able to cope with unorderable row types (issue #15545)
         class UnorderableRow:
             def __init__(self, cursor, row):
@@ -71,12 +71,7 @@
         self.assertEqual(expected, got)
 
 def suite():
-    tests = [
-        DumpTests,
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    return unittest.TestSuite(unittest.makeSuite(DumpTests, "Check"))
 
 def test():
     runner = unittest.TextTestRunner()
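dump.py exercises Connection.iterdump(), which yields the SQL statements needed to
recreate the database, much like the .dump command of the sqlite3 shell. A small usage
sketch against a throwaway in-memory database (illustrative only):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute('create table "index"("index" blob)')  # quoted keyword, as in the test
    con.execute('insert into "index" values (?)', (b"\x01",))
    for stmt in con.iterdump():
        print(stmt)  # BEGIN TRANSACTION; CREATE TABLE ...; INSERT ...; COMMIT;
    con.close()
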
diff --git a/common/py3-stdlib/sqlite3/test/factory.py b/common/py3-stdlib/sqlite3/test/factory.py
index 8764284..95dd24b 100644
--- a/common/py3-stdlib/sqlite3/test/factory.py
+++ b/common/py3-stdlib/sqlite3/test/factory.py
@@ -1,6 +1,7 @@
+#-*- coding: iso-8859-1 -*-
 # pysqlite2/test/factory.py: tests for the various factories in pysqlite
 #
-# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
+# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
 #
 # This file is part of pysqlite.
 #
@@ -46,7 +47,7 @@
     def tearDown(self):
         self.con.close()
 
-    def test_is_instance(self):
+    def CheckIsInstance(self):
         self.assertIsInstance(self.con, MyConnection)
 
 class CursorFactoryTests(unittest.TestCase):
@@ -56,7 +57,7 @@
     def tearDown(self):
         self.con.close()
 
-    def test_is_instance(self):
+    def CheckIsInstance(self):
         cur = self.con.cursor()
         self.assertIsInstance(cur, sqlite.Cursor)
         cur = self.con.cursor(MyCursor)
@@ -64,7 +65,7 @@
         cur = self.con.cursor(factory=lambda con: MyCursor(con))
         self.assertIsInstance(cur, MyCursor)
 
-    def test_invalid_factory(self):
+    def CheckInvalidFactory(self):
         # not a callable at all
         self.assertRaises(TypeError, self.con.cursor, None)
         # invalid callable with not exact one argument
@@ -76,7 +77,7 @@
     def setUp(self):
         self.con = sqlite.connect(":memory:")
 
-    def test_is_produced_by_factory(self):
+    def CheckIsProducedByFactory(self):
         cur = self.con.cursor(factory=MyCursor)
         cur.execute("select 4+5 as foo")
         row = cur.fetchone()
@@ -90,12 +91,12 @@
     def setUp(self):
         self.con = sqlite.connect(":memory:")
 
-    def test_custom_factory(self):
+    def CheckCustomFactory(self):
         self.con.row_factory = lambda cur, row: list(row)
         row = self.con.execute("select 1, 2").fetchone()
         self.assertIsInstance(row, list)
 
-    def test_sqlite_row_index(self):
+    def CheckSqliteRowIndex(self):
         self.con.row_factory = sqlite.Row
         row = self.con.execute("select 1 as a_1, 2 as b").fetchone()
         self.assertIsInstance(row, sqlite.Row)
@@ -124,7 +125,7 @@
         with self.assertRaises(IndexError):
             row[2**1000]
 
-    def test_sqlite_row_index_unicode(self):
+    def CheckSqliteRowIndexUnicode(self):
         self.con.row_factory = sqlite.Row
         row = self.con.execute("select 1 as \xff").fetchone()
         self.assertEqual(row["\xff"], 1)
@@ -133,7 +134,7 @@
         with self.assertRaises(IndexError):
             row['\xdf']
 
-    def test_sqlite_row_slice(self):
+    def CheckSqliteRowSlice(self):
         # A sqlite.Row can be sliced like a list.
         self.con.row_factory = sqlite.Row
         row = self.con.execute("select 1, 2, 3, 4").fetchone()
@@ -151,21 +152,21 @@
         self.assertEqual(row[0:4:2], (1, 3))
         self.assertEqual(row[3:0:-2], (4, 2))
 
-    def test_sqlite_row_iter(self):
+    def CheckSqliteRowIter(self):
         """Checks if the row object is iterable"""
         self.con.row_factory = sqlite.Row
         row = self.con.execute("select 1 as a, 2 as b").fetchone()
         for col in row:
             pass
 
-    def test_sqlite_row_as_tuple(self):
+    def CheckSqliteRowAsTuple(self):
         """Checks if the row object can be converted to a tuple"""
         self.con.row_factory = sqlite.Row
         row = self.con.execute("select 1 as a, 2 as b").fetchone()
         t = tuple(row)
         self.assertEqual(t, (row['a'], row['b']))
 
-    def test_sqlite_row_as_dict(self):
+    def CheckSqliteRowAsDict(self):
         """Checks if the row object can be correctly converted to a dictionary"""
         self.con.row_factory = sqlite.Row
         row = self.con.execute("select 1 as a, 2 as b").fetchone()
@@ -173,7 +174,7 @@
         self.assertEqual(d["a"], row["a"])
         self.assertEqual(d["b"], row["b"])
 
-    def test_sqlite_row_hash_cmp(self):
+    def CheckSqliteRowHashCmp(self):
         """Checks if the row object compares and hashes correctly"""
         self.con.row_factory = sqlite.Row
         row_1 = self.con.execute("select 1 as a, 2 as b").fetchone()
@@ -207,7 +208,7 @@
 
         self.assertEqual(hash(row_1), hash(row_2))
 
-    def test_sqlite_row_as_sequence(self):
+    def CheckSqliteRowAsSequence(self):
         """ Checks if the row object can act like a sequence """
         self.con.row_factory = sqlite.Row
         row = self.con.execute("select 1 as a, 2 as b").fetchone()
@@ -216,7 +217,7 @@
         self.assertEqual(list(reversed(row)), list(reversed(as_tuple)))
         self.assertIsInstance(row, Sequence)
 
-    def test_fake_cursor_class(self):
+    def CheckFakeCursorClass(self):
         # Issue #24257: Incorrect use of PyObject_IsInstance() caused
         # segmentation fault.
         # Issue #27861: Also applies for cursor factory.
@@ -233,31 +234,30 @@
     def setUp(self):
         self.con = sqlite.connect(":memory:")
 
-    def test_unicode(self):
-        austria = "Österreich"
+    def CheckUnicode(self):
+        austria = "Österreich"
         row = self.con.execute("select ?", (austria,)).fetchone()
         self.assertEqual(type(row[0]), str, "type of row[0] must be unicode")
 
-    def test_string(self):
+    def CheckString(self):
         self.con.text_factory = bytes
-        austria = "Österreich"
+        austria = "Österreich"
         row = self.con.execute("select ?", (austria,)).fetchone()
         self.assertEqual(type(row[0]), bytes, "type of row[0] must be bytes")
         self.assertEqual(row[0], austria.encode("utf-8"), "column must equal original data in UTF-8")
 
-    def test_custom(self):
+    def CheckCustom(self):
         self.con.text_factory = lambda x: str(x, "utf-8", "ignore")
-        austria = "Österreich"
+        austria = "Österreich"
         row = self.con.execute("select ?", (austria,)).fetchone()
         self.assertEqual(type(row[0]), str, "type of row[0] must be unicode")
         self.assertTrue(row[0].endswith("reich"), "column must contain original data")
 
-    def test_optimized_unicode(self):
-        # OptimizedUnicode is deprecated as of Python 3.10
-        with self.assertWarns(DeprecationWarning) as cm:
-            self.con.text_factory = sqlite.OptimizedUnicode
-        self.assertIn("factory.py", cm.filename)
-        austria = "Österreich"
+    def CheckOptimizedUnicode(self):
+        # In py3k, str objects are always returned when text_factory
+        # is OptimizedUnicode
+        self.con.text_factory = sqlite.OptimizedUnicode
+        austria = "Österreich"
         germany = "Deutchland"
         a_row = self.con.execute("select ?", (austria,)).fetchone()
         d_row = self.con.execute("select ?", (germany,)).fetchone()
@@ -273,25 +273,25 @@
         self.con.execute("create table test (value text)")
         self.con.execute("insert into test (value) values (?)", ("a\x00b",))
 
-    def test_string(self):
+    def CheckString(self):
         # text_factory defaults to str
         row = self.con.execute("select value from test").fetchone()
         self.assertIs(type(row[0]), str)
         self.assertEqual(row[0], "a\x00b")
 
-    def test_bytes(self):
+    def CheckBytes(self):
         self.con.text_factory = bytes
         row = self.con.execute("select value from test").fetchone()
         self.assertIs(type(row[0]), bytes)
         self.assertEqual(row[0], b"a\x00b")
 
-    def test_bytearray(self):
+    def CheckBytearray(self):
         self.con.text_factory = bytearray
         row = self.con.execute("select value from test").fetchone()
         self.assertIs(type(row[0]), bytearray)
         self.assertEqual(row[0], b"a\x00b")
 
-    def test_custom(self):
+    def CheckCustom(self):
         # A custom factory should receive a bytes argument
         self.con.text_factory = lambda x: x
         row = self.con.execute("select value from test").fetchone()
@@ -302,17 +302,13 @@
         self.con.close()
 
 def suite():
-    tests = [
-        ConnectionFactoryTests,
-        CursorFactoryTests,
-        RowFactoryTests,
-        RowFactoryTestsBackwardsCompat,
-        TextFactoryTests,
-        TextFactoryTestsWithEmbeddedZeroBytes,
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    connection_suite = unittest.makeSuite(ConnectionFactoryTests, "Check")
+    cursor_suite = unittest.makeSuite(CursorFactoryTests, "Check")
+    row_suite_compat = unittest.makeSuite(RowFactoryTestsBackwardsCompat, "Check")
+    row_suite = unittest.makeSuite(RowFactoryTests, "Check")
+    text_suite = unittest.makeSuite(TextFactoryTests, "Check")
+    text_zero_bytes_suite = unittest.makeSuite(TextFactoryTestsWithEmbeddedZeroBytes, "Check")
+    return unittest.TestSuite((connection_suite, cursor_suite, row_suite_compat, row_suite, text_suite, text_zero_bytes_suite))
 
 def test():
     runner = unittest.TextTestRunner()
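Most of factory.py pins down the behaviour of sqlite3.Row, the optimized row_factory:
index access, case-insensitive name access, slicing, iteration, and equality/hashing.
A compact sketch of that contract (the assertions mirror the tests, the values are
illustrative):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.row_factory = sqlite3.Row
    row = con.execute("select 1 as a, 2 as b").fetchone()
    assert row[0] == row["a"] == row["A"] == 1  # index and case-insensitive name lookup
    assert row[0:2] == (1, 2)                   # sliceable like a tuple
    assert tuple(row) == (1, 2) and len(row) == 2
    con.close()
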
diff --git a/common/py3-stdlib/sqlite3/test/hooks.py b/common/py3-stdlib/sqlite3/test/hooks.py
index 8c60bdc..d74e74b 100644
--- a/common/py3-stdlib/sqlite3/test/hooks.py
+++ b/common/py3-stdlib/sqlite3/test/hooks.py
@@ -1,6 +1,7 @@
+#-*- coding: iso-8859-1 -*-
 # pysqlite2/test/hooks.py: tests for various SQLite-specific hooks
 #
-# Copyright (C) 2006-2007 Gerhard Häring <gh@ghaering.de>
+# Copyright (C) 2006-2007 Gerhard Häring <gh@ghaering.de>
 #
 # This file is part of pysqlite.
 #
@@ -23,27 +24,26 @@
 import unittest
 import sqlite3 as sqlite
 
-from test.support.os_helper import TESTFN, unlink
-
+from test.support import TESTFN, unlink
 
 class CollationTests(unittest.TestCase):
-    def test_create_collation_not_string(self):
+    def CheckCreateCollationNotString(self):
         con = sqlite.connect(":memory:")
         with self.assertRaises(TypeError):
             con.create_collation(None, lambda x, y: (x > y) - (x < y))
 
-    def test_create_collation_not_callable(self):
+    def CheckCreateCollationNotCallable(self):
         con = sqlite.connect(":memory:")
         with self.assertRaises(TypeError) as cm:
             con.create_collation("X", 42)
         self.assertEqual(str(cm.exception), 'parameter must be callable')
 
-    def test_create_collation_not_ascii(self):
+    def CheckCreateCollationNotAscii(self):
         con = sqlite.connect(":memory:")
         with self.assertRaises(sqlite.ProgrammingError):
-            con.create_collation("collä", lambda x, y: (x > y) - (x < y))
+            con.create_collation("collä", lambda x, y: (x > y) - (x < y))
 
-    def test_create_collation_bad_upper(self):
+    def CheckCreateCollationBadUpper(self):
         class BadUpperStr(str):
             def upper(self):
                 return None
@@ -60,7 +60,9 @@
         self.assertEqual(result[0][0], 'b')
         self.assertEqual(result[1][0], 'a')
 
-    def test_collation_is_used(self):
+    @unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 1),
+                     'old SQLite versions crash on this test')
+    def CheckCollationIsUsed(self):
         def mycoll(x, y):
             # reverse order
             return -((x > y) - (x < y))
@@ -85,7 +87,7 @@
             result = con.execute(sql).fetchall()
         self.assertEqual(str(cm.exception), 'no such collation sequence: mycoll')
 
-    def test_collation_returns_large_integer(self):
+    def CheckCollationReturnsLargeInteger(self):
         def mycoll(x, y):
             # reverse order
             return -((x > y) - (x < y)) * 2**32
@@ -104,7 +106,7 @@
         self.assertEqual(result, [('c',), ('b',), ('a',)],
                          msg="the expected order was not returned")
 
-    def test_collation_register_twice(self):
+    def CheckCollationRegisterTwice(self):
         """
         Register two different collation functions under the same name.
         Verify that the last one is actually used.
@@ -118,7 +120,7 @@
         self.assertEqual(result[0][0], 'b')
         self.assertEqual(result[1][0], 'a')
 
-    def test_deregister_collation(self):
+    def CheckDeregisterCollation(self):
         """
         Register a collation, then deregister it. Make sure an error is raised if we try
         to use it.
@@ -131,7 +133,7 @@
         self.assertEqual(str(cm.exception), 'no such collation sequence: mycoll')
 
 class ProgressTests(unittest.TestCase):
-    def test_progress_handler_used(self):
+    def CheckProgressHandlerUsed(self):
         """
         Test that the progress handler is invoked once it is set.
         """
@@ -147,7 +149,7 @@
         self.assertTrue(progress_calls)
 
 
-    def test_opcode_count(self):
+    def CheckOpcodeCount(self):
         """
         Test that the opcode argument is respected.
         """
@@ -170,7 +172,7 @@
         second_count = len(progress_calls)
         self.assertGreaterEqual(first_count, second_count)
 
-    def test_cancel_operation(self):
+    def CheckCancelOperation(self):
         """
         Test that returning a non-zero value stops the operation in progress.
         """
@@ -184,7 +186,7 @@
             curs.execute,
             "create table bar (a, b)")
 
-    def test_clear_handler(self):
+    def CheckClearHandler(self):
         """
         Test that setting the progress handler to None clears the previously set handler.
         """
@@ -200,7 +202,7 @@
         self.assertEqual(action, 0, "progress handler was not cleared")
 
 class TraceCallbackTests(unittest.TestCase):
-    def test_trace_callback_used(self):
+    def CheckTraceCallbackUsed(self):
         """
         Test that the trace callback is invoked once it is set.
         """
@@ -213,7 +215,7 @@
         self.assertTrue(traced_statements)
         self.assertTrue(any("create table foo" in stmt for stmt in traced_statements))
 
-    def test_clear_trace_callback(self):
+    def CheckClearTraceCallback(self):
         """
         Test that setting the trace callback to None clears the previously set callback.
         """
@@ -226,7 +228,7 @@
         con.execute("create table foo(a, b)")
         self.assertFalse(traced_statements, "trace callback was not cleared")
 
-    def test_unicode_content(self):
+    def CheckUnicodeContent(self):
         """
         Test that the statement can contain unicode literals.
         """
@@ -237,13 +239,17 @@
             traced_statements.append(statement)
         con.set_trace_callback(trace)
         con.execute("create table foo(x)")
-        con.execute("insert into foo(x) values ('%s')" % unicode_value)
+        # Can't execute bound parameters as their values don't appear
+        # in traced statements before SQLite 3.6.21
+        # (cf. http://www.sqlite.org/draft/releaselog/3_6_21.html)
+        con.execute('insert into foo(x) values ("%s")' % unicode_value)
         con.commit()
         self.assertTrue(any(unicode_value in stmt for stmt in traced_statements),
                         "Unicode data %s garbled in trace callback: %s"
                         % (ascii(unicode_value), ', '.join(map(ascii, traced_statements))))
 
-    def test_trace_callback_content(self):
+    @unittest.skipIf(sqlite.sqlite_version_info < (3, 3, 9), "sqlite3_prepare_v2 is not available")
+    def CheckTraceCallbackContent(self):
         # set_trace_callback() shouldn't produce duplicate content (bpo-26187)
         traced_statements = []
         def trace(statement):
@@ -263,14 +269,10 @@
 
 
 def suite():
-    tests = [
-        CollationTests,
-        ProgressTests,
-        TraceCallbackTests,
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    collation_suite = unittest.makeSuite(CollationTests, "Check")
+    progress_suite = unittest.makeSuite(ProgressTests, "Check")
+    trace_suite = unittest.makeSuite(TraceCallbackTests, "Check")
+    return unittest.TestSuite((collation_suite, progress_suite, trace_suite))
 
 def test():
     runner = unittest.TextTestRunner()
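The CollationTests above register Python comparison callbacks through
Connection.create_collation(name, callable); the callable returns a negative, zero, or
positive number, in the style of the classic cmp(). A minimal sketch of a reverse-order
collation (table and data are illustrative):

    import sqlite3

    def reverse(a, b):
        return -((a > b) - (a < b))  # cmp()-style comparison, negated

    con = sqlite3.connect(":memory:")
    con.create_collation("reverse", reverse)
    con.execute("create table t(x)")
    con.executemany("insert into t values (?)", [("a",), ("b",), ("c",)])
    rows = con.execute("select x from t order by x collate reverse").fetchall()
    assert [r[0] for r in rows] == ["c", "b", "a"]
    con.close()
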
diff --git a/common/py3-stdlib/sqlite3/test/regression.py b/common/py3-stdlib/sqlite3/test/regression.py
index 70d0ff9..6aa86d5 100644
--- a/common/py3-stdlib/sqlite3/test/regression.py
+++ b/common/py3-stdlib/sqlite3/test/regression.py
@@ -1,6 +1,7 @@
+#-*- coding: iso-8859-1 -*-
 # pysqlite2/test/regression.py: pysqlite regression tests
 #
-# Copyright (C) 2006-2010 Gerhard Häring <gh@ghaering.de>
+# Copyright (C) 2006-2010 Gerhard Häring <gh@ghaering.de>
 #
 # This file is part of pysqlite.
 #
@@ -34,12 +35,12 @@
     def tearDown(self):
         self.con.close()
 
-    def test_pragma_user_version(self):
+    def CheckPragmaUserVersion(self):
         # This used to crash pysqlite because this pragma command returns NULL for the column name
         cur = self.con.cursor()
         cur.execute("pragma user_version")
 
-    def test_pragma_schema_version(self):
+    def CheckPragmaSchemaVersion(self):
         # This still crashed pysqlite <= 2.2.1
         con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
         try:
@@ -49,7 +50,7 @@
             cur.close()
             con.close()
 
-    def test_statement_reset(self):
+    def CheckStatementReset(self):
         # pysqlite 2.1.0 to 2.2.0 had the problem that, before a rollback, only
         # statements still in the statement cache were reset; the others are not
         # accessible from the connection object.
@@ -64,7 +65,7 @@
 
         con.rollback()
 
-    def test_column_name_with_spaces(self):
+    def CheckColumnNameWithSpaces(self):
         cur = self.con.cursor()
         cur.execute('select 1 as "foo bar [datetime]"')
         self.assertEqual(cur.description[0][0], "foo bar [datetime]")
@@ -72,7 +73,7 @@
         cur.execute('select 1 as "foo baz"')
         self.assertEqual(cur.description[0][0], "foo baz")
 
-    def test_statement_finalization_on_close_db(self):
+    def CheckStatementFinalizationOnCloseDb(self):
         # pysqlite versions <= 2.3.3 only finalized statements in the statement
         # cache when closing the database. Statements that were still
         # referenced in cursors weren't closed and could provoke "
@@ -86,7 +87,8 @@
             cur.execute("select 1 x union select " + str(i))
         con.close()
 
-    def test_on_conflict_rollback(self):
+    @unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 2), 'needs sqlite 3.2.2 or newer')
+    def CheckOnConflictRollback(self):
         con = sqlite.connect(":memory:")
         con.execute("create table foo(x, unique(x) on conflict rollback)")
         con.execute("insert into foo(x) values (1)")
@@ -100,7 +102,7 @@
         except sqlite.OperationalError:
             self.fail("pysqlite knew nothing about the implicit ROLLBACK")
 
-    def test_workaround_for_buggy_sqlite_transfer_bindings(self):
+    def CheckWorkaroundForBuggySqliteTransferBindings(self):
         """
         pysqlite would crash with older SQLite versions unless
         a workaround is implemented.
@@ -109,14 +111,14 @@
         self.con.execute("drop table foo")
         self.con.execute("create table foo(bar)")
 
-    def test_empty_statement(self):
+    def CheckEmptyStatement(self):
         """
         pysqlite used to segfault with SQLite versions 3.5.x. These return NULL
         for "no-operation" statements
         """
         self.con.execute("")
 
-    def test_type_map_usage(self):
+    def CheckTypeMapUsage(self):
         """
         pysqlite until 2.4.1 did not rebuild the row_cast_map when recompiling
         a statement. This test exhibits the problem.
@@ -125,13 +127,13 @@
         con = sqlite.connect(":memory:",detect_types=sqlite.PARSE_DECLTYPES)
         con.execute("create table foo(bar timestamp)")
         con.execute("insert into foo(bar) values (?)", (datetime.datetime.now(),))
-        con.execute(SELECT).close()
+        con.execute(SELECT)
         con.execute("drop table foo")
         con.execute("create table foo(bar integer)")
         con.execute("insert into foo(bar) values (5)")
-        con.execute(SELECT).close()
+        con.execute(SELECT)
 
-    def test_bind_mutating_list(self):
+    def CheckBindMutatingList(self):
         # Issue41662: Crash when mutating a list of parameters during iteration.
         class X:
             def __conform__(self, protocol):
@@ -144,7 +146,7 @@
         with self.assertRaises(IndexError):
             con.execute("insert into foo(bar, baz) values (?, ?)", parameters)
 
-    def test_error_msg_decode_error(self):
+    def CheckErrorMsgDecodeError(self):
         # When porting the module to Python 3.0, the error message about
         # decoding errors disappeared. This verifies it's back again.
         with self.assertRaises(sqlite.OperationalError) as cm:
@@ -153,13 +155,13 @@
         msg = "Could not decode to UTF-8 column 'colname' with text 'xxx"
         self.assertIn(msg, str(cm.exception))
 
-    def test_register_adapter(self):
+    def CheckRegisterAdapter(self):
         """
         See issue 3312.
         """
         self.assertRaises(TypeError, sqlite.register_adapter, {}, None)
 
-    def test_set_isolation_level(self):
+    def CheckSetIsolationLevel(self):
         # See issue 27881.
         class CustomStr(str):
             def upper(self):
@@ -189,7 +191,7 @@
                     con.isolation_level = value
                 self.assertEqual(con.isolation_level, "DEFERRED")
 
-    def test_cursor_constructor_call_check(self):
+    def CheckCursorConstructorCallCheck(self):
         """
         Verifies that cursor methods check whether base class __init__ was
         called.
@@ -206,14 +208,14 @@
                                     r'^Base Cursor\.__init__ not called\.$'):
             cur.close()
 
-    def test_str_subclass(self):
+    def CheckStrSubclass(self):
         """
         The Python 3.0 port of the module didn't cope with values of subclasses of str.
         """
         class MyStr(str): pass
         self.con.execute("select ?", (MyStr("abc"),))
 
-    def test_connection_constructor_call_check(self):
+    def CheckConnectionConstructorCallCheck(self):
         """
         Verifies that connection methods check whether base class __init__ was
         called.
@@ -226,7 +228,7 @@
         with self.assertRaises(sqlite.ProgrammingError):
             cur = con.cursor()
 
-    def test_cursor_registration(self):
+    def CheckCursorRegistration(self):
         """
         Verifies that subclassed cursor classes are correctly registered with
         the connection object, too.  (fetch-across-rollback problem)
@@ -248,7 +250,7 @@
         with self.assertRaises(sqlite.InterfaceError):
             cur.fetchall()
 
-    def test_auto_commit(self):
+    def CheckAutoCommit(self):
         """
         Verifies that creating a connection in autocommit mode works.
         2.5.3 introduced a regression so that these could no longer
@@ -256,7 +258,7 @@
         """
         con = sqlite.connect(":memory:", isolation_level=None)
 
-    def test_pragma_autocommit(self):
+    def CheckPragmaAutocommit(self):
         """
         Verifies that running a PRAGMA statement that performs an autocommit
         works. This did not work in 2.5.3/2.5.4.
@@ -268,21 +270,21 @@
         cur.execute("pragma page_size")
         row = cur.fetchone()
 
-    def test_connection_call(self):
+    def CheckConnectionCall(self):
         """
         Call a connection with a non-string SQL request: check error handling
         of the statement constructor.
         """
         self.assertRaises(TypeError, self.con, 1)
 
-    def test_collation(self):
+    def CheckCollation(self):
         def collation_cb(a, b):
             return 1
         self.assertRaises(sqlite.ProgrammingError, self.con.create_collation,
             # Lone surrogate cannot be encoded to the default encoding (utf8)
             "\uDC80", collation_cb)
 
-    def test_recursive_cursor_use(self):
+    def CheckRecursiveCursorUse(self):
         """
         http://bugs.python.org/issue10811
 
@@ -303,7 +305,7 @@
             cur.executemany("insert into b (baz) values (?)",
                             ((i,) for i in foo()))
 
-    def test_convert_timestamp_microsecond_padding(self):
+    def CheckConvertTimestampMicrosecondPadding(self):
         """
         http://bugs.python.org/issue14720
 
@@ -329,13 +331,13 @@
             datetime.datetime(2012, 4, 4, 15, 6, 0, 123456),
         ])
 
-    def test_invalid_isolation_level_type(self):
+    def CheckInvalidIsolationLevelType(self):
         # isolation level is a string, not an integer
         self.assertRaises(TypeError,
                           sqlite.connect, ":memory:", isolation_level=123)
 
 
-    def test_null_character(self):
+    def CheckNullCharacter(self):
         # Issue #21147
         con = sqlite.connect(":memory:")
         self.assertRaises(ValueError, con, "\0select 1")
@@ -344,7 +346,7 @@
         self.assertRaises(ValueError, cur.execute, " \0select 2")
         self.assertRaises(ValueError, cur.execute, "select 2\0")
 
-    def test_commit_cursor_reset(self):
+    def CheckCommitCursorReset(self):
         """
         Connection.commit() used to reset cursors, which made sqlite3
         return rows multiple times when fetched from cursors
@@ -375,7 +377,7 @@
                 counter += 1
         self.assertEqual(counter, 3, "should have returned exactly three rows")
 
-    def test_bpo31770(self):
+    def CheckBpo31770(self):
         """
         The interpreter shouldn't crash in case Cursor.__init__() is called
         more than once.
@@ -391,11 +393,11 @@
         del ref
         support.gc_collect()
 
-    def test_del_isolation_level_segfault(self):
+    def CheckDelIsolation_levelSegfault(self):
         with self.assertRaises(AttributeError):
             del self.con.isolation_level
 
-    def test_bpo37347(self):
+    def CheckBpo37347(self):
         class Printer:
             def log(self, *args):
                 return sqlite.SQLITE_OK
@@ -409,19 +411,13 @@
             self.con.execute("select 1")  # trigger seg fault
             method(None)
 
-    def test_return_empty_bytestring(self):
-        cur = self.con.execute("select X''")
-        val = cur.fetchone()[0]
-        self.assertEqual(val, b'')
 
 
 def suite():
-    tests = [
-        RegressionTests
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    regression_suite = unittest.makeSuite(RegressionTests, "Check")
+    return unittest.TestSuite((
+        regression_suite,
+    ))
 
 def test():
     runner = unittest.TextTestRunner()
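Several of the regression tests above (CheckAutoCommit, CheckPragmaAutocommit,
CheckSetIsolationLevel) hinge on Connection.isolation_level: a BEGIN variant ("",
"DEFERRED", "IMMEDIATE", "EXCLUSIVE") used for implicit transactions, or None for
autocommit. A short sketch of the two modes (illustrative):

    import sqlite3

    con = sqlite3.connect(":memory:", isolation_level=None)  # autocommit mode
    con.execute("create table t(x)")         # effective immediately, no commit needed
    con.isolation_level = "DEFERRED"         # back to implicit transactions
    con.execute("insert into t values (1)")  # opens a transaction
    con.commit()                             # must be committed explicitly
    con.close()
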
diff --git a/common/py3-stdlib/sqlite3/test/transactions.py b/common/py3-stdlib/sqlite3/test/transactions.py
index 8028490..b8a13de 100644
--- a/common/py3-stdlib/sqlite3/test/transactions.py
+++ b/common/py3-stdlib/sqlite3/test/transactions.py
@@ -1,6 +1,7 @@
+#-*- coding: iso-8859-1 -*-
 # pysqlite2/test/transactions.py: tests transactions
 #
-# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
+# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
 #
 # This file is part of pysqlite.
 #
@@ -51,7 +52,7 @@
         except OSError:
             pass
 
-    def test_dml_does_not_auto_commit_before(self):
+    def CheckDMLDoesNotAutoCommitBefore(self):
         self.cur1.execute("create table test(i)")
         self.cur1.execute("insert into test(i) values (5)")
         self.cur1.execute("create table test2(j)")
@@ -59,14 +60,14 @@
         res = self.cur2.fetchall()
         self.assertEqual(len(res), 0)
 
-    def test_insert_starts_transaction(self):
+    def CheckInsertStartsTransaction(self):
         self.cur1.execute("create table test(i)")
         self.cur1.execute("insert into test(i) values (5)")
         self.cur2.execute("select i from test")
         res = self.cur2.fetchall()
         self.assertEqual(len(res), 0)
 
-    def test_update_starts_transaction(self):
+    def CheckUpdateStartsTransaction(self):
         self.cur1.execute("create table test(i)")
         self.cur1.execute("insert into test(i) values (5)")
         self.con1.commit()
@@ -75,7 +76,7 @@
         res = self.cur2.fetchone()[0]
         self.assertEqual(res, 5)
 
-    def test_delete_starts_transaction(self):
+    def CheckDeleteStartsTransaction(self):
         self.cur1.execute("create table test(i)")
         self.cur1.execute("insert into test(i) values (5)")
         self.con1.commit()
@@ -84,7 +85,7 @@
         res = self.cur2.fetchall()
         self.assertEqual(len(res), 1)
 
-    def test_replace_starts_transaction(self):
+    def CheckReplaceStartsTransaction(self):
         self.cur1.execute("create table test(i)")
         self.cur1.execute("insert into test(i) values (5)")
         self.con1.commit()
@@ -94,7 +95,7 @@
         self.assertEqual(len(res), 1)
         self.assertEqual(res[0][0], 5)
 
-    def test_toggle_auto_commit(self):
+    def CheckToggleAutoCommit(self):
         self.cur1.execute("create table test(i)")
         self.cur1.execute("insert into test(i) values (5)")
         self.con1.isolation_level = None
@@ -110,13 +111,17 @@
         res = self.cur2.fetchall()
         self.assertEqual(len(res), 1)
 
-    def test_raise_timeout(self):
+    @unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 2),
+                     'test hangs on sqlite versions older than 3.2.2')
+    def CheckRaiseTimeout(self):
         self.cur1.execute("create table test(i)")
         self.cur1.execute("insert into test(i) values (5)")
         with self.assertRaises(sqlite.OperationalError):
             self.cur2.execute("insert into test(i) values (5)")
 
-    def test_locking(self):
+    @unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 2),
+                     'test hangs on sqlite versions older than 3.2.2')
+    def CheckLocking(self):
         """
         This tests the improved concurrency with pysqlite 2.3.4. You needed
         to roll back con2 before you could commit con1.
@@ -128,7 +133,7 @@
         # NO self.con2.rollback() HERE!!!
         self.con1.commit()
 
-    def test_rollback_cursor_consistency(self):
+    def CheckRollbackCursorConsistency(self):
         """
         Checks if cursors on the connection are set into a "reset" state
         when a rollback is done on the connection.
@@ -148,12 +153,12 @@
         self.con = sqlite.connect(":memory:")
         self.cur = self.con.cursor()
 
-    def test_drop_table(self):
+    def CheckDropTable(self):
         self.cur.execute("create table test(i)")
         self.cur.execute("insert into test(i) values (5)")
         self.cur.execute("drop table test")
 
-    def test_pragma(self):
+    def CheckPragma(self):
         self.cur.execute("create table test(i)")
         self.cur.execute("insert into test(i) values (5)")
         self.cur.execute("pragma count_changes=1")
@@ -166,7 +171,7 @@
     def setUp(self):
         self.con = sqlite.connect(":memory:")
 
-    def test_ddl_does_not_autostart_transaction(self):
+    def CheckDdlDoesNotAutostartTransaction(self):
         # For backwards compatibility reasons, DDL statements should not
         # implicitly start a transaction.
         self.con.execute("create table test(i)")
@@ -174,7 +179,7 @@
         result = self.con.execute("select * from test").fetchall()
         self.assertEqual(result, [])
 
-    def test_immediate_transactional_ddl(self):
+    def CheckImmediateTransactionalDDL(self):
         # You can achieve transactional DDL by issuing a BEGIN
         # statement manually.
         self.con.execute("begin immediate")
@@ -183,7 +188,7 @@
         with self.assertRaises(sqlite.OperationalError):
             self.con.execute("select * from test")
 
-    def test_transactional_ddl(self):
+    def CheckTransactionalDDL(self):
         # You can achieve transactional DDL by issuing a BEGIN
         # statement manually.
         self.con.execute("begin")
@@ -196,14 +201,10 @@
         self.con.close()
 
 def suite():
-    tests = [
-        SpecialCommandTests,
-        TransactionTests,
-        TransactionalDDL,
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    default_suite = unittest.makeSuite(TransactionTests, "Check")
+    special_command_suite = unittest.makeSuite(SpecialCommandTests, "Check")
+    ddl_suite = unittest.makeSuite(TransactionalDDL, "Check")
+    return unittest.TestSuite((default_suite, special_command_suite, ddl_suite))
 
 def test():
     runner = unittest.TextTestRunner()
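transactions.py documents the classic sqlite3 transaction model: DML statements open an
implicit transaction, while DDL deliberately does not autostart one
(CheckDdlDoesNotAutostartTransaction). A sketch using Connection.in_transaction to make
that visible (illustrative):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("create table t(x)")         # DDL: no transaction is started
    assert con.in_transaction is False
    con.execute("insert into t values (1)")  # DML: an implicit transaction begins
    assert con.in_transaction is True
    con.rollback()                           # undoes the insert
    assert con.execute("select count(*) from t").fetchone()[0] == 0
    con.close()
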
diff --git a/common/py3-stdlib/sqlite3/test/types.py b/common/py3-stdlib/sqlite3/test/types.py
index 4bb1de8..d26a9cb 100644
--- a/common/py3-stdlib/sqlite3/test/types.py
+++ b/common/py3-stdlib/sqlite3/test/types.py
@@ -1,6 +1,7 @@
+#-*- coding: iso-8859-1 -*-
 # pysqlite2/test/types.py: tests for type conversion and detection
 #
-# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
+# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
 #
 # This file is part of pysqlite.
 #
@@ -39,33 +40,33 @@
         self.cur.close()
         self.con.close()
 
-    def test_string(self):
-        self.cur.execute("insert into test(s) values (?)", ("Österreich",))
+    def CheckString(self):
+        self.cur.execute("insert into test(s) values (?)", ("Österreich",))
         self.cur.execute("select s from test")
         row = self.cur.fetchone()
-        self.assertEqual(row[0], "Österreich")
+        self.assertEqual(row[0], "Österreich")
 
-    def test_small_int(self):
+    def CheckSmallInt(self):
         self.cur.execute("insert into test(i) values (?)", (42,))
         self.cur.execute("select i from test")
         row = self.cur.fetchone()
         self.assertEqual(row[0], 42)
 
-    def test_large_int(self):
+    def CheckLargeInt(self):
         num = 2**40
         self.cur.execute("insert into test(i) values (?)", (num,))
         self.cur.execute("select i from test")
         row = self.cur.fetchone()
         self.assertEqual(row[0], num)
 
-    def test_float(self):
+    def CheckFloat(self):
         val = 3.14
         self.cur.execute("insert into test(f) values (?)", (val,))
         self.cur.execute("select f from test")
         row = self.cur.fetchone()
         self.assertEqual(row[0], val)
 
-    def test_blob(self):
+    def CheckBlob(self):
         sample = b"Guglhupf"
         val = memoryview(sample)
         self.cur.execute("insert into test(b) values (?)", (val,))
@@ -73,10 +74,10 @@
         row = self.cur.fetchone()
         self.assertEqual(row[0], sample)
 
-    def test_unicode_execute(self):
-        self.cur.execute("select 'Österreich'")
+    def CheckUnicodeExecute(self):
+        self.cur.execute("select 'Österreich'")
         row = self.cur.fetchone()
-        self.assertEqual(row[0], "Österreich")
+        self.assertEqual(row[0], "Österreich")
 
 class DeclTypesTests(unittest.TestCase):
     class Foo:
@@ -110,20 +111,7 @@
     def setUp(self):
         self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
         self.cur = self.con.cursor()
-        self.cur.execute("""
-            create table test(
-                i int,
-                s str,
-                f float,
-                b bool,
-                u unicode,
-                foo foo,
-                bin blob,
-                n1 number,
-                n2 number(5),
-                bad bad,
-                cbin cblob)
-        """)
+        self.cur.execute("create table test(i int, s str, f float, b bool, u unicode, foo foo, bin blob, n1 number, n2 number(5), bad bad)")
 
         # override float, make them always return the same number
         sqlite.converters["FLOAT"] = lambda x: 47.2
@@ -134,7 +122,6 @@
         sqlite.converters["BAD"] = DeclTypesTests.BadConform
         sqlite.converters["WRONG"] = lambda x: "WRONG"
         sqlite.converters["NUMBER"] = float
-        sqlite.converters["CBLOB"] = lambda x: b"blobish"
 
     def tearDown(self):
         del sqlite.converters["FLOAT"]
@@ -143,25 +130,24 @@
         del sqlite.converters["BAD"]
         del sqlite.converters["WRONG"]
         del sqlite.converters["NUMBER"]
-        del sqlite.converters["CBLOB"]
         self.cur.close()
         self.con.close()
 
-    def test_string(self):
+    def CheckString(self):
         # default
         self.cur.execute("insert into test(s) values (?)", ("foo",))
         self.cur.execute('select s as "s [WRONG]" from test')
         row = self.cur.fetchone()
         self.assertEqual(row[0], "foo")
 
-    def test_small_int(self):
+    def CheckSmallInt(self):
         # default
         self.cur.execute("insert into test(i) values (?)", (42,))
         self.cur.execute("select i from test")
         row = self.cur.fetchone()
         self.assertEqual(row[0], 42)
 
-    def test_large_int(self):
+    def CheckLargeInt(self):
         # default
         num = 2**40
         self.cur.execute("insert into test(i) values (?)", (num,))
@@ -169,7 +155,7 @@
         row = self.cur.fetchone()
         self.assertEqual(row[0], num)
 
-    def test_float(self):
+    def CheckFloat(self):
         # custom
         val = 3.14
         self.cur.execute("insert into test(f) values (?)", (val,))
@@ -177,7 +163,7 @@
         row = self.cur.fetchone()
         self.assertEqual(row[0], 47.2)
 
-    def test_bool(self):
+    def CheckBool(self):
         # custom
         self.cur.execute("insert into test(b) values (?)", (False,))
         self.cur.execute("select b from test")
@@ -190,7 +176,7 @@
         row = self.cur.fetchone()
         self.assertIs(row[0], True)
 
-    def test_unicode(self):
+    def CheckUnicode(self):
         # default
         val = "\xd6sterreich"
         self.cur.execute("insert into test(u) values (?)", (val,))
@@ -198,14 +184,14 @@
         row = self.cur.fetchone()
         self.assertEqual(row[0], val)
 
-    def test_foo(self):
+    def CheckFoo(self):
         val = DeclTypesTests.Foo("bla")
         self.cur.execute("insert into test(foo) values (?)", (val,))
         self.cur.execute("select foo from test")
         row = self.cur.fetchone()
         self.assertEqual(row[0], val)
 
-    def test_error_in_conform(self):
+    def CheckErrorInConform(self):
         val = DeclTypesTests.BadConform(TypeError)
         with self.assertRaises(sqlite.InterfaceError):
             self.cur.execute("insert into test(bad) values (?)", (val,))
@@ -218,19 +204,19 @@
         with self.assertRaises(KeyboardInterrupt):
             self.cur.execute("insert into test(bad) values (:val)", {"val": val})
 
-    def test_unsupported_seq(self):
+    def CheckUnsupportedSeq(self):
         class Bar: pass
         val = Bar()
         with self.assertRaises(sqlite.InterfaceError):
             self.cur.execute("insert into test(f) values (?)", (val,))
 
-    def test_unsupported_dict(self):
+    def CheckUnsupportedDict(self):
         class Bar: pass
         val = Bar()
         with self.assertRaises(sqlite.InterfaceError):
             self.cur.execute("insert into test(f) values (:val)", {"val": val})
 
-    def test_blob(self):
+    def CheckBlob(self):
         # default
         sample = b"Guglhupf"
         val = memoryview(sample)
@@ -239,27 +225,19 @@
         row = self.cur.fetchone()
         self.assertEqual(row[0], sample)
 
-    def test_number1(self):
+    def CheckNumber1(self):
         self.cur.execute("insert into test(n1) values (5)")
         value = self.cur.execute("select n1 from test").fetchone()[0]
         # if the converter is not used, it's an int instead of a float
         self.assertEqual(type(value), float)
 
-    def test_number2(self):
+    def CheckNumber2(self):
         """Checks whether converter names are cut off at '(' characters"""
         self.cur.execute("insert into test(n2) values (5)")
         value = self.cur.execute("select n2 from test").fetchone()[0]
         # if the converter is not used, it's an int instead of a float
         self.assertEqual(type(value), float)
 
-    def test_convert_zero_sized_blob(self):
-        self.con.execute("insert into test(cbin) values (?)", (b"",))
-        cur = self.con.execute("select cbin from test")
-        # Zero-sized blobs with converters returns None.  This differs from
-        # blobs without a converter, where b"" is returned.
-        self.assertIsNone(cur.fetchone()[0])
-
-
 class ColNamesTests(unittest.TestCase):
     def setUp(self):
         self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
@@ -279,7 +257,7 @@
         self.cur.close()
         self.con.close()
 
-    def test_decl_type_not_used(self):
+    def CheckDeclTypeNotUsed(self):
         """
         Assures that the declared type is not used when PARSE_DECLTYPES
         is not set.
@@ -289,13 +267,13 @@
         val = self.cur.fetchone()[0]
         self.assertEqual(val, "xxx")
 
-    def test_none(self):
+    def CheckNone(self):
         self.cur.execute("insert into test(x) values (?)", (None,))
         self.cur.execute("select x from test")
         val = self.cur.fetchone()[0]
         self.assertEqual(val, None)
 
-    def test_col_name(self):
+    def CheckColName(self):
         self.cur.execute("insert into test(x) values (?)", ("xxx",))
         self.cur.execute('select x as "x y [bar]" from test')
         val = self.cur.fetchone()[0]
@@ -305,12 +283,12 @@
         # '[' (and the preceding space) should be stripped.
         self.assertEqual(self.cur.description[0][0], "x y")
 
-    def test_case_in_converter_name(self):
+    def CheckCaseInConverterName(self):
         self.cur.execute("select 'other' as \"x [b1b1]\"")
         val = self.cur.fetchone()[0]
         self.assertEqual(val, "MARKER")
 
-    def test_cursor_description_no_row(self):
+    def CheckCursorDescriptionNoRow(self):
         """
         cursor.description should at least provide the column name(s), even if
         no row is returned.
@@ -318,7 +296,7 @@
         self.cur.execute("select * from test where 0 = 1")
         self.assertEqual(self.cur.description[0][0], "x")
 
-    def test_cursor_description_insert(self):
+    def CheckCursorDescriptionInsert(self):
         self.cur.execute("insert into test values (1)")
         self.assertIsNone(self.cur.description)
 
@@ -335,19 +313,19 @@
         self.cur.close()
         self.con.close()
 
-    def test_cursor_description_cte_simple(self):
+    def CheckCursorDescriptionCTESimple(self):
         self.cur.execute("with one as (select 1) select * from one")
         self.assertIsNotNone(self.cur.description)
         self.assertEqual(self.cur.description[0][0], "1")
 
-    def test_cursor_description_cte_multiple_columns(self):
+    def CheckCursorDescriptionCTESMultipleColumns(self):
         self.cur.execute("insert into test values(1)")
         self.cur.execute("insert into test values(2)")
         self.cur.execute("with testCTE as (select * from test) select * from testCTE")
         self.assertIsNotNone(self.cur.description)
         self.assertEqual(self.cur.description[0][0], "x")
 
-    def test_cursor_description_cte(self):
+    def CheckCursorDescriptionCTE(self):
         self.cur.execute("insert into test values (1)")
         self.cur.execute("with bar as (select * from test) select * from test where x = 1")
         self.assertIsNotNone(self.cur.description)
@@ -376,7 +354,7 @@
         self.cur.close()
         self.con.close()
 
-    def test_caster_is_used(self):
+    def CheckCasterIsUsed(self):
         self.cur.execute("select ?", (4,))
         val = self.cur.fetchone()[0]
         self.assertEqual(type(val), float)
@@ -394,7 +372,7 @@
     def tearDown(self):
         self.con.close()
 
-    def test_binary_input_for_converter(self):
+    def CheckBinaryInputForConverter(self):
         testdata = b"abcdefg" * 10
         result = self.con.execute('select ? as "x [bin]"', (memoryview(zlib.compress(testdata)),)).fetchone()[0]
         self.assertEqual(testdata, result)
@@ -409,21 +387,23 @@
         self.cur.close()
         self.con.close()
 
-    def test_sqlite_date(self):
+    def CheckSqliteDate(self):
         d = sqlite.Date(2004, 2, 14)
         self.cur.execute("insert into test(d) values (?)", (d,))
         self.cur.execute("select d from test")
         d2 = self.cur.fetchone()[0]
         self.assertEqual(d, d2)
 
-    def test_sqlite_timestamp(self):
+    def CheckSqliteTimestamp(self):
         ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0)
         self.cur.execute("insert into test(ts) values (?)", (ts,))
         self.cur.execute("select ts from test")
         ts2 = self.cur.fetchone()[0]
         self.assertEqual(ts, ts2)
 
-    def test_sql_timestamp(self):
+    @unittest.skipIf(sqlite.sqlite_version_info < (3, 1),
+                     'the date functions are available on 3.1 or later')
+    def CheckSqlTimestamp(self):
         now = datetime.datetime.utcnow()
         self.cur.execute("insert into test(ts) values (current_timestamp)")
         self.cur.execute("select ts from test")
@@ -431,14 +411,14 @@
         self.assertEqual(type(ts), datetime.datetime)
         self.assertEqual(ts.year, now.year)
 
-    def test_date_time_sub_seconds(self):
+    def CheckDateTimeSubSeconds(self):
         ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 500000)
         self.cur.execute("insert into test(ts) values (?)", (ts,))
         self.cur.execute("select ts from test")
         ts2 = self.cur.fetchone()[0]
         self.assertEqual(ts, ts2)
 
-    def test_date_time_sub_seconds_floating_point(self):
+    def CheckDateTimeSubSecondsFloatingPoint(self):
         ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 510241)
         self.cur.execute("insert into test(ts) values (?)", (ts,))
         self.cur.execute("select ts from test")
@@ -446,18 +426,14 @@
         self.assertEqual(ts, ts2)
 
 def suite():
-    tests = [
-        BinaryConverterTests,
-        ColNamesTests,
-        CommonTableExpressionTests,
-        DateTimeTests,
-        DeclTypesTests,
-        ObjectAdaptationTests,
-        SqliteTypeTests,
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    sqlite_type_suite = unittest.makeSuite(SqliteTypeTests, "Check")
+    decltypes_type_suite = unittest.makeSuite(DeclTypesTests, "Check")
+    colnames_type_suite = unittest.makeSuite(ColNamesTests, "Check")
+    adaptation_suite = unittest.makeSuite(ObjectAdaptationTests, "Check")
+    bin_suite = unittest.makeSuite(BinaryConverterTests, "Check")
+    date_suite = unittest.makeSuite(DateTimeTests, "Check")
+    cte_suite = unittest.makeSuite(CommonTableExpressionTests, "Check")
+    return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, bin_suite, date_suite, cte_suite))
 
 def test():
     runner = unittest.TextTestRunner()
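types.py builds on the module-level adapter/converter registry: with
detect_types=sqlite3.PARSE_DECLTYPES, the declared column type (upper-cased and cut off
at the first '(' or space, as CheckNumber2 verifies) selects a converter that turns the
raw bytes back into a Python object. A round-trip sketch with a hypothetical BOOL type
(names are illustrative):

    import sqlite3

    sqlite3.register_adapter(bool, int)                      # Python value -> storage
    sqlite3.register_converter("BOOL", lambda b: b != b"0")  # raw bytes -> Python value

    con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
    con.execute("create table t(flag bool)")  # decltype 'bool' matches converter 'BOOL'
    con.execute("insert into t values (?)", (True,))
    assert con.execute("select flag from t").fetchone()[0] is True
    con.close()
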
diff --git a/common/py3-stdlib/sqlite3/test/userfunctions.py b/common/py3-stdlib/sqlite3/test/userfunctions.py
index 539fd4b..c11c82e 100644
--- a/common/py3-stdlib/sqlite3/test/userfunctions.py
+++ b/common/py3-stdlib/sqlite3/test/userfunctions.py
@@ -25,13 +25,8 @@
 import unittest.mock
 import sqlite3 as sqlite
 
-from test.support import gc_collect
-
-
 def func_returntext():
     return "foo"
-def func_returntextwithnull():
-    return "1\x002"
 def func_returnunicode():
     return "bar"
 def func_returnint():
@@ -47,6 +42,22 @@
 def func_raiseexception():
     5/0
 
+def func_isstring(v):
+    return type(v) is str
+def func_isint(v):
+    return type(v) is int
+def func_isfloat(v):
+    return type(v) is float
+def func_isnone(v):
+    return type(v) is type(None)
+def func_isblob(v):
+    return isinstance(v, (bytes, memoryview))
+def func_islonglong(v):
+    return isinstance(v, int) and v >= 1<<31
+
+def func(*args):
+    return len(args)
+
 class AggrNoStep:
     def __init__(self):
         pass
@@ -126,44 +137,36 @@
     def finalize(self):
         return self.val
 
-class AggrText:
-    def __init__(self):
-        self.txt = ""
-    def step(self, txt):
-        self.txt = self.txt + txt
-    def finalize(self):
-        return self.txt
-
-
 class FunctionTests(unittest.TestCase):
     def setUp(self):
         self.con = sqlite.connect(":memory:")
 
         self.con.create_function("returntext", 0, func_returntext)
-        self.con.create_function("returntextwithnull", 0, func_returntextwithnull)
         self.con.create_function("returnunicode", 0, func_returnunicode)
         self.con.create_function("returnint", 0, func_returnint)
         self.con.create_function("returnfloat", 0, func_returnfloat)
         self.con.create_function("returnnull", 0, func_returnnull)
         self.con.create_function("returnblob", 0, func_returnblob)
         self.con.create_function("returnlonglong", 0, func_returnlonglong)
-        self.con.create_function("returnnan", 0, lambda: float("nan"))
-        self.con.create_function("returntoolargeint", 0, lambda: 1 << 65)
         self.con.create_function("raiseexception", 0, func_raiseexception)
 
-        self.con.create_function("isblob", 1, lambda x: isinstance(x, bytes))
-        self.con.create_function("isnone", 1, lambda x: x is None)
-        self.con.create_function("spam", -1, lambda *x: len(x))
+        self.con.create_function("isstring", 1, func_isstring)
+        self.con.create_function("isint", 1, func_isint)
+        self.con.create_function("isfloat", 1, func_isfloat)
+        self.con.create_function("isnone", 1, func_isnone)
+        self.con.create_function("isblob", 1, func_isblob)
+        self.con.create_function("islonglong", 1, func_islonglong)
+        self.con.create_function("spam", -1, func)
         self.con.execute("create table test(t text)")
 
     def tearDown(self):
         self.con.close()
 
-    def test_func_error_on_create(self):
+    def CheckFuncErrorOnCreate(self):
         with self.assertRaises(sqlite.OperationalError):
             self.con.create_function("bla", -100, lambda x: 2*x)
 
-    def test_func_ref_count(self):
+    def CheckFuncRefCount(self):
         def getfunc():
             def f():
                 return 1
@@ -175,34 +178,28 @@
         cur = self.con.cursor()
         cur.execute("select reftest()")
 
-    def test_func_return_text(self):
+    def CheckFuncReturnText(self):
         cur = self.con.cursor()
         cur.execute("select returntext()")
         val = cur.fetchone()[0]
         self.assertEqual(type(val), str)
         self.assertEqual(val, "foo")
 
-    def test_func_return_text_with_null_char(self):
-        cur = self.con.cursor()
-        res = cur.execute("select returntextwithnull()").fetchone()[0]
-        self.assertEqual(type(res), str)
-        self.assertEqual(res, "1\x002")
-
-    def test_func_return_unicode(self):
+    def CheckFuncReturnUnicode(self):
         cur = self.con.cursor()
         cur.execute("select returnunicode()")
         val = cur.fetchone()[0]
         self.assertEqual(type(val), str)
         self.assertEqual(val, "bar")
 
-    def test_func_return_int(self):
+    def CheckFuncReturnInt(self):
         cur = self.con.cursor()
         cur.execute("select returnint()")
         val = cur.fetchone()[0]
         self.assertEqual(type(val), int)
         self.assertEqual(val, 42)
 
-    def test_func_return_float(self):
+    def CheckFuncReturnFloat(self):
         cur = self.con.cursor()
         cur.execute("select returnfloat()")
         val = cur.fetchone()[0]
@@ -210,99 +207,75 @@
         if val < 3.139 or val > 3.141:
             self.fail("wrong value")
 
-    def test_func_return_null(self):
+    def CheckFuncReturnNull(self):
         cur = self.con.cursor()
         cur.execute("select returnnull()")
         val = cur.fetchone()[0]
         self.assertEqual(type(val), type(None))
         self.assertEqual(val, None)
 
-    def test_func_return_blob(self):
+    def CheckFuncReturnBlob(self):
         cur = self.con.cursor()
         cur.execute("select returnblob()")
         val = cur.fetchone()[0]
         self.assertEqual(type(val), bytes)
         self.assertEqual(val, b"blob")
 
-    def test_func_return_long_long(self):
+    def CheckFuncReturnLongLong(self):
         cur = self.con.cursor()
         cur.execute("select returnlonglong()")
         val = cur.fetchone()[0]
         self.assertEqual(val, 1<<31)
 
-    def test_func_return_nan(self):
-        cur = self.con.cursor()
-        cur.execute("select returnnan()")
-        self.assertIsNone(cur.fetchone()[0])
-
-    def test_func_return_too_large_int(self):
-        cur = self.con.cursor()
-        with self.assertRaises(sqlite.OperationalError):
-            self.con.execute("select returntoolargeint()")
-
-    def test_func_exception(self):
+    def CheckFuncException(self):
         cur = self.con.cursor()
         with self.assertRaises(sqlite.OperationalError) as cm:
             cur.execute("select raiseexception()")
             cur.fetchone()
         self.assertEqual(str(cm.exception), 'user-defined function raised exception')
 
-    def test_any_arguments(self):
+    def CheckParamString(self):
+        cur = self.con.cursor()
+        cur.execute("select isstring(?)", ("foo",))
+        val = cur.fetchone()[0]
+        self.assertEqual(val, 1)
+
+    def CheckParamInt(self):
+        cur = self.con.cursor()
+        cur.execute("select isint(?)", (42,))
+        val = cur.fetchone()[0]
+        self.assertEqual(val, 1)
+
+    def CheckParamFloat(self):
+        cur = self.con.cursor()
+        cur.execute("select isfloat(?)", (3.14,))
+        val = cur.fetchone()[0]
+        self.assertEqual(val, 1)
+
+    def CheckParamNone(self):
+        cur = self.con.cursor()
+        cur.execute("select isnone(?)", (None,))
+        val = cur.fetchone()[0]
+        self.assertEqual(val, 1)
+
+    def CheckParamBlob(self):
+        cur = self.con.cursor()
+        cur.execute("select isblob(?)", (memoryview(b"blob"),))
+        val = cur.fetchone()[0]
+        self.assertEqual(val, 1)
+
+    def CheckParamLongLong(self):
+        cur = self.con.cursor()
+        cur.execute("select islonglong(?)", (1<<42,))
+        val = cur.fetchone()[0]
+        self.assertEqual(val, 1)
+
+    def CheckAnyArguments(self):
         cur = self.con.cursor()
         cur.execute("select spam(?, ?)", (1, 2))
         val = cur.fetchone()[0]
         self.assertEqual(val, 2)
 
-    def test_empty_blob(self):
-        cur = self.con.execute("select isblob(x'')")
-        self.assertTrue(cur.fetchone()[0])
-
-    def test_nan_float(self):
-        cur = self.con.execute("select isnone(?)", (float("nan"),))
-        # SQLite has no concept of nan; it is converted to NULL
-        self.assertTrue(cur.fetchone()[0])
-
-    def test_too_large_int(self):
-        err = "Python int too large to convert to SQLite INTEGER"
-        self.assertRaisesRegex(OverflowError, err, self.con.execute,
-                               "select spam(?)", (1 << 65,))
-
-    def test_non_contiguous_blob(self):
-        self.assertRaisesRegex(ValueError, "could not convert BLOB to buffer",
-                               self.con.execute, "select spam(?)",
-                               (memoryview(b"blob")[::2],))
-
-    def test_param_surrogates(self):
-        self.assertRaisesRegex(UnicodeEncodeError, "surrogates not allowed",
-                               self.con.execute, "select spam(?)",
-                               ("\ud803\ude6d",))
-
-    def test_func_params(self):
-        results = []
-        def append_result(arg):
-            results.append((arg, type(arg)))
-        self.con.create_function("test_params", 1, append_result)
-
-        dataset = [
-            (42, int),
-            (-1, int),
-            (1234567890123456789, int),
-            (4611686018427387905, int),  # 63-bit int with non-zero low bits
-            (3.14, float),
-            (float('inf'), float),
-            ("text", str),
-            ("1\x002", str),
-            ("\u02e2q\u02e1\u2071\u1d57\u1d49", str),
-            (b"blob", bytes),
-            (bytearray(range(2)), bytes),
-            (memoryview(b"blob"), bytes),
-            (None, type(None)),
-        ]
-        for val, _ in dataset:
-            cur = self.con.execute("select test_params(?)", (val,))
-            cur.fetchone()
-        self.assertEqual(dataset, results)
-
     # Regarding deterministic functions:
     #
     # Between 3.8.3 and 3.15.0, deterministic functions were only used to
@@ -311,7 +284,7 @@
     # deterministic functions were permitted in WHERE clauses of partial
     # indices, which allows testing based on syntax instead of the query optimizer.
     @unittest.skipIf(sqlite.sqlite_version_info < (3, 8, 3), "Requires SQLite 3.8.3 or higher")
-    def test_func_non_deterministic(self):
+    def CheckFuncNonDeterministic(self):
         mock = unittest.mock.Mock(return_value=None)
         self.con.create_function("nondeterministic", 0, mock, deterministic=False)
         if sqlite.sqlite_version_info < (3, 15, 0):
@@ -322,7 +295,7 @@
                 self.con.execute("create index t on test(t) where nondeterministic() is not null")
 
     @unittest.skipIf(sqlite.sqlite_version_info < (3, 8, 3), "Requires SQLite 3.8.3 or higher")
-    def test_func_deterministic(self):
+    def CheckFuncDeterministic(self):
         mock = unittest.mock.Mock(return_value=None)
         self.con.create_function("deterministic", 0, mock, deterministic=True)
         if sqlite.sqlite_version_info < (3, 15, 0):
@@ -335,30 +308,14 @@
                 self.fail("Unexpected failure while creating partial index")
 
     @unittest.skipIf(sqlite.sqlite_version_info >= (3, 8, 3), "SQLite < 3.8.3 needed")
-    def test_func_deterministic_not_supported(self):
+    def CheckFuncDeterministicNotSupported(self):
         with self.assertRaises(sqlite.NotSupportedError):
             self.con.create_function("deterministic", 0, int, deterministic=True)
 
-    def test_func_deterministic_keyword_only(self):
+    def CheckFuncDeterministicKeywordOnly(self):
         with self.assertRaises(TypeError):
             self.con.create_function("deterministic", 0, int, True)
 
-    def test_function_destructor_via_gc(self):
-        # See bpo-44304: The destructor of the user function can
-        # crash if it is called without the GIL from the gc functions
-        dest = sqlite.connect(':memory:')
-        def md5sum(t):
-            return
-
-        dest.create_function("md5", 1, md5sum)
-        x = dest("create table lang (name, first_appeared)")
-        del md5sum, dest
-
-        y = [x]
-        y.append(y)
-
-        del x,y
-        gc_collect()
 
 class AggregateTests(unittest.TestCase):
     def setUp(self):
@@ -384,88 +341,87 @@
         self.con.create_aggregate("checkType", 2, AggrCheckType)
         self.con.create_aggregate("checkTypes", -1, AggrCheckTypes)
         self.con.create_aggregate("mysum", 1, AggrSum)
-        self.con.create_aggregate("aggtxt", 1, AggrText)
 
     def tearDown(self):
         #self.cur.close()
         #self.con.close()
         pass
 
-    def test_aggr_error_on_create(self):
+    def CheckAggrErrorOnCreate(self):
         with self.assertRaises(sqlite.OperationalError):
             self.con.create_function("bla", -100, AggrSum)
 
-    def test_aggr_no_step(self):
+    def CheckAggrNoStep(self):
         cur = self.con.cursor()
         with self.assertRaises(AttributeError) as cm:
             cur.execute("select nostep(t) from test")
         self.assertEqual(str(cm.exception), "'AggrNoStep' object has no attribute 'step'")
 
-    def test_aggr_no_finalize(self):
+    def CheckAggrNoFinalize(self):
         cur = self.con.cursor()
         with self.assertRaises(sqlite.OperationalError) as cm:
             cur.execute("select nofinalize(t) from test")
             val = cur.fetchone()[0]
         self.assertEqual(str(cm.exception), "user-defined aggregate's 'finalize' method raised error")
 
-    def test_aggr_exception_in_init(self):
+    def CheckAggrExceptionInInit(self):
         cur = self.con.cursor()
         with self.assertRaises(sqlite.OperationalError) as cm:
             cur.execute("select excInit(t) from test")
             val = cur.fetchone()[0]
         self.assertEqual(str(cm.exception), "user-defined aggregate's '__init__' method raised error")
 
-    def test_aggr_exception_in_step(self):
+    def CheckAggrExceptionInStep(self):
         cur = self.con.cursor()
         with self.assertRaises(sqlite.OperationalError) as cm:
             cur.execute("select excStep(t) from test")
             val = cur.fetchone()[0]
         self.assertEqual(str(cm.exception), "user-defined aggregate's 'step' method raised error")
 
-    def test_aggr_exception_in_finalize(self):
+    def CheckAggrExceptionInFinalize(self):
         cur = self.con.cursor()
         with self.assertRaises(sqlite.OperationalError) as cm:
             cur.execute("select excFinalize(t) from test")
             val = cur.fetchone()[0]
         self.assertEqual(str(cm.exception), "user-defined aggregate's 'finalize' method raised error")
 
-    def test_aggr_check_param_str(self):
+    def CheckAggrCheckParamStr(self):
         cur = self.con.cursor()
-        cur.execute("select checkTypes('str', ?, ?)", ("foo", str()))
+        cur.execute("select checkType('str', ?)", ("foo",))
         val = cur.fetchone()[0]
-        self.assertEqual(val, 2)
+        self.assertEqual(val, 1)
 
-    def test_aggr_check_param_int(self):
+    def CheckAggrCheckParamInt(self):
         cur = self.con.cursor()
         cur.execute("select checkType('int', ?)", (42,))
         val = cur.fetchone()[0]
         self.assertEqual(val, 1)
 
-    def test_aggr_check_params_int(self):
+    def CheckAggrCheckParamsInt(self):
         cur = self.con.cursor()
         cur.execute("select checkTypes('int', ?, ?)", (42, 24))
         val = cur.fetchone()[0]
         self.assertEqual(val, 2)
 
-    def test_aggr_check_param_float(self):
+    def CheckAggrCheckParamFloat(self):
         cur = self.con.cursor()
         cur.execute("select checkType('float', ?)", (3.14,))
         val = cur.fetchone()[0]
         self.assertEqual(val, 1)
 
-    def test_aggr_check_param_none(self):
+    def CheckAggrCheckParamNone(self):
         cur = self.con.cursor()
         cur.execute("select checkType('None', ?)", (None,))
         val = cur.fetchone()[0]
         self.assertEqual(val, 1)
 
-    def test_aggr_check_param_blob(self):
+    def CheckAggrCheckParamBlob(self):
         cur = self.con.cursor()
         cur.execute("select checkType('blob', ?)", (memoryview(b"blob"),))
         val = cur.fetchone()[0]
         self.assertEqual(val, 1)
 
-    def test_aggr_check_aggr_sum(self):
+    def CheckAggrCheckAggrSum(self):
         cur = self.con.cursor()
         cur.execute("delete from test")
         cur.executemany("insert into test(i) values (?)", [(10,), (20,), (30,)])
@@ -473,20 +429,6 @@
         val = cur.fetchone()[0]
         self.assertEqual(val, 60)
 
-    def test_aggr_no_match(self):
-        cur = self.con.execute("select mysum(i) from (select 1 as i) where i == 0")
-        val = cur.fetchone()[0]
-        self.assertIsNone(val)
-
-    def test_aggr_text(self):
-        cur = self.con.cursor()
-        for txt in ["foo", "1\x002"]:
-            with self.subTest(txt=txt):
-                cur.execute("select aggtxt(?) from test", (txt,))
-                val = cur.fetchone()[0]
-                self.assertEqual(val, txt)
-
-
 class AuthorizerTests(unittest.TestCase):
     @staticmethod
     def authorizer_cb(action, arg1, arg2, dbname, source):
@@ -552,17 +494,17 @@
 
 
 def suite():
-    tests = [
-        AggregateTests,
-        AuthorizerIllegalTypeTests,
-        AuthorizerLargeIntegerTests,
-        AuthorizerRaiseExceptionTests,
-        AuthorizerTests,
-        FunctionTests,
-    ]
-    return unittest.TestSuite(
-        [unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
-    )
+    function_suite = unittest.makeSuite(FunctionTests, "Check")
+    aggregate_suite = unittest.makeSuite(AggregateTests, "Check")
+    authorizer_suite = unittest.makeSuite(AuthorizerTests)
+    return unittest.TestSuite((
+            function_suite,
+            aggregate_suite,
+            authorizer_suite,
+            unittest.makeSuite(AuthorizerRaiseExceptionTests),
+            unittest.makeSuite(AuthorizerIllegalTypeTests),
+            unittest.makeSuite(AuthorizerLargeIntegerTests),
+        ))
 
 def test():
     runner = unittest.TextTestRunner()
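
For context across the renames above, a minimal sketch (not part of the patch) of the sqlite3 user-defined function API these tests exercise; the function name and query below are illustrative:

    import sqlite3

    con = sqlite3.connect(":memory:")
    # Register a one-argument scalar SQL function; an arity of -1 (as used
    # for "spam" above) would accept any number of arguments.
    con.create_function("double", 1, lambda x: 2 * x)
    print(con.execute("select double(21)").fetchone()[0])  # 42
    con.close()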
diff --git a/common/py3-stdlib/ssl.py b/common/py3-stdlib/ssl.py
index 181065d..30f4e59 100644
--- a/common/py3-stdlib/ssl.py
+++ b/common/py3-stdlib/ssl.py
@@ -253,7 +253,7 @@
     from _ssl import enum_certificates, enum_crls
 
 from socket import socket, SOCK_STREAM, create_connection
-from socket import SOL_SOCKET, SO_TYPE, _GLOBAL_DEFAULT_TIMEOUT
+from socket import SOL_SOCKET, SO_TYPE
 import socket as _socket
 import base64        # for DER-to-PEM translation
 import errno
@@ -381,11 +381,6 @@
     CertificateError is raised on failure. On success, the function
     returns nothing.
     """
-    warnings.warn(
-        "ssl.match_hostname() is deprecated",
-        category=DeprecationWarning,
-        stacklevel=2
-    )
     if not cert:
         raise ValueError("empty or no certificate, match_hostname needs a "
                          "SSL socket or SSL context with either "
@@ -484,14 +479,7 @@
     sslsocket_class = None  # SSLSocket is assigned later.
     sslobject_class = None  # SSLObject is assigned later.
 
-    def __new__(cls, protocol=None, *args, **kwargs):
-        if protocol is None:
-            warnings.warn(
-                "ssl.SSLContext() without protocol argument is deprecated.",
-                category=DeprecationWarning,
-                stacklevel=2
-            )
-            protocol = PROTOCOL_TLS
+    def __new__(cls, protocol=PROTOCOL_TLS, *args, **kwargs):
         self = _SSLContext.__new__(cls, protocol)
         return self
 
@@ -530,11 +518,6 @@
         )
 
     def set_npn_protocols(self, npn_protocols):
-        warnings.warn(
-            "ssl NPN is deprecated, use ALPN instead",
-            DeprecationWarning,
-            stacklevel=2
-        )
         protos = bytearray()
         for protocol in npn_protocols:
             b = bytes(protocol, 'ascii')
@@ -751,15 +734,12 @@
     # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
     # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
     # by default.
+    context = SSLContext(PROTOCOL_TLS)
+
     if purpose == Purpose.SERVER_AUTH:
         # verify certs and host name in client mode
-        context = SSLContext(PROTOCOL_TLS_CLIENT)
         context.verify_mode = CERT_REQUIRED
         context.check_hostname = True
-    elif purpose == Purpose.CLIENT_AUTH:
-        context = SSLContext(PROTOCOL_TLS_SERVER)
-    else:
-        raise ValueError(purpose)
 
     if cafile or capath or cadata:
         context.load_verify_locations(cafile, capath, cadata)
@@ -775,7 +755,7 @@
             context.keylog_filename = keylogfile
     return context
 
-def _create_unverified_context(protocol=None, *, cert_reqs=CERT_NONE,
+def _create_unverified_context(protocol=PROTOCOL_TLS, *, cert_reqs=CERT_NONE,
                            check_hostname=False, purpose=Purpose.SERVER_AUTH,
                            certfile=None, keyfile=None,
                            cafile=None, capath=None, cadata=None):
@@ -792,18 +772,10 @@
     # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
     # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
     # by default.
-    if purpose == Purpose.SERVER_AUTH:
-        # verify certs and host name in client mode
-        if protocol is None:
-            protocol = PROTOCOL_TLS_CLIENT
-    elif purpose == Purpose.CLIENT_AUTH:
-        if protocol is None:
-            protocol = PROTOCOL_TLS_SERVER
-    else:
-        raise ValueError(purpose)
-
     context = SSLContext(protocol)
-    context.check_hostname = check_hostname
+
+    if not check_hostname:
+        context.check_hostname = False
     if cert_reqs is not None:
         context.verify_mode = cert_reqs
     if check_hostname:
@@ -937,17 +909,15 @@
         """Return the currently selected NPN protocol as a string, or ``None``
         if a next protocol was not negotiated or if NPN is not supported by one
         of the peers."""
-        warnings.warn(
-            "ssl NPN is deprecated, use ALPN instead",
-            DeprecationWarning,
-            stacklevel=2
-        )
+        if _ssl.HAS_NPN:
+            return self._sslobj.selected_npn_protocol()
 
     def selected_alpn_protocol(self):
         """Return the currently selected ALPN protocol as a string, or ``None``
         if a next protocol was not negotiated or if ALPN is not supported by one
         of the peers."""
-        return self._sslobj.selected_alpn_protocol()
+        if _ssl.HAS_ALPN:
+            return self._sslobj.selected_alpn_protocol()
 
     def cipher(self):
         """Return the currently selected cipher as a 3-tuple ``(name,
@@ -1156,12 +1126,10 @@
     @_sslcopydoc
     def selected_npn_protocol(self):
         self._checkClosed()
-        warnings.warn(
-            "ssl NPN is deprecated, use ALPN instead",
-            DeprecationWarning,
-            stacklevel=2
-        )
-        return None
+        if self._sslobj is None or not _ssl.HAS_NPN:
+            return None
+        else:
+            return self._sslobj.selected_npn_protocol()
 
     @_sslcopydoc
     def selected_alpn_protocol(self):
@@ -1420,11 +1388,7 @@
                 do_handshake_on_connect=True,
                 suppress_ragged_eofs=True,
                 ciphers=None):
-    warnings.warn(
-        "ssl.wrap_socket() is deprecated, use SSLContext.wrap_socket()",
-        category=DeprecationWarning,
-        stacklevel=2
-    )
+
     if server_side and not certfile:
         raise ValueError("certfile must be specified for server-side "
                          "operations")
@@ -1502,14 +1466,11 @@
     d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
     return base64.decodebytes(d.encode('ASCII', 'strict'))
 
-def get_server_certificate(addr, ssl_version=PROTOCOL_TLS_CLIENT,
-                           ca_certs=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
+def get_server_certificate(addr, ssl_version=PROTOCOL_TLS, ca_certs=None):
     """Retrieve the certificate from the server at the specified address,
     and return it as a PEM-encoded string.
     If 'ca_certs' is specified, validate the server cert against it.
-    If 'ssl_version' is specified, use it in the connection attempt.
-    If 'timeout' is specified, use it in the connection attempt.
-    """
+    If 'ssl_version' is specified, use it in the connection attempt."""
 
     host, port = addr
     if ca_certs is not None:
@@ -1519,8 +1480,8 @@
     context = _create_stdlib_context(ssl_version,
                                      cert_reqs=cert_reqs,
                                      cafile=ca_certs)
-    with create_connection(addr, timeout=timeout) as sock:
-        with context.wrap_socket(sock, server_hostname=host) as sslsock:
+    with create_connection(addr) as sock:
+        with context.wrap_socket(sock) as sslsock:
             dercert = sslsock.getpeercert(True)
     return DER_cert_to_PEM_cert(dercert)
 
diff --git a/common/py3-stdlib/statistics.py b/common/py3-stdlib/statistics.py
index f662453..f9d3802 100644
--- a/common/py3-stdlib/statistics.py
+++ b/common/py3-stdlib/statistics.py
@@ -73,30 +73,6 @@
 2.5
 
 
-Statistics for relations between two inputs
--------------------------------------------
-
-==================  ====================================================
-Function            Description
-==================  ====================================================
-covariance          Sample covariance for two variables.
-correlation         Pearson's correlation coefficient for two variables.
-linear_regression   Intercept and slope for simple linear regression.
-==================  ====================================================
-
-Calculate covariance, Pearson's correlation, and simple linear regression
-for two inputs:
-
->>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
->>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
->>> covariance(x, y)
-0.75
->>> correlation(x, y)  #doctest: +ELLIPSIS
-0.31622776601...
->>> linear_regression(x, y)
-LinearRegression(slope=0.1, intercept=1.5)
-
-
 Exceptions
 ----------
 
@@ -107,12 +83,9 @@
 __all__ = [
     'NormalDist',
     'StatisticsError',
-    'correlation',
-    'covariance',
     'fmean',
     'geometric_mean',
     'harmonic_mean',
-    'linear_regression',
     'mean',
     'median',
     'median_grouped',
@@ -133,11 +106,11 @@
 
 from fractions import Fraction
 from decimal import Decimal
-from itertools import groupby, repeat
+from itertools import groupby
 from bisect import bisect_left, bisect_right
 from math import hypot, sqrt, fabs, exp, erf, tau, log, fsum
 from operator import itemgetter
-from collections import Counter, namedtuple
+from collections import Counter
 
 # === Exceptions ===
 
@@ -147,17 +120,21 @@
 
 # === Private utilities ===
 
-def _sum(data):
-    """_sum(data) -> (type, sum, count)
+def _sum(data, start=0):
+    """_sum(data [, start]) -> (type, sum, count)
 
     Return a high-precision sum of the given numeric data as a fraction,
     together with the type to be converted to and the count of items.
 
+    If optional argument ``start`` is given, it is added to the total.
+    If ``data`` is empty, ``start`` (defaulting to 0) is returned.
+
+
     Examples
     --------
 
-    >>> _sum([3, 2.25, 4.5, -0.5, 0.25])
-    (<class 'float'>, Fraction(19, 2), 5)
+    >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
+    (<class 'float'>, Fraction(11, 1), 5)
 
     Some sources of round-off error will be avoided:
 
@@ -180,9 +157,10 @@
     allowed.
     """
     count = 0
-    partials = {}
+    n, d = _exact_ratio(start)
+    partials = {d: n}
     partials_get = partials.get
-    T = int
+    T = _coerce(int, type(start))
     for typ, values in groupby(data, type):
         T = _coerce(T, typ)  # or raise TypeError
         for n, d in map(_exact_ratio, values):
@@ -195,7 +173,8 @@
         assert not _isfinite(total)
     else:
         # Sum all the partial sums using builtin sum.
-        total = sum(Fraction(n, d) for d, n in partials.items())
+        # FIXME is this faster if we sum them in order of the denominator?
+        total = sum(Fraction(n, d) for d, n in sorted(partials.items()))
     return (T, total, count)
 
 
@@ -246,19 +225,27 @@
     x is expected to be an int, Fraction, Decimal or float.
     """
     try:
-        return x.as_integer_ratio()
-    except AttributeError:
-        pass
+        # Optimise the common case of floats. We expect that the most often
+        # used numeric type will be builtin floats, so try to make this as
+        # fast as possible.
+        if type(x) is float or type(x) is Decimal:
+            return x.as_integer_ratio()
+        try:
+            # x may be an int, Fraction, or Integral ABC.
+            return (x.numerator, x.denominator)
+        except AttributeError:
+            try:
+                # x may be a float or Decimal subclass.
+                return x.as_integer_ratio()
+            except AttributeError:
+                # Just give up?
+                pass
     except (OverflowError, ValueError):
         # float NAN or INF.
         assert not _isfinite(x)
         return (x, None)
-    try:
-        # x may be an Integral ABC.
-        return (x.numerator, x.denominator)
-    except AttributeError:
-        msg = f"can't convert type '{type(x).__name__}' to numerator/denominator"
-        raise TypeError(msg)
+    msg = "can't convert type '{}' to numerator/denominator"
+    raise TypeError(msg.format(type(x).__name__))
 
 
 def _convert(value, T):
@@ -374,39 +361,40 @@
         return exp(fmean(map(log, data)))
     except ValueError:
         raise StatisticsError('geometric mean requires a non-empty dataset '
-                              'containing positive numbers') from None
+                              ' containing positive numbers') from None
 
 
-def harmonic_mean(data, weights=None):
+def harmonic_mean(data):
     """Return the harmonic mean of data.
 
-    The harmonic mean is the reciprocal of the arithmetic mean of the
-    reciprocals of the data.  It can be used for averaging ratios or
-    rates, for example speeds.
+    The harmonic mean, sometimes called the subcontrary mean, is the
+    reciprocal of the arithmetic mean of the reciprocals of the data,
+    and is often appropriate when averaging quantities which are rates
+    or ratios, for example speeds. Example:
 
-    Suppose a car travels 40 km/hr for 5 km and then speeds-up to
-    60 km/hr for another 5 km. What is the average speed?
+    Suppose an investor purchases an equal value of shares in each of
+    three companies, with P/E (price/earning) ratios of 2.5, 3 and 10.
+    What is the average P/E ratio for the investor's portfolio?
 
-        >>> harmonic_mean([40, 60])
-        48.0
+    >>> harmonic_mean([2.5, 3, 10])  # For an equal investment portfolio.
+    3.6
 
-    Suppose a car travels 40 km/hr for 5 km, and when traffic clears,
-    speeds-up to 60 km/hr for the remaining 30 km of the journey. What
-    is the average speed?
-
-        >>> harmonic_mean([40, 60], weights=[5, 30])
-        56.0
+    Using the arithmetic mean would give an average of about 5.167, which
+    is too high.
 
     If ``data`` is empty, or any element is less than zero,
     ``harmonic_mean`` will raise ``StatisticsError``.
     """
+    # For a justification for using harmonic mean for P/E ratios, see
+    # http://fixthepitch.pellucid.com/comps-analysis-the-missing-harmony-of-summary-statistics/
+    # http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2621087
     if iter(data) is data:
         data = list(data)
     errmsg = 'harmonic mean does not support negative values'
     n = len(data)
     if n < 1:
         raise StatisticsError('harmonic_mean requires at least one data point')
-    elif n == 1 and weights is None:
+    elif n == 1:
         x = data[0]
         if isinstance(x, (numbers.Real, Decimal)):
             if x < 0:
@@ -414,23 +402,13 @@
             return x
         else:
             raise TypeError('unsupported type')
-    if weights is None:
-        weights = repeat(1, n)
-        sum_weights = n
-    else:
-        if iter(weights) is weights:
-            weights = list(weights)
-        if len(weights) != n:
-            raise StatisticsError('Number of weights does not match data size')
-        _, sum_weights, _ = _sum(w for w in _fail_neg(weights, errmsg))
     try:
-        data = _fail_neg(data, errmsg)
-        T, total, count = _sum(w / x if w else 0 for w, x in zip(weights, data))
+        T, total, count = _sum(1 / x for x in _fail_neg(data, errmsg))
     except ZeroDivisionError:
         return 0
-    if total <= 0:
-        raise StatisticsError('Weighted sum must be positive')
-    return _convert(sum_weights / total, T)
+    assert count == n
+    return _convert(n / total, T)
+
 
 # FIXME: investigate ways to calculate medians without sorting? Quickselect?
 def median(data):
@@ -705,20 +683,14 @@
     if c is not None:
         T, total, count = _sum((x-c)**2 for x in data)
         return (T, total)
-    T, total, count = _sum(data)
-    mean_n, mean_d = (total / count).as_integer_ratio()
-    partials = Counter()
-    for n, d in map(_exact_ratio, data):
-        diff_n = n * mean_d - d * mean_n
-        diff_d = d * mean_d
-        partials[diff_d * diff_d] += diff_n * diff_n
-    if None in partials:
-        # The sum will be a NAN or INF. We can ignore all the finite
-        # partials, and just look at this special one.
-        total = partials[None]
-        assert not _isfinite(total)
-    else:
-        total = sum(Fraction(n, d) for d, n in partials.items())
+    c = mean(data)
+    T, total, count = _sum((x-c)**2 for x in data)
+    # The following sum should mathematically equal zero, but due to rounding
+    # error may not.
+    U, total2, count2 = _sum((x - c) for x in data)
+    assert T == U and count == count2
+    total -= total2 ** 2 / len(data)
+    assert not total < 0, 'negative sum of square deviations: %f' % total
     return (T, total)
 
 
@@ -822,9 +794,6 @@
     1.0810874155219827
 
     """
-    # Fixme: Despite the exact sum of squared deviations, some inaccuracy
-    # remains because there are two rounding steps.  The first occurs in
-    # the _convert() step for variance(), the second occurs in math.sqrt().
     var = variance(data, xbar)
     try:
         return var.sqrt()
@@ -841,9 +810,6 @@
     0.986893273527251
 
     """
-    # Fixme: Despite the exact sum of squared deviations, some inaccuracy
-    # remains because there are two rounding steps.  The first occurs in
-    # the _convert() step for pvariance(), the second occurs in math.sqrt().
     var = pvariance(data, mu)
     try:
         return var.sqrt()
@@ -851,119 +817,6 @@
         return math.sqrt(var)
 
 
-# === Statistics for relations between two inputs ===
-
-# See https://en.wikipedia.org/wiki/Covariance
-#     https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
-#     https://en.wikipedia.org/wiki/Simple_linear_regression
-
-
-def covariance(x, y, /):
-    """Covariance
-
-    Return the sample covariance of two inputs *x* and *y*. Covariance
-    is a measure of the joint variability of two inputs.
-
-    >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
-    >>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
-    >>> covariance(x, y)
-    0.75
-    >>> z = [9, 8, 7, 6, 5, 4, 3, 2, 1]
-    >>> covariance(x, z)
-    -7.5
-    >>> covariance(z, x)
-    -7.5
-
-    """
-    n = len(x)
-    if len(y) != n:
-        raise StatisticsError('covariance requires that both inputs have same number of data points')
-    if n < 2:
-        raise StatisticsError('covariance requires at least two data points')
-    xbar = fsum(x) / n
-    ybar = fsum(y) / n
-    sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
-    return sxy / (n - 1)
-
-
-def correlation(x, y, /):
-    """Pearson's correlation coefficient
-
-    Return the Pearson's correlation coefficient for two inputs. Pearson's
-    correlation coefficient *r* takes values between -1 and +1. It measures the
-    strength and direction of the linear relationship, where +1 means very
-    strong, positive linear relationship, -1 very strong, negative linear
-    relationship, and 0 no linear relationship.
-
-    >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
-    >>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1]
-    >>> correlation(x, x)
-    1.0
-    >>> correlation(x, y)
-    -1.0
-
-    """
-    n = len(x)
-    if len(y) != n:
-        raise StatisticsError('correlation requires that both inputs have same number of data points')
-    if n < 2:
-        raise StatisticsError('correlation requires at least two data points')
-    xbar = fsum(x) / n
-    ybar = fsum(y) / n
-    sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
-    sxx = fsum((xi - xbar) ** 2.0 for xi in x)
-    syy = fsum((yi - ybar) ** 2.0 for yi in y)
-    try:
-        return sxy / sqrt(sxx * syy)
-    except ZeroDivisionError:
-        raise StatisticsError('at least one of the inputs is constant')
-
-
-LinearRegression = namedtuple('LinearRegression', ('slope', 'intercept'))
-
-
-def linear_regression(x, y, /):
-    """Slope and intercept for simple linear regression.
-
-    Return the slope and intercept of simple linear regression
-    parameters estimated using ordinary least squares. Simple linear
-    regression describes the relationship between an independent variable
-    *x* and a dependent variable *y* in terms of a linear function:
-
-        y = slope * x + intercept + noise
-
-    where *slope* and *intercept* are the regression parameters that are
-    estimated, and noise represents the variability of the data that was
-    not explained by the linear regression (it is equal to the
-    difference between predicted and actual values of the dependent
-    variable).
-
-    The parameters are returned as a named tuple.
-
-    >>> x = [1, 2, 3, 4, 5]
-    >>> noise = NormalDist().samples(5, seed=42)
-    >>> y = [3 * x[i] + 2 + noise[i] for i in range(5)]
-    >>> linear_regression(x, y)  #doctest: +ELLIPSIS
-    LinearRegression(slope=3.09078914170..., intercept=1.75684970486...)
-
-    """
-    n = len(x)
-    if len(y) != n:
-        raise StatisticsError('linear regression requires that both inputs have same number of data points')
-    if n < 2:
-        raise StatisticsError('linear regression requires at least two data points')
-    xbar = fsum(x) / n
-    ybar = fsum(y) / n
-    sxy = fsum((xi - xbar) * (yi - ybar) for xi, yi in zip(x, y))
-    sxx = fsum((xi - xbar) ** 2.0 for xi in x)
-    try:
-        slope = sxy / sxx   # equivalent to:  covariance(x, y) / variance(x)
-    except ZeroDivisionError:
-        raise StatisticsError('x is constant')
-    intercept = ybar - slope * xbar
-    return LinearRegression(slope=slope, intercept=intercept)
-
-
 ## Normal Distribution #####################################################
 
 
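
With covariance(), correlation(), linear_regression(), and the weights parameter of harmonic_mean() removed, the surviving harmonic_mean() call takes only the data iterable. A minimal sketch, reusing the P/E example restored in the docstring above:

    from statistics import harmonic_mean

    # Reverted signature: harmonic_mean(data); no weights= keyword.
    print(harmonic_mean([2.5, 3, 10]))  # 3.6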
diff --git a/common/py3-stdlib/subprocess.py b/common/py3-stdlib/subprocess.py
index ccb46a6..f1d829a 100644
--- a/common/py3-stdlib/subprocess.py
+++ b/common/py3-stdlib/subprocess.py
@@ -5,6 +5,7 @@
 # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
 #
 # Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
 
 r"""Subprocesses with accessible I/O streams
 
@@ -54,10 +55,13 @@
 import types
 
 try:
-    import fcntl
+    import pwd
 except ImportError:
-    fcntl = None
-
+    pwd = None
+try:
+    import grp
+except ImportError:
+    grp = None
 
 __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
            "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL",
@@ -322,7 +326,7 @@
     if dev_mode:
         args.extend(('-X', 'dev'))
     for opt in ('faulthandler', 'tracemalloc', 'importtime',
-                'showrefcount', 'utf8'):
+                'showrefcount', 'utf8', 'oldparser'):
         if opt in xoptions:
             value = xoptions[opt]
             if value is True:
@@ -411,11 +415,7 @@
     if 'input' in kwargs and kwargs['input'] is None:
         # Explicitly passing input=None was previously equivalent to passing an
         # empty string. That is maintained here for backwards compatibility.
-        if kwargs.get('universal_newlines') or kwargs.get('text'):
-            empty = ''
-        else:
-            empty = b''
-        kwargs['input'] = empty
+        kwargs['input'] = '' if kwargs.get('universal_newlines', False) else b''
 
     return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
                **kwargs).stdout
@@ -660,9 +660,8 @@
         # os.posix_spawn() is not available
         return False
 
-    if sys.platform in ('darwin', 'sunos5'):
-        # posix_spawn() is a syscall on both macOS and Solaris,
-        # and properly reports errors
+    if sys.platform == 'darwin':
+        # posix_spawn() is a syscall on macOS and properly reports errors
         return True
 
     # Check libc name and runtime libc version
@@ -694,7 +693,7 @@
 _USE_POSIX_SPAWN = _use_posix_spawn()
 
 
-class Popen:
+class Popen(object):
     """ Execute a child program in a new process.
 
     For a complete description of the arguments see the Python documentation.
@@ -757,7 +756,7 @@
                  startupinfo=None, creationflags=0,
                  restore_signals=True, start_new_session=False,
                  pass_fds=(), *, user=None, group=None, extra_groups=None,
-                 encoding=None, errors=None, text=None, umask=-1, pipesize=-1):
+                 encoding=None, errors=None, text=None, umask=-1):
         """Create new Popen instance."""
         _cleanup()
         # Held while anything is calling waitpid before returncode has been
@@ -774,11 +773,6 @@
         if not isinstance(bufsize, int):
             raise TypeError("bufsize must be an integer")
 
-        if pipesize is None:
-            pipesize = -1  # Restore default
-        if not isinstance(pipesize, int):
-            raise TypeError("pipesize must be an integer")
-
         if _mswindows:
             if preexec_fn is not None:
                 raise ValueError("preexec_fn is not supported on Windows "
@@ -803,7 +797,6 @@
         self.returncode = None
         self.encoding = encoding
         self.errors = errors
-        self.pipesize = pipesize
 
         # Validate the combinations of text and universal_newlines
         if (text is not None and universal_newlines is not None
@@ -845,13 +838,6 @@
 
         self.text_mode = encoding or errors or text or universal_newlines
 
-        # PEP 597: We suppress the EncodingWarning in subprocess module
-        # for now (at Python 3.10), because we focus on files for now.
-        # This will be changed to encoding = io.text_encoding(encoding)
-        # in the future.
-        if self.text_mode and encoding is None:
-            self.encoding = encoding = "locale"
-
         # How long to resume waiting on a child after the first ^C.
         # There is no right value for this.  The purpose is to be polite
         # yet remain good for interactive users trying to exit a tool.
@@ -875,9 +861,7 @@
                                  "current platform")
 
             elif isinstance(group, str):
-                try:
-                    import grp
-                except ImportError:
+                if grp is None:
                     raise ValueError("The group parameter cannot be a string "
                                      "on systems without the grp module")
 
@@ -903,9 +887,7 @@
             gids = []
             for extra_group in extra_groups:
                 if isinstance(extra_group, str):
-                    try:
-                        import grp
-                    except ImportError:
+                    if grp is None:
                         raise ValueError("Items in extra_groups cannot be "
                                          "strings on systems without the "
                                          "grp module")
@@ -931,11 +913,10 @@
                                  "the current platform")
 
             elif isinstance(user, str):
-                try:
-                    import pwd
-                except ImportError:
+                if pwd is None:
                     raise ValueError("The user parameter cannot be a string "
                                      "on systems without the pwd module")
+
                 uid = pwd.getpwnam(user).pw_uid
             elif isinstance(user, int):
                 uid = user
@@ -1004,7 +985,7 @@
     def __repr__(self):
         obj_repr = (
             f"<{self.__class__.__name__}: "
-            f"returncode: {self.returncode} args: {self.args!r}>"
+            f"returncode: {self.returncode} args: {list(self.args)!r}>"
         )
         if len(obj_repr) > 80:
             obj_repr = obj_repr[:76] + "...>"
@@ -1540,8 +1521,10 @@
                 self.stderr.close()
 
             # All data exchanged.  Translate lists into strings.
-            stdout = stdout[0] if stdout else None
-            stderr = stderr[0] if stderr else None
+            if stdout is not None:
+                stdout = stdout[0]
+            if stderr is not None:
+                stderr = stderr[0]
 
             return (stdout, stderr)
 
@@ -1592,8 +1575,6 @@
                 pass
             elif stdin == PIPE:
                 p2cread, p2cwrite = os.pipe()
-                if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
-                    fcntl.fcntl(p2cwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
             elif stdin == DEVNULL:
                 p2cread = self._get_devnull()
             elif isinstance(stdin, int):
@@ -1606,8 +1587,6 @@
                 pass
             elif stdout == PIPE:
                 c2pread, c2pwrite = os.pipe()
-                if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
-                    fcntl.fcntl(c2pwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
             elif stdout == DEVNULL:
                 c2pwrite = self._get_devnull()
             elif isinstance(stdout, int):
@@ -1620,8 +1599,6 @@
                 pass
             elif stderr == PIPE:
                 errread, errwrite = os.pipe()
-                if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
-                    fcntl.fcntl(errwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
             elif stderr == STDOUT:
                 if c2pwrite != -1:
                     errwrite = c2pwrite
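
The hunks above drop the pipesize parameter, so pipes are always created at the platform default size. A minimal sketch of the reverted Popen API, assuming a POSIX system with echo on PATH:

    import subprocess

    # No pipesize= keyword exists under the reverted signature.
    proc = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE, text=True)
    out, _ = proc.communicate()
    print(out.strip())  # hello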
diff --git a/common/py3-stdlib/symbol.py b/common/py3-stdlib/symbol.py
new file mode 100644
index 0000000..aaac8c9
--- /dev/null
+++ b/common/py3-stdlib/symbol.py
@@ -0,0 +1,122 @@
+"""Non-terminal symbols of Python grammar (from "graminit.h")."""
+
+#  This file is automatically generated; please don't muck it up!
+#
+#  To update the symbols in this file, 'cd' to the top directory of
+#  the python source tree after building the interpreter and run:
+#
+#    python3 Tools/scripts/generate_symbol_py.py Include/graminit.h Lib/symbol.py
+#
+# or just
+#
+#    make regen-symbol
+
+import warnings
+
+warnings.warn(
+    "The symbol module is deprecated and will be removed "
+    "in future versions of Python",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+#--start constants--
+single_input = 256
+file_input = 257
+eval_input = 258
+decorator = 259
+decorators = 260
+decorated = 261
+async_funcdef = 262
+funcdef = 263
+parameters = 264
+typedargslist = 265
+tfpdef = 266
+varargslist = 267
+vfpdef = 268
+stmt = 269
+simple_stmt = 270
+small_stmt = 271
+expr_stmt = 272
+annassign = 273
+testlist_star_expr = 274
+augassign = 275
+del_stmt = 276
+pass_stmt = 277
+flow_stmt = 278
+break_stmt = 279
+continue_stmt = 280
+return_stmt = 281
+yield_stmt = 282
+raise_stmt = 283
+import_stmt = 284
+import_name = 285
+import_from = 286
+import_as_name = 287
+dotted_as_name = 288
+import_as_names = 289
+dotted_as_names = 290
+dotted_name = 291
+global_stmt = 292
+nonlocal_stmt = 293
+assert_stmt = 294
+compound_stmt = 295
+async_stmt = 296
+if_stmt = 297
+while_stmt = 298
+for_stmt = 299
+try_stmt = 300
+with_stmt = 301
+with_item = 302
+except_clause = 303
+suite = 304
+namedexpr_test = 305
+test = 306
+test_nocond = 307
+lambdef = 308
+lambdef_nocond = 309
+or_test = 310
+and_test = 311
+not_test = 312
+comparison = 313
+comp_op = 314
+star_expr = 315
+expr = 316
+xor_expr = 317
+and_expr = 318
+shift_expr = 319
+arith_expr = 320
+term = 321
+factor = 322
+power = 323
+atom_expr = 324
+atom = 325
+testlist_comp = 326
+trailer = 327
+subscriptlist = 328
+subscript = 329
+sliceop = 330
+exprlist = 331
+testlist = 332
+dictorsetmaker = 333
+classdef = 334
+arglist = 335
+argument = 336
+comp_iter = 337
+sync_comp_for = 338
+comp_for = 339
+comp_if = 340
+encoding_decl = 341
+yield_expr = 342
+yield_arg = 343
+func_body_suite = 344
+func_type_input = 345
+func_type = 346
+typelist = 347
+#--end constants--
+
+sym_name = {}
+for _name, _value in list(globals().items()):
+    if type(_value) is type(0):
+        sym_name[_value] = _name
+del _name, _value
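
The re-added symbol module exposes the old parser's grammar non-terminals and builds sym_name, a reverse mapping from numeric constant to rule name. A minimal sketch; the warning filter only silences the DeprecationWarning emitted at import:

    import warnings

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        import symbol

    print(symbol.sym_name[symbol.funcdef])  # 'funcdef'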
diff --git a/common/py3-stdlib/symtable.py b/common/py3-stdlib/symtable.py
index 98db1e2..521540f 100644
--- a/common/py3-stdlib/symtable.py
+++ b/common/py3-stdlib/symtable.py
@@ -10,11 +10,6 @@
 __all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
 
 def symtable(code, filename, compile_type):
-    """ Return the toplevel *SymbolTable* for the source code.
-
-    *filename* is the name of the file with the code
-    and *compile_type* is the *compile()* mode argument.
-    """
     top = _symtable.symtable(code, filename, compile_type)
     return _newSymbolTable(top, filename)
 
@@ -60,11 +55,6 @@
                                                             self._filename)
 
     def get_type(self):
-        """Return the type of the symbol table.
-
-        The values returned are 'class', 'module' and
-        'function'.
-        """
         if self._table.type == _symtable.TYPE_MODULE:
             return "module"
         if self._table.type == _symtable.TYPE_FUNCTION:
@@ -75,51 +65,27 @@
                "unexpected type: {0}".format(self._table.type)
 
     def get_id(self):
-        """Return an identifier for the table.
-        """
         return self._table.id
 
     def get_name(self):
-        """Return the table's name.
-
-        This corresponds to the name of the class, function
-        or 'top' if the table is for a class, function or
-        global respectively.
-        """
         return self._table.name
 
     def get_lineno(self):
-        """Return the number of the first line in the
-        block for the table.
-        """
         return self._table.lineno
 
     def is_optimized(self):
-        """Return *True* if the locals in the table
-        are optimizable.
-        """
         return bool(self._table.type == _symtable.TYPE_FUNCTION)
 
     def is_nested(self):
-        """Return *True* if the block is a nested class
-        or function."""
         return bool(self._table.nested)
 
     def has_children(self):
-        """Return *True* if the block has nested namespaces.
-        """
         return bool(self._table.children)
 
     def get_identifiers(self):
-        """Return a list of names of symbols in the table.
-        """
         return self._table.symbols.keys()
 
     def lookup(self, name):
-        """Lookup a *name* in the table.
-
-        Returns a *Symbol* instance.
-        """
         sym = self._symbols.get(name)
         if sym is None:
             flags = self._table.symbols[name]
@@ -130,9 +96,6 @@
         return sym
 
     def get_symbols(self):
-        """Return a list of *Symbol* instances for
-        names in the table.
-        """
         return [self.lookup(ident) for ident in self.get_identifiers()]
 
     def __check_children(self, name):
@@ -141,8 +104,6 @@
                 if st.name == name]
 
     def get_children(self):
-        """Return a list of the nested symbol tables.
-        """
         return [_newSymbolTable(st, self._filename)
                 for st in self._table.children]
 
@@ -161,15 +122,11 @@
                      if test_func(self._table.symbols[ident]))
 
     def get_parameters(self):
-        """Return a tuple of parameters to the function.
-        """
         if self.__params is None:
             self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
         return self.__params
 
     def get_locals(self):
-        """Return a tuple of locals in the function.
-        """
         if self.__locals is None:
             locs = (LOCAL, CELL)
             test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
@@ -177,8 +134,6 @@
         return self.__locals
 
     def get_globals(self):
-        """Return a tuple of globals in the function.
-        """
         if self.__globals is None:
             glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
             test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
@@ -186,15 +141,11 @@
         return self.__globals
 
     def get_nonlocals(self):
-        """Return a tuple of nonlocals in the function.
-        """
         if self.__nonlocals is None:
             self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL)
         return self.__nonlocals
 
     def get_frees(self):
-        """Return a tuple of free variables in the function.
-        """
         if self.__frees is None:
             is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
             self.__frees = self.__idents_matching(is_free)
@@ -206,8 +157,6 @@
     __methods = None
 
     def get_methods(self):
-        """Return a tuple of methods declared in the class.
-        """
         if self.__methods is None:
             d = {}
             for st in self._table.children:
@@ -229,19 +178,12 @@
         return "<symbol {0!r}>".format(self.__name)
 
     def get_name(self):
-        """Return a name of a symbol.
-        """
         return self.__name
 
     def is_referenced(self):
-        """Return *True* if the symbol is used in
-        its block.
-        """
         return bool(self.__flags & _symtable.USE)
 
     def is_parameter(self):
-        """Return *True* if the symbol is a parameter.
-        """
         return bool(self.__flags & DEF_PARAM)
 
     def is_global(self):
@@ -251,12 +193,9 @@
                     or (self.__module_scope and self.__flags & DEF_BOUND))
 
     def is_nonlocal(self):
-        """Return *True* if the symbol is nonlocal."""
         return bool(self.__flags & DEF_NONLOCAL)
 
     def is_declared_global(self):
-        """Return *True* if the symbol is declared global
-        with a global statement."""
         return bool(self.__scope == GLOBAL_EXPLICIT)
 
     def is_local(self):
@@ -266,28 +205,19 @@
                     or (self.__module_scope and self.__flags & DEF_BOUND))
 
     def is_annotated(self):
-        """Return *True* if the symbol is annotated.
-        """
         return bool(self.__flags & DEF_ANNOT)
 
     def is_free(self):
-        """Return *True* if a referenced symbol is
-        not assigned to.
-        """
         return bool(self.__scope == FREE)
 
     def is_imported(self):
-        """Return *True* if the symbol is created from
-        an import statement.
-        """
         return bool(self.__flags & DEF_IMPORT)
 
     def is_assigned(self):
-        """Return *True* if a symbol is assigned to."""
         return bool(self.__flags & DEF_LOCAL)
 
     def is_namespace(self):
-        """Returns *True* if name binding introduces new namespace.
+        """Returns true if name binding introduces new namespace.
 
         If the name is used as the target of a function or class
         statement, this will be true.
@@ -304,7 +234,7 @@
         return self.__namespaces
 
     def get_namespace(self):
-        """Return the single namespace bound to this name.
+        """Returns the single namespace bound to this name.
 
         Raises ValueError if the name is bound to multiple namespaces.
         """
diff --git a/common/py3-stdlib/sysconfig.py b/common/py3-stdlib/sysconfig.py
index daf9f00..bf04ac5 100644
--- a/common/py3-stdlib/sysconfig.py
+++ b/common/py3-stdlib/sysconfig.py
@@ -18,11 +18,6 @@
     'parse_config_h',
 ]
 
-# Keys for get_config_var() that are never converted to Python integers.
-_ALWAYS_STR = {
-    'MACOSX_DEPLOYMENT_TARGET',
-}
-
 _INSTALL_SCHEMES = {
     'posix_prefix': {
         'stdlib': '{installed_base}/{platlibdir}/python{py_version_short}',
@@ -56,73 +51,42 @@
         'scripts': '{base}/Scripts',
         'data': '{base}',
         },
-    }
-
-
-# NOTE: site.py has a copy of this function.
-# Sync it when modifying this function.
-def _getuserbase():
-    env_base = os.environ.get("PYTHONUSERBASE", None)
-    if env_base:
-        return env_base
-
-    # VxWorks has no home directories
-    if sys.platform == "vxworks":
-        return None
-
-    def joinuser(*args):
-        return os.path.expanduser(os.path.join(*args))
-
-    if os.name == "nt":
-        base = os.environ.get("APPDATA") or "~"
-        return joinuser(base, "Python")
-
-    if sys.platform == "darwin" and sys._framework:
-        return joinuser("~", "Library", sys._framework,
-                        f"{sys.version_info[0]}.{sys.version_info[1]}")
-
-    return joinuser("~", ".local")
-
-_HAS_USER_BASE = (_getuserbase() is not None)
-
-if _HAS_USER_BASE:
-    _INSTALL_SCHEMES |= {
-        # NOTE: When modifying "purelib" scheme, update site._get_path() too.
-        'nt_user': {
-            'stdlib': '{userbase}/Python{py_version_nodot_plat}',
-            'platstdlib': '{userbase}/Python{py_version_nodot_plat}',
-            'purelib': '{userbase}/Python{py_version_nodot_plat}/site-packages',
-            'platlib': '{userbase}/Python{py_version_nodot_plat}/site-packages',
-            'include': '{userbase}/Python{py_version_nodot_plat}/Include',
-            'scripts': '{userbase}/Python{py_version_nodot_plat}/Scripts',
-            'data': '{userbase}',
-            },
-        'posix_user': {
-            'stdlib': '{userbase}/{platlibdir}/python{py_version_short}',
-            'platstdlib': '{userbase}/{platlibdir}/python{py_version_short}',
-            'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
-            'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
-            'include': '{userbase}/include/python{py_version_short}',
-            'scripts': '{userbase}/bin',
-            'data': '{userbase}',
-            },
-        'osx_framework_user': {
-            'stdlib': '{userbase}/lib/python',
-            'platstdlib': '{userbase}/lib/python',
-            'purelib': '{userbase}/lib/python/site-packages',
-            'platlib': '{userbase}/lib/python/site-packages',
-            'include': '{userbase}/include/python{py_version_short}',
-            'scripts': '{userbase}/bin',
-            'data': '{userbase}',
-            },
+    # NOTE: When modifying "purelib" scheme, update site._get_path() too.
+    'nt_user': {
+        'stdlib': '{userbase}/Python{py_version_nodot}',
+        'platstdlib': '{userbase}/Python{py_version_nodot}',
+        'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
+        'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
+        'include': '{userbase}/Python{py_version_nodot}/Include',
+        'scripts': '{userbase}/Python{py_version_nodot}/Scripts',
+        'data': '{userbase}',
+        },
+    'posix_user': {
+        'stdlib': '{userbase}/{platlibdir}/python{py_version_short}',
+        'platstdlib': '{userbase}/{platlibdir}/python{py_version_short}',
+        'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
+        'platlib': '{userbase}/{platlibdir}/python{py_version_short}/site-packages',
+        'include': '{userbase}/include/python{py_version_short}',
+        'scripts': '{userbase}/bin',
+        'data': '{userbase}',
+        },
+    'osx_framework_user': {
+        'stdlib': '{userbase}/lib/python',
+        'platstdlib': '{userbase}/lib/python',
+        'purelib': '{userbase}/lib/python/site-packages',
+        'platlib': '{userbase}/lib/python/site-packages',
+        'include': '{userbase}/include',
+        'scripts': '{userbase}/bin',
+        'data': '{userbase}',
+        },
     }
 
 _SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
                 'scripts', 'data')
 
 _PY_VERSION = sys.version.split()[0]
-_PY_VERSION_SHORT = f'{sys.version_info[0]}.{sys.version_info[1]}'
-_PY_VERSION_SHORT_NO_DOT = f'{sys.version_info[0]}{sys.version_info[1]}'
+_PY_VERSION_SHORT = '%d.%d' % sys.version_info[:2]
+_PY_VERSION_SHORT_NO_DOT = '%d%d' % sys.version_info[:2]
 _PREFIX = os.path.normpath(sys.prefix)
 _BASE_PREFIX = os.path.normpath(sys.base_prefix)
 _EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
@@ -130,12 +94,6 @@
 _CONFIG_VARS = None
 _USER_BASE = None
 
-# Regexes needed for parsing Makefile (and similar syntaxes,
-# like old-style Setup files).
-_variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)"
-_findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)"
-_findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}"
-
 
 def _safe_realpath(path):
     try:
@@ -184,24 +142,18 @@
 
 if _PYTHON_BUILD:
     for scheme in ('posix_prefix', 'posix_home'):
-        # On POSIX-y platforms, Python will:
-        # - Build from .h files in 'headers' (which is only added to the
-        #   scheme when building CPython)
-        # - Install .h files to 'include'
-        scheme = _INSTALL_SCHEMES[scheme]
-        scheme['headers'] = scheme['include']
-        scheme['include'] = '{srcdir}/Include'
-        scheme['platinclude'] = '{projectbase}/.'
+        _INSTALL_SCHEMES[scheme]['include'] = '{srcdir}/Include'
+        _INSTALL_SCHEMES[scheme]['platinclude'] = '{projectbase}/.'
 
 
 def _subst_vars(s, local_vars):
     try:
         return s.format(**local_vars)
-    except KeyError as var:
+    except KeyError:
         try:
             return s.format(**os.environ)
-        except KeyError:
-            raise AttributeError(f'{var}') from None
+        except KeyError as var:
+            raise AttributeError('{%s}' % var) from None
 
 def _extend_dict(target_dict, other_dict):
     target_keys = target_dict.keys()
@@ -224,62 +176,60 @@
     return res
 
 
-def _get_preferred_schemes():
-    if os.name == 'nt':
-        return {
-            'prefix': 'nt',
-            'home': 'posix_home',
-            'user': 'nt_user',
-        }
-    if sys.platform == 'darwin' and sys._framework:
-        return {
-            'prefix': 'posix_prefix',
-            'home': 'posix_home',
-            'user': 'osx_framework_user',
-        }
-    return {
-        'prefix': 'posix_prefix',
-        'home': 'posix_home',
-        'user': 'posix_user',
-    }
+def _get_default_scheme():
+    if os.name == 'posix':
+        # the default scheme for posix is posix_prefix
+        return 'posix_prefix'
+    return os.name
 
 
-def get_preferred_scheme(key):
-    scheme = _get_preferred_schemes()[key]
-    if scheme not in _INSTALL_SCHEMES:
-        raise ValueError(
-            f"{key!r} returned {scheme!r}, which is not a valid scheme "
-            f"on this platform"
-        )
-    return scheme
+# NOTE: site.py has a copy of this function.
+# Sync it when modifying this function.
+def _getuserbase():
+    env_base = os.environ.get("PYTHONUSERBASE", None)
+    if env_base:
+        return env_base
+
+    def joinuser(*args):
+        return os.path.expanduser(os.path.join(*args))
+
+    if os.name == "nt":
+        base = os.environ.get("APPDATA") or "~"
+        return joinuser(base, "Python")
+
+    if sys.platform == "darwin" and sys._framework:
+        return joinuser("~", "Library", sys._framework,
+                        "%d.%d" % sys.version_info[:2])
+
+    return joinuser("~", ".local")
 
 
-def get_default_scheme():
-    return get_preferred_scheme('prefix')
-
-
-def _parse_makefile(filename, vars=None, keep_unresolved=True):
+def _parse_makefile(filename, vars=None):
     """Parse a Makefile-style file.
 
     A dictionary containing name/value pairs is returned.  If an
     optional dictionary is passed in as the second argument, it is
     used instead of a new dictionary.
     """
+    # Regexes needed for parsing Makefile (and similar syntaxes,
+    # like old-style Setup files).
     import re
+    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
+    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
+    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
 
     if vars is None:
         vars = {}
     done = {}
     notdone = {}
 
-    with open(filename, encoding=sys.getfilesystemencoding(),
-              errors="surrogateescape") as f:
+    with open(filename, errors="surrogateescape") as f:
         lines = f.readlines()
 
     for line in lines:
         if line.startswith('#') or line.strip() == '':
             continue
-        m = re.match(_variable_rx, line)
+        m = _variable_rx.match(line)
         if m:
             n, v = m.group(1, 2)
             v = v.strip()
@@ -290,9 +240,6 @@
                 notdone[n] = v
             else:
                 try:
-                    if n in _ALWAYS_STR:
-                        raise ValueError
-
                     v = int(v)
                 except ValueError:
                     # insert literal `$'
@@ -312,8 +259,8 @@
     while len(variables) > 0:
         for name in tuple(variables):
             value = notdone[name]
-            m1 = re.search(_findvar1_rx, value)
-            m2 = re.search(_findvar2_rx, value)
+            m1 = _findvar1_rx.search(value)
+            m2 = _findvar2_rx.search(value)
             if m1 and m2:
                 m = m1 if m1.start() < m2.start() else m2
             else:
@@ -351,8 +298,6 @@
                         notdone[name] = value
                     else:
                         try:
-                            if name in _ALWAYS_STR:
-                                raise ValueError
                             value = int(value)
                         except ValueError:
                             done[name] = value.strip()
@@ -368,12 +313,9 @@
                                 done[name] = value
 
             else:
-                # Adds unresolved variables to the done dict.
-                # This is disabled when called from distutils.sysconfig
-                if keep_unresolved:
-                    done[name] = value
                 # bogus variable reference (e.g. "prefix=$/opt/python");
                 # just drop it since we can't deal
+                done[name] = value
                 variables.remove(name)
 
     # strip spurious spaces
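
For context, a minimal sketch (not part of the diff) of the name/value
extraction _parse_makefile performs, reusing the variable-assignment regex
shown above; the helper name parse_simple_makefile is illustrative only, and
the sketch skips the $(VAR)/${VAR} resolution the real parser does:

    import re

    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")

    def parse_simple_makefile(text):
        # Collect NAME = value pairs, skipping comments and blank lines.
        done = {}
        for line in text.splitlines():
            if line.startswith('#') or not line.strip():
                continue
            m = _variable_rx.match(line)
            if m:
                name, value = m.group(1, 2)
                done[name] = value.strip()
        return done

    print(parse_simple_makefile("CC = gcc\n# comment\nPREFIX = /usr/local"))
    # -> {'CC': 'gcc', 'PREFIX': '/usr/local'}
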
@@ -391,20 +333,21 @@
     if _PYTHON_BUILD:
         return os.path.join(_sys_home or _PROJECT_BASE, "Makefile")
     if hasattr(sys, 'abiflags'):
-        config_dir_name = f'config-{_PY_VERSION_SHORT}{sys.abiflags}'
+        config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
     else:
         config_dir_name = 'config'
     if hasattr(sys.implementation, '_multiarch'):
-        config_dir_name += f'-{sys.implementation._multiarch}'
+        config_dir_name += '-%s' % sys.implementation._multiarch
     return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
 
 
 def _get_sysconfigdata_name():
-    multiarch = getattr(sys.implementation, '_multiarch', '')
-    return os.environ.get(
-        '_PYTHON_SYSCONFIGDATA_NAME',
-        f'_sysconfigdata_{sys.abiflags}_{sys.platform}_{multiarch}',
-    )
+    return os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
+        '_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
+        abi=sys.abiflags,
+        platform=sys.platform,
+        multiarch=getattr(sys.implementation, '_multiarch', ''),
+    ))
 
 
 def _generate_posix_vars():
@@ -416,19 +359,19 @@
     try:
         _parse_makefile(makefile, vars)
     except OSError as e:
-        msg = f"invalid Python installation: unable to open {makefile}"
+        msg = "invalid Python installation: unable to open %s" % makefile
         if hasattr(e, "strerror"):
-            msg = f"{msg} ({e.strerror})"
+            msg = msg + " (%s)" % e.strerror
         raise OSError(msg)
     # load the installed pyconfig.h:
     config_h = get_config_h_filename()
     try:
-        with open(config_h, encoding="utf-8") as f:
+        with open(config_h) as f:
             parse_config_h(f, vars)
     except OSError as e:
-        msg = f"invalid Python installation: unable to open {config_h}"
+        msg = "invalid Python installation: unable to open %s" % config_h
         if hasattr(e, "strerror"):
-            msg = f"{msg} ({e.strerror})"
+            msg = msg + " (%s)" % e.strerror
         raise OSError(msg)
     # On AIX, there are wrong paths to the linker scripts in the Makefile
     # -- these paths are relative to the Python source, but when installed
@@ -454,7 +397,7 @@
         module.build_time_vars = vars
         sys.modules[name] = module
 
-    pybuilddir = f'build/lib.{get_platform()}-{_PY_VERSION_SHORT}'
+    pybuilddir = 'build/lib.%s-%s' % (get_platform(), _PY_VERSION_SHORT)
     if hasattr(sys, "gettotalrefcount"):
         pybuilddir += '-pydebug'
     os.makedirs(pybuilddir, exist_ok=True)
@@ -481,15 +424,13 @@
 def _init_non_posix(vars):
     """Initialize the module as appropriate for NT"""
     # set basic install directories
-    import _imp
     vars['LIBDEST'] = get_path('stdlib')
     vars['BINLIBDEST'] = get_path('platstdlib')
     vars['INCLUDEPY'] = get_path('include')
-    vars['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
+    vars['EXT_SUFFIX'] = '.pyd'
     vars['EXE'] = '.exe'
     vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
     vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
-    vars['TZPATH'] = ''
 
 #
 # public APIs
@@ -517,8 +458,6 @@
         if m:
             n, v = m.group(1, 2)
             try:
-                if n in _ALWAYS_STR:
-                    raise ValueError
                 v = int(v)
             except ValueError:
                 pass
@@ -552,7 +491,7 @@
     return _SCHEME_KEYS
 
 
-def get_paths(scheme=get_default_scheme(), vars=None, expand=True):
+def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
     """Return a mapping containing an install scheme.
 
     ``scheme`` is the install scheme name. If not provided, it will
@@ -564,7 +503,7 @@
         return _INSTALL_SCHEMES[scheme]
 
 
-def get_path(name, scheme=get_default_scheme(), vars=None, expand=True):
+def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
     """Return a path corresponding to the scheme.
 
     ``scheme`` is the install scheme name.
@@ -604,24 +543,20 @@
         except AttributeError:
             # sys.abiflags may not be defined on all platforms.
             _CONFIG_VARS['abiflags'] = ''
-        try:
-            _CONFIG_VARS['py_version_nodot_plat'] = sys.winver.replace('.', '')
-        except AttributeError:
-            _CONFIG_VARS['py_version_nodot_plat'] = ''
 
         if os.name == 'nt':
             _init_non_posix(_CONFIG_VARS)
+            _CONFIG_VARS['TZPATH'] = ''
         if os.name == 'posix':
             _init_posix(_CONFIG_VARS)
         # For backward compatibility, see issue19555
         SO = _CONFIG_VARS.get('EXT_SUFFIX')
         if SO is not None:
             _CONFIG_VARS['SO'] = SO
-        if _HAS_USER_BASE:
-            # Setting 'userbase' is done below the call to the
-            # init function to enable using 'get_config_var' in
-            # the init-function.
-            _CONFIG_VARS['userbase'] = _getuserbase()
+        # Setting 'userbase' is done below the call to the
+        # init function to enable using 'get_config_var' in
+        # the init-function.
+        _CONFIG_VARS['userbase'] = _getuserbase()
 
         # Always convert srcdir to an absolute path
         srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE)
@@ -718,16 +653,16 @@
         # At least on Linux/Intel, 'machine' is the processor --
         # i386, etc.
         # XXX what about Alpha, SPARC, etc?
-        return  f"{osname}-{machine}"
+        return  "%s-%s" % (osname, machine)
     elif osname[:5] == "sunos":
         if release[0] >= "5":           # SunOS 5 == Solaris 2
             osname = "solaris"
-            release = f"{int(release[0]) - 3}.{release[2:]}"
+            release = "%d.%s" % (int(release[0]) - 3, release[2:])
             # We can't use "platform.architecture()[0]" because of a
             # bootstrap problem. We use a dict to get an error
             # if something suspicious happens.
             bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
-            machine += f".{bitness[sys.maxsize]}"
+            machine += ".%s" % bitness[sys.maxsize]
         # fall through to standard osname-release-machine representation
     elif osname[:3] == "aix":
         from _aix_support import aix_platform
@@ -745,44 +680,18 @@
                                             get_config_vars(),
                                             osname, release, machine)
 
-    return f"{osname}-{release}-{machine}"
+    return "%s-%s-%s" % (osname, release, machine)
 
 
 def get_python_version():
     return _PY_VERSION_SHORT
 
 
-def expand_makefile_vars(s, vars):
-    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
-    'string' according to 'vars' (a dictionary mapping variable names to
-    values).  Variables not present in 'vars' are silently expanded to the
-    empty string.  The variable values in 'vars' should not contain further
-    variable expansions; if 'vars' is the output of 'parse_makefile()',
-    you're fine.  Returns a variable-expanded version of 's'.
-    """
-    import re
-
-    # This algorithm does multiple expansion, so if vars['foo'] contains
-    # "${bar}", it will expand ${foo} to ${bar}, and then expand
-    # ${bar}... and so forth.  This is fine as long as 'vars' comes from
-    # 'parse_makefile()', which takes care of such expansions eagerly,
-    # according to make's variable expansion semantics.
-
-    while True:
-        m = re.search(_findvar1_rx, s) or re.search(_findvar2_rx, s)
-        if m:
-            (beg, end) = m.span()
-            s = s[0:beg] + vars.get(m.group(1)) + s[end:]
-        else:
-            break
-    return s
-
-
 def _print_dict(title, data):
     for index, (key, value) in enumerate(sorted(data.items())):
         if index == 0:
-            print(f'{title}: ')
-        print(f'\t{key} = "{value}"')
+            print('%s: ' % (title))
+        print('\t%s = "%s"' % (key, value))
 
 
 def _main():
@@ -790,9 +699,9 @@
     if '--generate-posix-vars' in sys.argv:
         _generate_posix_vars()
         return
-    print(f'Platform: "{get_platform()}"')
-    print(f'Python version: "{get_python_version()}"')
-    print(f'Current installation scheme: "{get_default_scheme()}"')
+    print('Platform: "%s"' % get_platform())
+    print('Python version: "%s"' % get_python_version())
+    print('Current installation scheme: "%s"' % _get_default_scheme())
     print()
     _print_dict('Paths', get_paths())
     print()
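
For context, a minimal sketch (not part of the diff) of the public sysconfig
API these hunks affect; get_paths(), get_path() and get_config_var() exist on
both sides of the revert, while get_default_scheme()/get_preferred_scheme()
only exist in the newer code being backed out:

    import sysconfig

    paths = sysconfig.get_paths()                  # default install scheme
    print(paths['purelib'])                        # e.g. .../lib/python3.9/site-packages
    print(sysconfig.get_path('scripts'))           # single key from the same scheme
    print(sysconfig.get_config_var('EXT_SUFFIX'))  # e.g. '.cpython-39-x86_64-linux-gnu.so'
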
diff --git a/common/py3-stdlib/tarfile.py b/common/py3-stdlib/tarfile.py
index 6ada9a0..1d15612 100755
--- a/common/py3-stdlib/tarfile.py
+++ b/common/py3-stdlib/tarfile.py
@@ -200,7 +200,6 @@
     # base-256 representation. This allows values up to (256**(digits-1))-1.
     # A 0o200 byte indicates a positive number, a 0o377 byte a negative
     # number.
-    original_n = n
     n = int(n)
     if 0 <= n < 8 ** (digits - 1):
         s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL
@@ -364,7 +363,7 @@
                 try:
                     import zlib
                 except ImportError:
-                    raise CompressionError("zlib module is not available") from None
+                    raise CompressionError("zlib module is not available")
                 self.zlib = zlib
                 self.crc = zlib.crc32(b"")
                 if mode == "r":
@@ -377,7 +376,7 @@
                 try:
                     import bz2
                 except ImportError:
-                    raise CompressionError("bz2 module is not available") from None
+                    raise CompressionError("bz2 module is not available")
                 if mode == "r":
                     self.dbuf = b""
                     self.cmp = bz2.BZ2Decompressor()
@@ -389,7 +388,7 @@
                 try:
                     import lzma
                 except ImportError:
-                    raise CompressionError("lzma module is not available") from None
+                    raise CompressionError("lzma module is not available")
                 if mode == "r":
                     self.dbuf = b""
                     self.cmp = lzma.LZMADecompressor()
@@ -542,8 +541,8 @@
                     break
             try:
                 buf = self.cmp.decompress(buf)
-            except self.exception as e:
-                raise ReadError("invalid compressed data") from e
+            except self.exception:
+                raise ReadError("invalid compressed data")
             t.append(buf)
             c += len(buf)
         t = b"".join(t)
@@ -888,24 +887,15 @@
         # Test number fields for values that exceed the field limit or values
         # that need to be stored as float.
         for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
-            needs_pax = False
+            if name in pax_headers:
+                # The pax header has priority. Avoid overflow.
+                info[name] = 0
+                continue
 
             val = info[name]
-            val_is_float = isinstance(val, float)
-            val_int = round(val) if val_is_float else val
-            if not 0 <= val_int < 8 ** (digits - 1):
-                # Avoid overflow.
-                info[name] = 0
-                needs_pax = True
-            elif val_is_float:
-                # Put rounded value in ustar header, and full
-                # precision value in pax header.
-                info[name] = val_int
-                needs_pax = True
-
-            # The existing pax header has priority.
-            if needs_pax and name not in pax_headers:
+            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
                 pax_headers[name] = str(val)
+                info[name] = 0
 
         # Create a pax extended header if necessary.
         if pax_headers:
@@ -1174,8 +1164,8 @@
         # Fetch the next header and process it.
         try:
             next = self.fromtarfile(tarfile)
-        except HeaderError as e:
-            raise SubsequentHeaderError(str(e)) from None
+        except HeaderError:
+            raise SubsequentHeaderError("missing or bad subsequent header")
 
         # Patch the TarInfo object from the next header with
         # the longname information.
@@ -1287,8 +1277,8 @@
         # Fetch the next header.
         try:
             next = self.fromtarfile(tarfile)
-        except HeaderError as e:
-            raise SubsequentHeaderError(str(e)) from None
+        except HeaderError:
+            raise SubsequentHeaderError("missing or bad subsequent header")
 
         # Process GNU sparse information.
         if "GNU.sparse.map" in pax_headers:
@@ -1543,7 +1533,7 @@
                         self.fileobj.seek(self.offset)
                         break
                     except HeaderError as e:
-                        raise ReadError(str(e)) from None
+                        raise ReadError(str(e))
 
             if self.mode in ("a", "w", "x"):
                 self._loaded = True
@@ -1613,20 +1603,17 @@
             # Find out which *open() is appropriate for opening the file.
             def not_compressed(comptype):
                 return cls.OPEN_METH[comptype] == 'taropen'
-            error_msgs = []
             for comptype in sorted(cls.OPEN_METH, key=not_compressed):
                 func = getattr(cls, cls.OPEN_METH[comptype])
                 if fileobj is not None:
                     saved_pos = fileobj.tell()
                 try:
                     return func(name, "r", fileobj, **kwargs)
-                except (ReadError, CompressionError) as e:
-                    error_msgs.append(f'- method {comptype}: {e!r}')
+                except (ReadError, CompressionError):
                     if fileobj is not None:
                         fileobj.seek(saved_pos)
                     continue
-            error_msgs_summary = '\n'.join(error_msgs)
-            raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}")
+            raise ReadError("file could not be opened successfully")
 
         elif ":" in mode:
             filemode, comptype = mode.split(":", 1)
@@ -1682,21 +1669,21 @@
         try:
             from gzip import GzipFile
         except ImportError:
-            raise CompressionError("gzip module is not available") from None
+            raise CompressionError("gzip module is not available")
 
         try:
             fileobj = GzipFile(name, mode + "b", compresslevel, fileobj)
-        except OSError as e:
+        except OSError:
             if fileobj is not None and mode == 'r':
-                raise ReadError("not a gzip file") from e
+                raise ReadError("not a gzip file")
             raise
 
         try:
             t = cls.taropen(name, mode, fileobj, **kwargs)
-        except OSError as e:
+        except OSError:
             fileobj.close()
             if mode == 'r':
-                raise ReadError("not a gzip file") from e
+                raise ReadError("not a gzip file")
             raise
         except:
             fileobj.close()
@@ -1715,16 +1702,16 @@
         try:
             from bz2 import BZ2File
         except ImportError:
-            raise CompressionError("bz2 module is not available") from None
+            raise CompressionError("bz2 module is not available")
 
         fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel)
 
         try:
             t = cls.taropen(name, mode, fileobj, **kwargs)
-        except (OSError, EOFError) as e:
+        except (OSError, EOFError):
             fileobj.close()
             if mode == 'r':
-                raise ReadError("not a bzip2 file") from e
+                raise ReadError("not a bzip2 file")
             raise
         except:
             fileobj.close()
@@ -1743,16 +1730,16 @@
         try:
             from lzma import LZMAFile, LZMAError
         except ImportError:
-            raise CompressionError("lzma module is not available") from None
+            raise CompressionError("lzma module is not available")
 
         fileobj = LZMAFile(fileobj or name, mode, preset=preset)
 
         try:
             t = cls.taropen(name, mode, fileobj, **kwargs)
-        except (LZMAError, EOFError) as e:
+        except (LZMAError, EOFError):
             fileobj.close()
             if mode == 'r':
-                raise ReadError("not an lzma file") from e
+                raise ReadError("not an lzma file")
             raise
         except:
             fileobj.close()
@@ -1798,7 +1785,7 @@
            than once in the archive, its last occurrence is assumed to be the
            most up-to-date version.
         """
-        tarinfo = self._getmember(name.rstrip('/'))
+        tarinfo = self._getmember(name)
         if tarinfo is None:
             raise KeyError("filename %r not found" % name)
         return tarinfo
@@ -2266,7 +2253,7 @@
                 self._extract_member(self._find_link_target(tarinfo),
                                      targetpath)
             except KeyError:
-                raise ExtractError("unable to resolve link inside archive") from None
+                raise ExtractError("unable to resolve link inside archive")
 
     def chown(self, tarinfo, targetpath, numeric_owner):
         """Set owner of targetpath according to tarinfo. If numeric_owner
@@ -2294,16 +2281,16 @@
                     os.lchown(targetpath, u, g)
                 else:
                     os.chown(targetpath, u, g)
-            except OSError as e:
-                raise ExtractError("could not change owner") from e
+            except OSError:
+                raise ExtractError("could not change owner")
 
     def chmod(self, tarinfo, targetpath):
         """Set file permissions of targetpath according to tarinfo.
         """
         try:
             os.chmod(targetpath, tarinfo.mode)
-        except OSError as e:
-            raise ExtractError("could not change mode") from e
+        except OSError:
+            raise ExtractError("could not change mode")
 
     def utime(self, tarinfo, targetpath):
         """Set modification time of targetpath according to tarinfo.
@@ -2312,8 +2299,8 @@
             return
         try:
             os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
-        except OSError as e:
-            raise ExtractError("could not change modification time") from e
+        except OSError:
+            raise ExtractError("could not change modification time")
 
     #--------------------------------------------------------------------------
     def next(self):
@@ -2349,24 +2336,15 @@
                     self.offset += BLOCKSIZE
                     continue
                 elif self.offset == 0:
-                    raise ReadError(str(e)) from None
+                    raise ReadError(str(e))
             except EmptyHeaderError:
                 if self.offset == 0:
-                    raise ReadError("empty file") from None
+                    raise ReadError("empty file")
             except TruncatedHeaderError as e:
                 if self.offset == 0:
-                    raise ReadError(str(e)) from None
+                    raise ReadError(str(e))
             except SubsequentHeaderError as e:
-                raise ReadError(str(e)) from None
-            except Exception as e:
-                try:
-                    import zlib
-                    if isinstance(e, zlib.error):
-                        raise ReadError(f'zlib error: {e}') from None
-                    else:
-                        raise e
-                except ImportError:
-                    raise e
+                raise ReadError(str(e))
             break
 
         if tarinfo is not None:
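
For context, a minimal sketch (not part of the diff) of how these error paths
surface to callers; the file name is a placeholder. With the revert, a failed
open() reports a bare "file could not be opened successfully" instead of the
per-compression summary, and ReadError no longer chains the original exception:

    import tarfile

    try:
        tf = tarfile.open("archive.tar.gz")   # placeholder path
    except tarfile.ReadError as err:
        print("open failed:", err)            # err.__cause__ is unset after the revert
    else:
        print(tf.getnames())
        tf.close()
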
diff --git a/common/py3-stdlib/tempfile.py b/common/py3-stdlib/tempfile.py
index 7b68212..770f72c 100644
--- a/common/py3-stdlib/tempfile.py
+++ b/common/py3-stdlib/tempfile.py
@@ -88,10 +88,6 @@
     for arg in args:
         if arg is None:
             continue
-
-        if isinstance(arg, _os.PathLike):
-            arg = _os.fspath(arg)
-
         if isinstance(arg, bytes):
             if return_type is str:
                 raise TypeError("Can't mix bytes and non-bytes in "
@@ -103,11 +99,7 @@
                                 "path components.")
             return_type = str
     if return_type is None:
-        if tempdir is None or isinstance(tempdir, str):
-            return str  # tempfile APIs return a str by default.
-        else:
-            # we could check for bytes but it'll fail later on anyway
-            return bytes
+        return str  # tempfile APIs return a str by default.
     return return_type
 
 
@@ -151,7 +143,10 @@
         return self
 
     def __next__(self):
-        return ''.join(self.rng.choices(self.characters, k=8))
+        c = self.characters
+        choose = self.rng.choice
+        letters = [choose(c) for dummy in range(8)]
+        return ''.join(letters)
 
 def _candidate_tempdir_list():
     """Generate a list of candidate temporary directories which
@@ -273,17 +268,17 @@
 # User visible interfaces.
 
 def gettempprefix():
-    """The default prefix for temporary directories as string."""
-    return _os.fsdecode(template)
+    """The default prefix for temporary directories."""
+    return template
 
 def gettempprefixb():
     """The default prefix for temporary directories as bytes."""
-    return _os.fsencode(template)
+    return _os.fsencode(gettempprefix())
 
 tempdir = None
 
-def _gettempdir():
-    """Private accessor for tempfile.tempdir."""
+def gettempdir():
+    """Accessor for tempfile.tempdir."""
     global tempdir
     if tempdir is None:
         _once_lock.acquire()
@@ -294,13 +289,9 @@
             _once_lock.release()
     return tempdir
 
-def gettempdir():
-    """Returns tempfile.tempdir as str."""
-    return _os.fsdecode(_gettempdir())
-
 def gettempdirb():
-    """Returns tempfile.tempdir as bytes."""
-    return _os.fsencode(_gettempdir())
+    """A bytes version of tempfile.gettempdir()."""
+    return _os.fsencode(gettempdir())
 
 def mkstemp(suffix=None, prefix=None, dir=None, text=False):
     """User-callable function to create and return a unique temporary
@@ -547,9 +538,6 @@
     if _os.name == 'nt' and delete:
         flags |= _os.O_TEMPORARY
 
-    if "b" not in mode:
-        encoding = _io.text_encoding(encoding)
-
     (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
     try:
         file = _io.open(fd, mode, buffering=buffering,
@@ -590,9 +578,6 @@
         """
         global _O_TMPFILE_WORKS
 
-        if "b" not in mode:
-            encoding = _io.text_encoding(encoding)
-
         prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
 
         flags = _bin_openflags
@@ -648,7 +633,6 @@
         if 'b' in mode:
             self._file = _io.BytesIO()
         else:
-            encoding = _io.text_encoding(encoding)
             self._file = _io.TextIOWrapper(_io.BytesIO(),
                             encoding=encoding, errors=errors,
                             newline=newline)
@@ -779,7 +763,7 @@
         return rv
 
 
-class TemporaryDirectory:
+class TemporaryDirectory(object):
     """Create and return a temporary directory.  This has the same
     behavior as mkdtemp but can be used as a context manager.  For
     example:
@@ -791,17 +775,14 @@
     in it are removed.
     """
 
-    def __init__(self, suffix=None, prefix=None, dir=None,
-                 ignore_cleanup_errors=False):
+    def __init__(self, suffix=None, prefix=None, dir=None):
         self.name = mkdtemp(suffix, prefix, dir)
-        self._ignore_cleanup_errors = ignore_cleanup_errors
         self._finalizer = _weakref.finalize(
             self, self._cleanup, self.name,
-            warn_message="Implicitly cleaning up {!r}".format(self),
-            ignore_errors=self._ignore_cleanup_errors)
+            warn_message="Implicitly cleaning up {!r}".format(self))
 
     @classmethod
-    def _rmtree(cls, name, ignore_errors=False):
+    def _rmtree(cls, name):
         def onerror(func, path, exc_info):
             if issubclass(exc_info[0], PermissionError):
                 def resetperms(path):
@@ -820,20 +801,19 @@
                         _os.unlink(path)
                     # PermissionError is raised on FreeBSD for directories
                     except (IsADirectoryError, PermissionError):
-                        cls._rmtree(path, ignore_errors=ignore_errors)
+                        cls._rmtree(path)
                 except FileNotFoundError:
                     pass
             elif issubclass(exc_info[0], FileNotFoundError):
                 pass
             else:
-                if not ignore_errors:
-                    raise
+                raise
 
         _shutil.rmtree(name, onerror=onerror)
 
     @classmethod
-    def _cleanup(cls, name, warn_message, ignore_errors=False):
-        cls._rmtree(name, ignore_errors=ignore_errors)
+    def _cleanup(cls, name, warn_message):
+        cls._rmtree(name)
         _warnings.warn(warn_message, ResourceWarning)
 
     def __repr__(self):
@@ -846,7 +826,7 @@
         self.cleanup()
 
     def cleanup(self):
-        if self._finalizer.detach() or _os.path.exists(self.name):
-            self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors)
+        if self._finalizer.detach():
+            self._rmtree(self.name)
 
     __class_getitem__ = classmethod(_types.GenericAlias)
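
For context, a minimal sketch (not part of the diff) of the TemporaryDirectory
behavior this file reverts to: there is no ignore_cleanup_errors parameter, so
errors during cleanup propagate, and the directory is removed when the context
exits:

    import os
    import tempfile

    with tempfile.TemporaryDirectory(prefix="demo-") as d:
        with open(os.path.join(d, "scratch.txt"), "w") as f:
            f.write("temporary data")
        print(os.listdir(d))   # ['scratch.txt']
    # the directory and its contents are gone here
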
diff --git a/common/py3-stdlib/textwrap.py b/common/py3-stdlib/textwrap.py
index 841de9b..30e693c 100644
--- a/common/py3-stdlib/textwrap.py
+++ b/common/py3-stdlib/textwrap.py
@@ -215,16 +215,8 @@
         # If we're allowed to break long words, then do so: put as much
         # of the next chunk onto the current line as will fit.
         if self.break_long_words:
-            end = space_left
-            chunk = reversed_chunks[-1]
-            if self.break_on_hyphens and len(chunk) > space_left:
-                # break after last hyphen, but only if there are
-                # non-hyphens before it
-                hyphen = chunk.rfind('-', 0, space_left)
-                if hyphen > 0 and any(c != '-' for c in chunk[:hyphen]):
-                    end = hyphen + 1
-            cur_line.append(chunk[:end])
-            reversed_chunks[-1] = chunk[end:]
+            cur_line.append(reversed_chunks[-1][:space_left])
+            reversed_chunks[-1] = reversed_chunks[-1][space_left:]
 
         # Otherwise, we have to preserve the long word intact.  Only add
         # it to the current line if there's nothing already there --
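
For context, a minimal sketch (not part of the diff) of break_long_words, the
code path this hunk simplifies; the reverted version always cuts at the
remaining width, whereas the newer code preferred breaking after a hyphen
inside the chunk when one was available:

    import textwrap

    token = "supercalifragilisticexpialidocious"
    print(textwrap.wrap(token, width=10, break_long_words=True))
    # -> ['supercalif', 'ragilistic', 'expialidoc', 'ious']
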
diff --git a/common/py3-stdlib/threading.py b/common/py3-stdlib/threading.py
index 2d89742..d96d99a 100644
--- a/common/py3-stdlib/threading.py
+++ b/common/py3-stdlib/threading.py
@@ -28,7 +28,7 @@
            'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
            'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError',
            'setprofile', 'settrace', 'local', 'stack_size',
-           'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile']
+           'excepthook', 'ExceptHookArgs']
 
 # Rename some stuff so "from threading import *" is safe
 _start_new_thread = _thread.start_new_thread
@@ -65,10 +65,6 @@
     global _profile_hook
     _profile_hook = func
 
-def getprofile():
-    """Get the profiler function as set by threading.setprofile()."""
-    return _profile_hook
-
 def settrace(func):
     """Set a trace function for all threads started from the threading module.
 
@@ -79,10 +75,6 @@
     global _trace_hook
     _trace_hook = func
 
-def gettrace():
-    """Get the trace function as set by threading.settrace()."""
-    return _trace_hook
-
 # Synchronization classes
 
 Lock = _allocate_lock
@@ -388,16 +380,7 @@
         """
         self.notify(len(self._waiters))
 
-    def notifyAll(self):
-        """Wake up all threads waiting on this condition.
-
-        This method is deprecated, use notify_all() instead.
-
-        """
-        import warnings
-        warnings.warn('notifyAll() is deprecated, use notify_all() instead',
-                      DeprecationWarning, stacklevel=2)
-        self.notify_all()
+    notifyAll = notify_all
 
 
 class Semaphore:
@@ -547,16 +530,7 @@
         """Return true if and only if the internal flag is true."""
         return self._flag
 
-    def isSet(self):
-        """Return true if and only if the internal flag is true.
-
-        This method is deprecated, use is_set() instead.
-
-        """
-        import warnings
-        warnings.warn('isSet() is deprecated, use is_set() instead',
-                      DeprecationWarning, stacklevel=2)
-        return self.is_set()
+    isSet = is_set
 
     def set(self):
         """Set the internal flag to true.
@@ -634,7 +608,7 @@
         self._action = action
         self._timeout = timeout
         self._parties = parties
-        self._state = 0  # 0 filling, 1 draining, -1 resetting, -2 broken
+        self._state = 0 #0 filling, 1 draining, -1 resetting, -2 broken
         self._count = 0
 
     def wait(self, timeout=None):
@@ -771,39 +745,22 @@
 
 
 # Helper to generate new thread names
-_counter = _count(1).__next__
-def _newname(name_template):
-    return name_template % _counter()
+_counter = _count().__next__
+_counter() # Consume 0 so first non-main thread has id 1.
+def _newname(template="Thread-%d"):
+    return template % _counter()
 
-# Active thread administration.
-#
-# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like
-# threading.enumerate().
-_active_limbo_lock = RLock()
+# Active thread administration
+_active_limbo_lock = _allocate_lock()
 _active = {}    # maps thread id to Thread object
 _limbo = {}
 _dangling = WeakSet()
-
 # Set of Thread._tstate_lock locks of non-daemon threads used by _shutdown()
 # to wait until all Python thread states get deleted:
 # see Thread._set_tstate_lock().
 _shutdown_locks_lock = _allocate_lock()
 _shutdown_locks = set()
 
-def _maintain_shutdown_locks():
-    """
-    Drop any shutdown locks that don't correspond to running threads anymore.
-
-    Calling this from time to time avoids an ever-growing _shutdown_locks
-    set when Thread objects are not joined explicitly. See bpo-37788.
-
-    This must be called with _shutdown_locks_lock acquired.
-    """
-    # If a lock was released, the corresponding thread has exited
-    to_remove = [lock for lock in _shutdown_locks if not lock.locked()]
-    _shutdown_locks.difference_update(to_remove)
-
-
 # Main class for threads
 
 class Thread:
@@ -843,19 +800,8 @@
         assert group is None, "group argument must be None for now"
         if kwargs is None:
             kwargs = {}
-        if name:
-            name = str(name)
-        else:
-            name = _newname("Thread-%d")
-            if target is not None:
-                try:
-                    target_name = target.__name__
-                    name += f" ({target_name})"
-                except AttributeError:
-                    pass
-
         self._target = target
-        self._name = name
+        self._name = str(name or _newname())
         self._args = args
         self._kwargs = kwargs
         if daemon is not None:
@@ -942,7 +888,7 @@
 
         """
         try:
-            if self._target is not None:
+            if self._target:
                 self._target(*self._args, **self._kwargs)
         finally:
             # Avoid a refcycle if the thread is running a function with
@@ -986,7 +932,6 @@
 
         if not self.daemon:
             with _shutdown_locks_lock:
-                _maintain_shutdown_locks()
                 _shutdown_locks.add(self._tstate_lock)
 
     def _bootstrap_inner(self):
@@ -1042,8 +987,7 @@
         self._tstate_lock = None
         if not self.daemon:
             with _shutdown_locks_lock:
-                # Remove our lock and other released locks from _shutdown_locks
-                _maintain_shutdown_locks()
+                _shutdown_locks.discard(lock)
 
     def _delete(self):
         "Remove current thread from the dict of currently running threads."
@@ -1100,24 +1044,11 @@
         # If the lock is acquired, the C code is done, and self._stop() is
         # called.  That sets ._is_stopped to True, and ._tstate_lock to None.
         lock = self._tstate_lock
-        if lock is None:
-            # already determined that the C code is done
+        if lock is None:  # already determined that the C code is done
             assert self._is_stopped
-            return
-
-        try:
-            if lock.acquire(block, timeout):
-                lock.release()
-                self._stop()
-        except:
-            if lock.locked():
-                # bpo-45274: lock.acquire() acquired the lock, but the function
-                # was interrupted with an exception before reaching the
-                # lock.release(). It can happen if a signal handler raises an
-                # exception, like CTRL+C which raises KeyboardInterrupt.
-                lock.release()
-                self._stop()
-            raise
+        elif lock.acquire(block, timeout):
+            lock.release()
+            self._stop()
 
     @property
     def name(self):
@@ -1163,8 +1094,8 @@
         """Return whether the thread is alive.
 
         This method returns True just before the run() method starts until just
-        after the run() method terminates. See also the module function
-        enumerate().
+        after the run() method terminates. The module function enumerate()
+        returns a list of all alive threads.
 
         """
         assert self._initialized, "Thread.__init__() not called"
@@ -1197,47 +1128,15 @@
         self._daemonic = daemonic
 
     def isDaemon(self):
-        """Return whether this thread is a daemon.
-
-        This method is deprecated, use the daemon attribute instead.
-
-        """
-        import warnings
-        warnings.warn('isDaemon() is deprecated, get the daemon attribute instead',
-                      DeprecationWarning, stacklevel=2)
         return self.daemon
 
     def setDaemon(self, daemonic):
-        """Set whether this thread is a daemon.
-
-        This method is deprecated, use the .daemon property instead.
-
-        """
-        import warnings
-        warnings.warn('setDaemon() is deprecated, set the daemon attribute instead',
-                      DeprecationWarning, stacklevel=2)
         self.daemon = daemonic
 
     def getName(self):
-        """Return a string used for identification purposes only.
-
-        This method is deprecated, use the name attribute instead.
-
-        """
-        import warnings
-        warnings.warn('getName() is deprecated, get the name attribute instead',
-                      DeprecationWarning, stacklevel=2)
         return self.name
 
     def setName(self, name):
-        """Set the name string for this thread.
-
-        This method is deprecated, use the name attribute instead.
-
-        """
-        import warnings
-        warnings.warn('setName() is deprecated, set the name attribute instead',
-                      DeprecationWarning, stacklevel=2)
         self.name = name
 
 
@@ -1287,10 +1186,6 @@
         stderr.flush()
 
 
-# Original value of threading.excepthook
-__excepthook__ = excepthook
-
-
 def _make_invoke_excepthook():
     # Create a local namespace to ensure that variables remain alive
     # when _invoke_excepthook() is called, even if it is called late during
@@ -1432,16 +1327,7 @@
     except KeyError:
         return _DummyThread()
 
-def currentThread():
-    """Return the current Thread object, corresponding to the caller's thread of control.
-
-    This function is deprecated, use current_thread() instead.
-
-    """
-    import warnings
-    warnings.warn('currentThread() is deprecated, use current_thread() instead',
-                  DeprecationWarning, stacklevel=2)
-    return current_thread()
+currentThread = current_thread
 
 def active_count():
     """Return the number of Thread objects currently alive.
@@ -1453,16 +1339,7 @@
     with _active_limbo_lock:
         return len(_active) + len(_limbo)
 
-def activeCount():
-    """Return the number of Thread objects currently alive.
-
-    This function is deprecated, use active_count() instead.
-
-    """
-    import warnings
-    warnings.warn('activeCount() is deprecated, use active_count() instead',
-                  DeprecationWarning, stacklevel=2)
-    return active_count()
+activeCount = active_count
 
 def _enumerate():
     # Same as enumerate(), but without the lock. Internal use only.
@@ -1523,29 +1400,20 @@
 
     global _SHUTTING_DOWN
     _SHUTTING_DOWN = True
+    # Main thread
+    tlock = _main_thread._tstate_lock
+    # The main thread isn't finished yet, so its thread state lock can't have
+    # been released.
+    assert tlock is not None
+    assert tlock.locked()
+    tlock.release()
+    _main_thread._stop()
 
     # Call registered threading atexit functions before threads are joined.
     # Order is reversed, similar to atexit.
     for atexit_call in reversed(_threading_atexits):
         atexit_call()
 
-    # Main thread
-    if _main_thread.ident == get_ident():
-        tlock = _main_thread._tstate_lock
-        # The main thread isn't finished yet, so its thread state lock can't
-        # have been released.
-        assert tlock is not None
-        assert tlock.locked()
-        tlock.release()
-        _main_thread._stop()
-    else:
-        # bpo-1596321: _shutdown() must be called in the main thread.
-        # If the threading module was not imported by the main thread,
-        # _main_thread is the thread which imported the threading module.
-        # In this case, ignore _main_thread, similar behavior than for threads
-        # spawned by C libraries or using _thread.start_new_thread().
-        pass
-
     # Join all non-daemon threads
     while True:
         with _shutdown_locks_lock:
@@ -1556,7 +1424,7 @@
             break
 
         for lock in locks:
-            # mimic Thread.join()
+            # mimic Thread.join()
             lock.acquire()
             lock.release()
 
@@ -1589,7 +1457,7 @@
     # by another (non-forked) thread.  http://bugs.python.org/issue874900
     global _active_limbo_lock, _main_thread
     global _shutdown_locks_lock, _shutdown_locks
-    _active_limbo_lock = RLock()
+    _active_limbo_lock = _allocate_lock()
 
     # fork() only copied the current thread; clear references to others.
     new_active = {}
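
For context, a minimal sketch (not part of the diff) of the API surface these
hunks revert: currentThread/activeCount/notifyAll become plain aliases again
(no DeprecationWarning), and auto-generated thread names are plain "Thread-N"
with no target function name appended:

    import threading

    def work():
        print(threading.current_thread().name)   # e.g. "Thread-1"

    t = threading.Thread(target=work)
    t.start()
    t.join()
    print(threading.activeCount())   # plain alias for active_count() here
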
diff --git a/common/py3-stdlib/timeit.py b/common/py3-stdlib/timeit.py
index 9dfd454..6c3ec01 100755
--- a/common/py3-stdlib/timeit.py
+++ b/common/py3-stdlib/timeit.py
@@ -72,7 +72,6 @@
     _t0 = _timer()
     for _i in _it:
         {stmt}
-        pass
     _t1 = _timer()
     return _t1 - _t0
 """
diff --git a/common/py3-stdlib/token.py b/common/py3-stdlib/token.py
index 9d0c0bf..493bf04 100644
--- a/common/py3-stdlib/token.py
+++ b/common/py3-stdlib/token.py
@@ -62,13 +62,12 @@
 ASYNC = 56
 TYPE_IGNORE = 57
 TYPE_COMMENT = 58
-SOFT_KEYWORD = 59
 # These aren't used by the C tokenizer but are needed for tokenize.py
-ERRORTOKEN = 60
-COMMENT = 61
-NL = 62
-ENCODING = 63
-N_TOKENS = 64
+ERRORTOKEN = 59
+COMMENT = 60
+NL = 61
+ENCODING = 62
+N_TOKENS = 63
 # Special definitions for cooperation with parser
 NT_OFFSET = 256
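
For context, the renumbering above shifts every value after TYPE_COMMENT, so
consumers should resolve tokens through the name tables rather than hard-coded
numbers; a minimal sketch (not part of the diff):

    import token

    # the numeric value differs between these two stdlib versions,
    # but the symbolic lookup is stable
    print(token.COMMENT, token.tok_name[token.COMMENT])
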
 
diff --git a/common/py3-stdlib/tokenize.py b/common/py3-stdlib/tokenize.py
index 7d7736f..1aee21b 100644
--- a/common/py3-stdlib/tokenize.py
+++ b/common/py3-stdlib/tokenize.py
@@ -27,7 +27,6 @@
 from builtins import open as _builtin_open
 from codecs import lookup, BOM_UTF8
 import collections
-import functools
 from io import TextIOWrapper
 import itertools as _itertools
 import re
@@ -96,7 +95,6 @@
                 result.add(''.join(u))
     return result
 
-@functools.lru_cache
 def _compile(expr):
     return re.compile(expr, re.UNICODE)
 
@@ -604,7 +602,7 @@
                 pos += 1
 
     # Add an implicit NEWLINE if the input doesn't end in one
-    if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
+    if last_line and last_line[-1] not in '\r\n':
         yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
     for indent in indents[1:]:                 # pop remaining indent levels
         yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
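
For context, a minimal sketch (not part of the diff) of the tokenizer loop the
NEWLINE hunk above affects; generate_tokens() takes a readline callable and
yields TokenInfo tuples, and with the revert a final comment-only line still
receives the implicit NEWLINE:

    import io
    import tokenize

    src = "x = 1  # no trailing newline"
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))
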
diff --git a/common/py3-stdlib/trace.py b/common/py3-stdlib/trace.py
index 2cf3643..c505d8b 100755
--- a/common/py3-stdlib/trace.py
+++ b/common/py3-stdlib/trace.py
@@ -116,7 +116,7 @@
         return 0
 
 def _modname(path):
-    """Return a plausible module name for the path."""
+    """Return a plausible module name for the patch."""
 
     base = os.path.basename(path)
     filename, ext = os.path.splitext(base)
diff --git a/common/py3-stdlib/traceback.py b/common/py3-stdlib/traceback.py
index d6a010f..fb34de9 100644
--- a/common/py3-stdlib/traceback.py
+++ b/common/py3-stdlib/traceback.py
@@ -84,25 +84,7 @@
     "another exception occurred:\n\n")
 
 
-class _Sentinel:
-    def __repr__(self):
-        return "<implicit>"
-
-_sentinel = _Sentinel()
-
-def _parse_value_tb(exc, value, tb):
-    if (value is _sentinel) != (tb is _sentinel):
-        raise ValueError("Both or neither of value and tb must be given")
-    if value is tb is _sentinel:
-        if exc is not None:
-            return exc, exc.__traceback__
-        else:
-            return None, None
-    return value, tb
-
-
-def print_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
-                    file=None, chain=True):
+def print_exception(etype, value, tb, limit=None, file=None, chain=True):
     """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
 
     This differs from print_tb() in the following ways: (1) if
@@ -113,16 +95,17 @@
     occurred with a caret on the next line indicating the approximate
     position of the error.
     """
-    value, tb = _parse_value_tb(exc, value, tb)
+    # format_exception has ignored etype for some time, and code such as cgitb
+    # passes in bogus values as a result. For compatibility with such code we
+    # ignore it here (rather than in the new TracebackException API).
     if file is None:
         file = sys.stderr
-    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
-    for line in te.format(chain=chain):
+    for line in TracebackException(
+            type(value), value, tb, limit=limit).format(chain=chain):
         print(line, file=file, end="")
 
 
-def format_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
-                     chain=True):
+def format_exception(etype, value, tb, limit=None, chain=True):
     """Format a stack trace and the exception information.
 
     The arguments have the same meaning as the corresponding arguments
@@ -131,15 +114,19 @@
     these lines are concatenated and printed, exactly the same text is
     printed as does print_exception().
     """
-    value, tb = _parse_value_tb(exc, value, tb)
-    te = TracebackException(type(value), value, tb, limit=limit, compact=True)
-    return list(te.format(chain=chain))
+    # format_exception has ignored etype for some time, and code such as cgitb
+    # passes in bogus values as a result. For compatibility with such code we
+    # ignore it here (rather than in the new TracebackException API).
+    return list(TracebackException(
+        type(value), value, tb, limit=limit).format(chain=chain))
 
 
-def format_exception_only(exc, /, value=_sentinel):
+def format_exception_only(etype, value):
     """Format the exception part of a traceback.
 
-    The return value is a list of strings, each ending in a newline.
+    The arguments are the exception type and value such as given by
+    sys.last_type and sys.last_value. The return value is a list of
+    strings, each ending in a newline.
 
     Normally, the list contains a single string; however, for
     SyntaxError exceptions, it contains several lines that (when
@@ -150,10 +137,7 @@
     string in the list.
 
     """
-    if value is _sentinel:
-        value = exc
-    te = TracebackException(type(value), value, None, compact=True)
-    return list(te.format_exception_only())
+    return list(TracebackException(etype, value, None).format_exception_only())
 
 
 # -- not official API but folk probably use these two functions.
@@ -301,10 +285,9 @@
     @property
     def line(self):
         if self._line is None:
-            if self.lineno is None:
-                return None
-            self._line = linecache.getline(self.filename, self.lineno)
-        return self._line.strip()
+            self._line = linecache.getline(self.filename, self.lineno).strip()
+        return self._line
+
 
 def walk_stack(f):
     """Walk a stack yielding the frame and line number for each frame.
@@ -475,29 +458,52 @@
       occurred.
     - :attr:`lineno` For syntax errors - the linenumber where the error
       occurred.
-    - :attr:`end_lineno` For syntax errors - the end linenumber where the error
-      occurred. Can be `None` if not present.
     - :attr:`text` For syntax errors - the text where the error
       occurred.
     - :attr:`offset` For syntax errors - the offset into the text where the
       error occurred.
-    - :attr:`end_offset` For syntax errors - the offset into the text where the
-      error occurred. Can be `None` if not present.
     - :attr:`msg` For syntax errors - the compiler error message.
     """
 
     def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None,
-            lookup_lines=True, capture_locals=False, compact=False,
-            _seen=None):
+            lookup_lines=True, capture_locals=False, _seen=None):
         # NB: we need to accept exc_traceback, exc_value, exc_traceback to
         # permit backwards compat with the existing API, otherwise we
         # need stub thunk objects just to glue it together.
         # Handle loops in __cause__ or __context__.
-        is_recursive_call = _seen is not None
         if _seen is None:
             _seen = set()
         _seen.add(id(exc_value))
-
+        # Gracefully handle (the way Python 2.4 and earlier did) the case of
+        # being called with no type or value (None, None, None).
+        if (exc_value and exc_value.__cause__ is not None
+            and id(exc_value.__cause__) not in _seen):
+            cause = TracebackException(
+                type(exc_value.__cause__),
+                exc_value.__cause__,
+                exc_value.__cause__.__traceback__,
+                limit=limit,
+                lookup_lines=False,
+                capture_locals=capture_locals,
+                _seen=_seen)
+        else:
+            cause = None
+        if (exc_value and exc_value.__context__ is not None
+            and id(exc_value.__context__) not in _seen):
+            context = TracebackException(
+                type(exc_value.__context__),
+                exc_value.__context__,
+                exc_value.__context__.__traceback__,
+                limit=limit,
+                lookup_lines=False,
+                capture_locals=capture_locals,
+                _seen=_seen)
+        else:
+            context = None
+        self.__cause__ = cause
+        self.__context__ = context
+        self.__suppress_context__ = \
+            exc_value.__suppress_context__ if exc_value else False
         # TODO: locals.
         self.stack = StackSummary.extract(
             walk_tb(exc_traceback), limit=limit, lookup_lines=lookup_lines,
@@ -509,62 +515,12 @@
         if exc_type and issubclass(exc_type, SyntaxError):
             # Handle SyntaxError's specially
             self.filename = exc_value.filename
-            lno = exc_value.lineno
-            self.lineno = str(lno) if lno is not None else None
-            end_lno = exc_value.end_lineno
-            self.end_lineno = str(end_lno) if end_lno is not None else None
+            self.lineno = str(exc_value.lineno)
             self.text = exc_value.text
             self.offset = exc_value.offset
-            self.end_offset = exc_value.end_offset
             self.msg = exc_value.msg
         if lookup_lines:
             self._load_lines()
-        self.__suppress_context__ = \
-            exc_value.__suppress_context__ if exc_value is not None else False
-
-        # Convert __cause__ and __context__ to `TracebackExceptions`s, use a
-        # queue to avoid recursion (only the top-level call gets _seen == None)
-        if not is_recursive_call:
-            queue = [(self, exc_value)]
-            while queue:
-                te, e = queue.pop()
-                if (e and e.__cause__ is not None
-                    and id(e.__cause__) not in _seen):
-                    cause = TracebackException(
-                        type(e.__cause__),
-                        e.__cause__,
-                        e.__cause__.__traceback__,
-                        limit=limit,
-                        lookup_lines=lookup_lines,
-                        capture_locals=capture_locals,
-                        _seen=_seen)
-                else:
-                    cause = None
-
-                if compact:
-                    need_context = (cause is None and
-                                    e is not None and
-                                    not e.__suppress_context__)
-                else:
-                    need_context = True
-                if (e and e.__context__ is not None
-                    and need_context and id(e.__context__) not in _seen):
-                    context = TracebackException(
-                        type(e.__context__),
-                        e.__context__,
-                        e.__context__.__traceback__,
-                        limit=limit,
-                        lookup_lines=lookup_lines,
-                        capture_locals=capture_locals,
-                        _seen=_seen)
-                else:
-                    context = None
-                te.__cause__ = cause
-                te.__context__ = context
-                if cause:
-                    queue.append((te.__cause__, e.__cause__))
-                if context:
-                    queue.append((te.__context__, e.__context__))
 
     @classmethod
     def from_exception(cls, exc, *args, **kwargs):
@@ -575,6 +531,10 @@
         """Private API. force all lines in the stack to be loaded."""
         for frame in self.stack:
             frame.line
+        if self.__context__:
+            self.__context__._load_lines()
+        if self.__cause__:
+            self.__cause__._load_lines()
 
     def __eq__(self, other):
         if isinstance(other, TracebackException):
@@ -604,8 +564,6 @@
         stype = self.exc_type.__qualname__
         smod = self.exc_type.__module__
         if smod not in ("__main__", "builtins"):
-            if not isinstance(smod, str):
-                smod = "<unknown>"
             stype = smod + '.' + stype
 
         if not issubclass(self.exc_type, SyntaxError):
@@ -616,12 +574,9 @@
     def _format_syntax_error(self, stype):
         """Format SyntaxError exceptions (internal helper)."""
         # Show exactly where the problem was found.
-        filename_suffix = ''
-        if self.lineno is not None:
-            yield '  File "{}", line {}\n'.format(
-                self.filename or "<string>", self.lineno)
-        elif self.filename is not None:
-            filename_suffix = ' ({})'.format(self.filename)
+        filename = self.filename or "<string>"
+        lineno = str(self.lineno) or '?'
+        yield '  File "{}", line {}\n'.format(filename, lineno)
 
         text = self.text
         if text is not None:
@@ -632,22 +587,14 @@
             ltext = rtext.lstrip(' \n\f')
             spaces = len(rtext) - len(ltext)
             yield '    {}\n'.format(ltext)
-
-            if self.offset is not None:
-                offset = self.offset
-                end_offset = self.end_offset if self.end_offset not in {None, 0} else offset
-                if offset == end_offset or end_offset == -1:
-                    end_offset = offset + 1
-
-                # Convert 1-based column offset to 0-based index into stripped text
-                colno = offset - 1 - spaces
-                end_colno = end_offset - 1 - spaces
-                if colno >= 0:
-                    # non-space whitespace (like tabs) must be kept for alignment
-                    caretspace = ((c if c.isspace() else ' ') for c in ltext[:colno])
-                    yield '    {}{}'.format("".join(caretspace), ('^' * (end_colno - colno) + "\n"))
+            # Convert 1-based column offset to 0-based index into stripped text
+            caret = (self.offset or 0) - 1 - spaces
+            if caret >= 0:
+                # non-space whitespace (like tabs) must be kept for alignment
+                caretspace = ((c if c.isspace() else ' ') for c in ltext[:caret])
+                yield '    {}^\n'.format(''.join(caretspace))
         msg = self.msg or "<no detail available>"
-        yield "{}: {}{}\n".format(stype, msg, filename_suffix)
+        yield "{}: {}\n".format(stype, msg)
 
     def format(self, *, chain=True):
         """Format the exception.
@@ -661,32 +608,15 @@
         The message indicating which exception occurred is always the last
         string in the output.
         """
-
-        output = []
-        exc = self
-        while exc:
-            if chain:
-                if exc.__cause__ is not None:
-                    chained_msg = _cause_message
-                    chained_exc = exc.__cause__
-                elif (exc.__context__  is not None and
-                      not exc.__suppress_context__):
-                    chained_msg = _context_message
-                    chained_exc = exc.__context__
-                else:
-                    chained_msg = None
-                    chained_exc = None
-
-                output.append((chained_msg, exc))
-                exc = chained_exc
-            else:
-                output.append((None, exc))
-                exc = None
-
-        for msg, exc in reversed(output):
-            if msg is not None:
-                yield msg
-            if exc.stack:
-                yield 'Traceback (most recent call last):\n'
-                yield from exc.stack.format()
-            yield from exc.format_exception_only()
+        if chain:
+            if self.__cause__ is not None:
+                yield from self.__cause__.format(chain=chain)
+                yield _cause_message
+            elif (self.__context__ is not None and
+                not self.__suppress_context__):
+                yield from self.__context__.format(chain=chain)
+                yield _context_message
+        if self.stack:
+            yield 'Traceback (most recent call last):\n'
+            yield from self.stack.format()
+        yield from self.format_exception_only()
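The restored format() recurses into __cause__ and __context__ before emitting the current exception, so the oldest link in the chain prints first. A minimal, illustrative sketch of that ordering using only the public traceback API (the function and message names here are made up for the demo):

    import traceback

    def inner():
        raise KeyError("missing")

    try:
        try:
            inner()
        except KeyError as exc:
            raise RuntimeError("wrapper") from exc
    except RuntimeError as exc:
        te = traceback.TracebackException.from_exception(exc)
        # chain=True (the default): the KeyError traceback is yielded first,
        # then the "direct cause" banner, then the RuntimeError traceback.
        print(''.join(te.format()))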
diff --git a/common/py3-stdlib/tracemalloc.py b/common/py3-stdlib/tracemalloc.py
index cec99c5..69b4170 100644
--- a/common/py3-stdlib/tracemalloc.py
+++ b/common/py3-stdlib/tracemalloc.py
@@ -226,7 +226,7 @@
         return str(self[0])
 
     def __repr__(self):
-        s = f"<Traceback {tuple(self)}"
+        s = "<Traceback %r" % tuple(self)
         if self._total_nframe is None:
             s += ">"
         else:
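A note on the %-style spelling restored here: %-formatting unpacks a bare tuple on its right-hand side into positional arguments, so the frames tuple has to be wrapped in a 1-tuple to survive multi-frame tracebacks. A tiny illustration with placeholder strings standing in for Frame objects:

    frames = ("frame_a", "frame_b")
    # "<Traceback %r" % frames would raise TypeError ("not all arguments
    # converted") because the bare tuple is unpacked into two arguments.
    print("<Traceback %r" % (frames,))  # <Traceback ('frame_a', 'frame_b')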
diff --git a/common/py3-stdlib/turtle.py b/common/py3-stdlib/turtle.py
index f3b320b..ba8288d 100644
--- a/common/py3-stdlib/turtle.py
+++ b/common/py3-stdlib/turtle.py
@@ -38,7 +38,7 @@
 ----- turtle.py
 
 This module is an extended reimplementation of turtle.py from the
-Python standard distribution up to Python 2.5. (See: https://www.python.org)
+Python standard distribution up to Python 2.5. (See: http://www.python.org)
 
 It tries to keep the merits of turtle.py and to be (nearly) 100%
 compatible with it. This means in the first place to enable the
@@ -264,12 +264,12 @@
     def __neg__(self):
         return Vec2D(-self[0], -self[1])
     def __abs__(self):
-        return math.hypot(*self)
+        return (self[0]**2 + self[1]**2)**0.5
     def rotate(self, angle):
         """rotate self counterclockwise by angle
         """
         perp = Vec2D(-self[1], self[0])
-        angle = math.radians(angle)
+        angle = angle * math.pi / 180.0
         c, s = math.cos(angle), math.sin(angle)
         return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)
     def __getnewargs__(self):
@@ -464,18 +464,20 @@
        a corresponding TurtleScreenBase class has to be implemented.
     """
 
-    def _blankimage(self):
+    @staticmethod
+    def _blankimage():
         """return a blank image object
         """
-        img = TK.PhotoImage(width=1, height=1, master=self.cv)
+        img = TK.PhotoImage(width=1, height=1)
         img.blank()
         return img
 
-    def _image(self, filename):
+    @staticmethod
+    def _image(filename):
         """return an image object containing the
         imagedata from a gif-file named filename.
         """
-        return TK.PhotoImage(file=filename, master=self.cv)
+        return TK.PhotoImage(file=filename)
 
     def __init__(self, cv):
         self.cv = cv
@@ -809,7 +811,7 @@
         >>> screen.mainloop()
 
         """
-        self.cv.tk.mainloop()
+        TK.mainloop()
 
     def textinput(self, title, prompt):
         """Pop up a dialog window for input of a string.
@@ -824,7 +826,7 @@
         >>> screen.textinput("NIM", "Name of first player:")
 
         """
-        return simpledialog.askstring(title, prompt, parent=self.cv)
+        return simpledialog.askstring(title, prompt)
 
     def numinput(self, title, prompt, default=None, minval=None, maxval=None):
         """Pop up a dialog window for input of a number.
@@ -845,8 +847,7 @@
 
         """
         return simpledialog.askfloat(title, prompt, initialvalue=default,
-                                     minvalue=minval, maxvalue=maxval,
-                                     parent=self.cv)
+                                     minvalue=minval, maxvalue=maxval)
 
 
 ##############################################################################
@@ -964,8 +965,6 @@
 
     def __init__(self, cv, mode=_CFG["mode"],
                  colormode=_CFG["colormode"], delay=_CFG["delay"]):
-        TurtleScreenBase.__init__(self, cv)
-
         self._shapes = {
                    "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
                   "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
@@ -989,6 +988,7 @@
 
         self._bgpics = {"nopic" : ""}
 
+        TurtleScreenBase.__init__(self, cv)
         self._mode = mode
         self._delayvalue = delay
         self._colormode = _CFG["colormode"]
@@ -1598,7 +1598,7 @@
         >>> turtle.heading()
         1.5707963267948966
         """
-        self._setDegreesPerAU(math.tau)
+        self._setDegreesPerAU(2*math.pi)
 
     def _go(self, distance):
         """move turtle forward by specified distance"""
@@ -1645,7 +1645,7 @@
         Argument:
         distance -- a number
 
-        Move the turtle backward by distance, opposite to the direction the
+        Move the turtle backward by distance, opposite to the direction the
         turtle is headed. Do not change the turtle's heading.
 
         Example (for a Turtle instance named turtle):
@@ -1889,7 +1889,7 @@
         elif isinstance(x, TNavigator):
             pos = x._position
         x, y = pos - self._position
-        result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
+        result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
         result /= self._degreesPerAU
         return (self._angleOffset + self._angleOrient*result) % self._fullcircle
 
@@ -1904,7 +1904,7 @@
         67.0
         """
         x, y = self._orient
-        result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
+        result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
         result /= self._degreesPerAU
         return (self._angleOffset + self._angleOrient*result) % self._fullcircle
 
@@ -1977,7 +1977,7 @@
             steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
         w = 1.0 * extent / steps
         w2 = 0.5 * w
-        l = 2.0 * radius * math.sin(math.radians(w2)*self._degreesPerAU)
+        l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
         if radius < 0:
             l, w, w2 = -l, -w, -w2
         tr = self._tracer()
@@ -2862,7 +2862,7 @@
         >>> turtle.fd(50)
         """
         tilt = -angle * self._degreesPerAU * self._angleOrient
-        tilt = math.radians(tilt) % math.tau
+        tilt = (tilt * math.pi / 180.0) % (2*math.pi)
         self.pen(resizemode="user", tilt=tilt)
 
     def tiltangle(self, angle=None):
@@ -2877,8 +2877,7 @@
         between the orientation of the turtleshape and the heading of the
         turtle (its direction of movement).
 
-        (Incorrectly marked as deprecated since Python 3.1, it is really
-        settiltangle that is deprecated.)
+        Deprecated since Python 3.1
 
         Examples (for a Turtle instance named turtle):
         >>> turtle.shape("circle")
@@ -2887,7 +2886,7 @@
         >>> turtle.tiltangle()
         """
         if angle is None:
-            tilt = -math.degrees(self._tilt) * self._angleOrient
+            tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
             return (tilt / self._degreesPerAU) % self._fullcircle
         else:
             self.settiltangle(angle)
@@ -2941,7 +2940,7 @@
         if t11 * t22 - t12 * t21 == 0:
             raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
         self._shapetrafo = (m11, m12, m21, m22)
-        alfa = math.atan2(-m21, m11) % math.tau
+        alfa = math.atan2(-m21, m11) % (2 * math.pi)
         sa, ca = math.sin(alfa), math.cos(alfa)
         a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
                               sa*m11 + ca*m21, sa*m12 + ca*m22)
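Throughout turtle.py this revert trades math.radians(), math.degrees(), math.tau and math.hypot() for their spelled-out equivalents. A quick sanity sketch of the identities involved (floating-point comparisons hedged with isclose, since the two evaluation orders can differ by an ulp):

    import math

    deg = 30.0
    assert math.isclose(math.radians(deg), deg * math.pi / 180.0)
    assert math.isclose(math.degrees(math.pi / 6), (math.pi / 6) * 180.0 / math.pi)
    assert math.tau == 2 * math.pi
    # hypot(x, y) is the explicit Euclidean norm used by Vec2D.__abs__
    assert math.hypot(3.0, 4.0) == (3.0 ** 2 + 4.0 ** 2) ** 0.5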
diff --git a/common/py3-stdlib/types.py b/common/py3-stdlib/types.py
index 62122a9..ad2020e 100644
--- a/common/py3-stdlib/types.py
+++ b/common/py3-stdlib/types.py
@@ -82,7 +82,7 @@
     updated = False
     shift = 0
     for i, base in enumerate(bases):
-        if isinstance(base, type) and not isinstance(base, GenericAlias):
+        if isinstance(base, type):
             continue
         if not hasattr(base, "__mro_entries__"):
             continue
@@ -155,12 +155,7 @@
     class's __getattr__ method; this is done by raising AttributeError.
 
     This allows one to have properties active on an instance, and have virtual
-    attributes on the class with the same name.  (Enum used this between Python
-    versions 3.4 - 3.9.)
-
-    Subclass from this to use a different method of accessing virtual attributes
-    and still be treated properly by the inspect module. (Enum uses this since
-    Python 3.10.)
+    attributes on the class with the same name (see Enum for an example).
 
     """
     def __init__(self, fget=None, fset=None, fdel=None, doc=None):
@@ -297,11 +292,8 @@
 
     return wrapped
 
-GenericAlias = type(list[int])
-UnionType = type(int | str)
 
-EllipsisType = type(Ellipsis)
-NoneType = type(None)
-NotImplementedType = type(NotImplemented)
+GenericAlias = type(list[int])
+
 
 __all__ = [n for n in globals() if n[:1] != '_']
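After this change types.py derives GenericAlias by subscripting a builtin instead of importing it, and drops UnionType, EllipsisType, NoneType and NotImplementedType. A small sketch of what the surviving alias provides, assuming a Python 3.9+ interpreter where builtins are subscriptable:

    GenericAlias = type(list[int])

    alias = GenericAlias(list, (int,))
    assert alias == list[int]
    assert alias.__origin__ is list
    assert alias.__args__ == (int,)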
diff --git a/common/py3-stdlib/typing.py b/common/py3-stdlib/typing.py
index 086d0f3..f5316ab 100644
--- a/common/py3-stdlib/typing.py
+++ b/common/py3-stdlib/typing.py
@@ -4,10 +4,8 @@
 At large scale, the structure of the module is as follows:
 * Imports and exports, all public names should be explicitly added to __all__.
 * Internal helper functions: these should never be used in code outside this module.
-* _SpecialForm and its instances (special forms):
-  Any, NoReturn, ClassVar, Union, Optional, Concatenate
-* Classes whose instances can be type arguments in addition to types:
-  ForwardRef, TypeVar and ParamSpec
+* _SpecialForm and its instances (special forms): Any, NoReturn, ClassVar, Union, Optional
+* Two classes whose instances can be type arguments in addition to types: ForwardRef and TypeVar
 * The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is
   currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
   etc., are instances of either of these classes.
@@ -37,13 +35,11 @@
     'Any',
     'Callable',
     'ClassVar',
-    'Concatenate',
     'Final',
     'ForwardRef',
     'Generic',
     'Literal',
     'Optional',
-    'ParamSpec',
     'Protocol',
     'Tuple',
     'Type',
@@ -100,13 +96,6 @@
     'TypedDict',  # Not really a type.
     'Generator',
 
-    # Other concrete types.
-    'BinaryIO',
-    'IO',
-    'Match',
-    'Pattern',
-    'TextIO',
-
     # One-off things.
     'AnyStr',
     'cast',
@@ -114,19 +103,14 @@
     'get_args',
     'get_origin',
     'get_type_hints',
-    'is_typeddict',
     'NewType',
     'no_type_check',
     'no_type_check_decorator',
     'NoReturn',
     'overload',
-    'ParamSpecArgs',
-    'ParamSpecKwargs',
     'runtime_checkable',
     'Text',
     'TYPE_CHECKING',
-    'TypeAlias',
-    'TypeGuard',
 ]
 
 # The pseudo-submodules 're' and 'io' are part of the public
@@ -134,16 +118,7 @@
 # legitimate imports of those modules.
 
 
-def _type_convert(arg, module=None, *, allow_special_forms=False):
-    """For converting None to type(None), and strings to ForwardRef."""
-    if arg is None:
-        return type(None)
-    if isinstance(arg, str):
-        return ForwardRef(arg, module=module, is_class=allow_special_forms)
-    return arg
-
-
-def _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False):
+def _type_check(arg, msg, is_argument=True):
     """Check that the argument is a type, and return it (internal helper).
 
     As a special case, accept None and return type(None) instead. Also wrap strings
@@ -156,32 +131,27 @@
     We append the repr() of the actual value (truncated to 100 chars).
     """
     invalid_generic_forms = (Generic, Protocol)
-    if not allow_special_forms:
-        invalid_generic_forms += (ClassVar,)
-        if is_argument:
-            invalid_generic_forms += (Final,)
+    if is_argument:
+        invalid_generic_forms = invalid_generic_forms + (ClassVar, Final)
 
-    arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms)
+    if arg is None:
+        return type(None)
+    if isinstance(arg, str):
+        return ForwardRef(arg)
     if (isinstance(arg, _GenericAlias) and
             arg.__origin__ in invalid_generic_forms):
         raise TypeError(f"{arg} is not valid as type argument")
-    if arg in (Any, NoReturn, Final, TypeAlias):
+    if arg in (Any, NoReturn):
         return arg
     if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
         raise TypeError(f"Plain {arg} is not valid as type argument")
-    if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec,
-                        ParamSpecArgs, ParamSpecKwargs)):
+    if isinstance(arg, (type, TypeVar, ForwardRef)):
         return arg
     if not callable(arg):
         raise TypeError(f"{msg} Got {arg!r:.100}.")
     return arg
 
 
-def _is_param_expr(arg):
-    return arg is ... or isinstance(arg,
-            (tuple, list, ParamSpec, _ConcatenateGenericAlias))
-
-
 def _type_repr(obj):
     """Return the repr() of an object, special-casing types (internal helper).
 
@@ -203,19 +173,17 @@
     return repr(obj)
 
 
-def _collect_type_vars(types_, typevar_types=None):
-    """Collect all type variable contained
-    in types in order of first appearance (lexicographic order). For example::
+def _collect_type_vars(types):
+    """Collect all type variable contained in types in order of
+    first appearance (lexicographic order). For example::
 
         _collect_type_vars((T, List[S, T])) == (T, S)
     """
-    if typevar_types is None:
-        typevar_types = TypeVar
     tvars = []
-    for t in types_:
-        if isinstance(t, typevar_types) and t not in tvars:
+    for t in types:
+        if isinstance(t, TypeVar) and t not in tvars:
             tvars.append(t)
-        if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
+        if isinstance(t, (_GenericAlias, GenericAlias)):
             tvars.extend([t for t in t.__parameters__ if t not in tvars])
     return tuple(tvars)
 
@@ -228,27 +196,9 @@
         raise TypeError(f"{cls} is not a generic class")
     alen = len(parameters)
     if alen != elen:
-        raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
+        raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
                         f" actual {alen}, expected {elen}")
 
-def _prepare_paramspec_params(cls, params):
-    """Prepares the parameters for a Generic containing ParamSpec
-    variables (internal helper).
-    """
-    # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
-    if (len(cls.__parameters__) == 1
-            and params and not _is_param_expr(params[0])):
-        assert isinstance(cls.__parameters__[0], ParamSpec)
-        return (params,)
-    else:
-        _check_generic(cls, params, len(cls.__parameters__))
-        _params = []
-        # Convert lists to tuples to help other libraries cache the results.
-        for p, tvar in zip(params, cls.__parameters__):
-            if isinstance(tvar, ParamSpec) and isinstance(p, list):
-                p = tuple(p)
-            _params.append(p)
-        return tuple(_params)
 
 def _deduplicate(params):
     # Weed out strict duplicates, preserving the first of each occurrence.
@@ -271,7 +221,7 @@
     # Flatten out Union[Union[...], ...].
     params = []
     for p in parameters:
-        if isinstance(p, (_UnionGenericAlias, types.UnionType)):
+        if isinstance(p, _UnionGenericAlias):
             params.extend(p.__args__)
         elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
             params.extend(p[1:])
@@ -320,19 +270,17 @@
 def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
     """Evaluate all forward references in the given type t.
     For use of globalns and localns see the docstring for get_type_hints().
-    recursive_guard is used to prevent infinite recursion with a recursive
-    ForwardRef.
+    recursive_guard is used to prevent infinite recursion
+    with recursive ForwardRef.
     """
     if isinstance(t, ForwardRef):
         return t._evaluate(globalns, localns, recursive_guard)
-    if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
+    if isinstance(t, (_GenericAlias, GenericAlias)):
         ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
         if ev_args == t.__args__:
             return t
         if isinstance(t, GenericAlias):
             return GenericAlias(t.__origin__, ev_args)
-        if isinstance(t, types.UnionType):
-            return functools.reduce(operator.or_, ev_args)
         else:
             return t.copy_with(ev_args)
     return t
@@ -368,12 +316,6 @@
         self._name = getitem.__name__
         self.__doc__ = getitem.__doc__
 
-    def __getattr__(self, item):
-        if item in {'__name__', '__qualname__'}:
-            return self._name
-
-        raise AttributeError(item)
-
     def __mro_entries__(self, bases):
         raise TypeError(f"Cannot subclass {self!r}")
 
@@ -386,12 +328,6 @@
     def __call__(self, *args, **kwds):
         raise TypeError(f"Cannot instantiate {self!r}")
 
-    def __or__(self, other):
-        return Union[self, other]
-
-    def __ror__(self, other):
-        return Union[other, self]
-
     def __instancecheck__(self, obj):
         raise TypeError(f"{self} cannot be used with isinstance()")
 
@@ -404,10 +340,9 @@
 
 
 class _LiteralSpecialForm(_SpecialForm, _root=True):
+    @_tp_cache(typed=True)
     def __getitem__(self, parameters):
-        if not isinstance(parameters, tuple):
-            parameters = (parameters,)
-        return self._getitem(self, *parameters)
+        return self._getitem(self, parameters)
 
 
 @_SpecialForm
@@ -516,8 +451,6 @@
     parameters = _remove_dups_flatten(parameters)
     if len(parameters) == 1:
         return parameters[0]
-    if len(parameters) == 2 and type(None) in parameters:
-        return _UnionGenericAlias(self, parameters, name="Optional")
     return _UnionGenericAlias(self, parameters)
 
 @_SpecialForm
@@ -530,8 +463,7 @@
     return Union[arg, type(None)]
 
 @_LiteralSpecialForm
-@_tp_cache(typed=True)
-def Literal(self, *parameters):
+def Literal(self, parameters):
     """Special typing form to define literal types (a.k.a. value types).
 
     This form can be used to indicate to type checkers that the corresponding
@@ -554,6 +486,9 @@
     """
     # There is no '_type_check' call because arguments to Literal[...] are
     # values, not types.
+    if not isinstance(parameters, tuple):
+        parameters = (parameters,)
+
     parameters = _flatten_literal_params(parameters)
 
     try:
@@ -564,104 +499,14 @@
     return _LiteralGenericAlias(self, parameters)
 
 
-@_SpecialForm
-def TypeAlias(self, parameters):
-    """Special marker indicating that an assignment should
-    be recognized as a proper type alias definition by type
-    checkers.
-
-    For example::
-
-        Predicate: TypeAlias = Callable[..., bool]
-
-    It's invalid when used anywhere except as in the example above.
-    """
-    raise TypeError(f"{self} is not subscriptable")
-
-
-@_SpecialForm
-def Concatenate(self, parameters):
-    """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
-    higher order function which adds, removes or transforms parameters of a
-    callable.
-
-    For example::
-
-       Callable[Concatenate[int, P], int]
-
-    See PEP 612 for detailed information.
-    """
-    if parameters == ():
-        raise TypeError("Cannot take a Concatenate of no types.")
-    if not isinstance(parameters, tuple):
-        parameters = (parameters,)
-    if not isinstance(parameters[-1], ParamSpec):
-        raise TypeError("The last parameter to Concatenate should be a "
-                        "ParamSpec variable.")
-    msg = "Concatenate[arg, ...]: each arg must be a type."
-    parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])
-    return _ConcatenateGenericAlias(self, parameters,
-                                    _typevar_types=(TypeVar, ParamSpec),
-                                    _paramspec_tvars=True)
-
-
-@_SpecialForm
-def TypeGuard(self, parameters):
-    """Special typing form used to annotate the return type of a user-defined
-    type guard function.  ``TypeGuard`` only accepts a single type argument.
-    At runtime, functions marked this way should return a boolean.
-
-    ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
-    type checkers to determine a more precise type of an expression within a
-    program's code flow.  Usually type narrowing is done by analyzing
-    conditional code flow and applying the narrowing to a block of code.  The
-    conditional expression here is sometimes referred to as a "type guard".
-
-    Sometimes it would be convenient to use a user-defined boolean function
-    as a type guard.  Such a function should use ``TypeGuard[...]`` as its
-    return type to alert static type checkers to this intention.
-
-    Using  ``-> TypeGuard`` tells the static type checker that for a given
-    function:
-
-    1. The return value is a boolean.
-    2. If the return value is ``True``, the type of its argument
-       is the type inside ``TypeGuard``.
-
-       For example::
-
-          def is_str(val: Union[str, float]):
-              # "isinstance" type guard
-              if isinstance(val, str):
-                  # Type of ``val`` is narrowed to ``str``
-                  ...
-              else:
-                  # Else, type of ``val`` is narrowed to ``float``.
-                  ...
-
-    Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
-    form of ``TypeA`` (it can even be a wider form) and this may lead to
-    type-unsafe results.  The main reason is to allow for things like
-    narrowing ``List[object]`` to ``List[str]`` even though the latter is not
-    a subtype of the former, since ``List`` is invariant.  The responsibility of
-    writing type-safe type guards is left to the user.
-
-    ``TypeGuard`` also works with type variables.  For more information, see
-    PEP 647 (User-Defined Type Guards).
-    """
-    item = _type_check(parameters, f'{self} accepts only single type.')
-    return _GenericAlias(self, (item,))
-
-
 class ForwardRef(_Final, _root=True):
     """Internal wrapper to hold a forward reference."""
 
     __slots__ = ('__forward_arg__', '__forward_code__',
                  '__forward_evaluated__', '__forward_value__',
-                 '__forward_is_argument__', '__forward_is_class__',
-                 '__forward_module__')
+                 '__forward_is_argument__')
 
-    def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
+    def __init__(self, arg, is_argument=True):
         if not isinstance(arg, str):
             raise TypeError(f"Forward reference must be a string -- got {arg!r}")
         try:
@@ -673,8 +518,6 @@
         self.__forward_evaluated__ = False
         self.__forward_value__ = None
         self.__forward_is_argument__ = is_argument
-        self.__forward_is_class__ = is_class
-        self.__forward_module__ = module
 
     def _evaluate(self, globalns, localns, recursive_guard):
         if self.__forward_arg__ in recursive_guard:
@@ -686,15 +529,10 @@
                 globalns = localns
             elif localns is None:
                 localns = globalns
-            if self.__forward_module__ is not None:
-                globalns = getattr(
-                    sys.modules.get(self.__forward_module__, None), '__dict__', globalns
-                )
-            type_ = _type_check(
+            type_ = _type_check(
                 eval(self.__forward_code__, globalns, localns),
                 "Forward references must evaluate to types.",
                 is_argument=self.__forward_is_argument__,
-                allow_special_forms=self.__forward_is_class__,
             )
             self.__forward_value__ = _eval_type(
                 type_, globalns, localns, recursive_guard | {self.__forward_arg__}
@@ -708,50 +546,16 @@
         if self.__forward_evaluated__ and other.__forward_evaluated__:
             return (self.__forward_arg__ == other.__forward_arg__ and
                     self.__forward_value__ == other.__forward_value__)
-        return (self.__forward_arg__ == other.__forward_arg__ and
-                self.__forward_module__ == other.__forward_module__)
+        return self.__forward_arg__ == other.__forward_arg__
 
     def __hash__(self):
-        return hash((self.__forward_arg__, self.__forward_module__))
+        return hash(self.__forward_arg__)
 
     def __repr__(self):
         return f'ForwardRef({self.__forward_arg__!r})'
 
-class _TypeVarLike:
-    """Mixin for TypeVar-like types (TypeVar and ParamSpec)."""
-    def __init__(self, bound, covariant, contravariant):
-        """Used to setup TypeVars and ParamSpec's bound, covariant and
-        contravariant attributes.
-        """
-        if covariant and contravariant:
-            raise ValueError("Bivariant types are not supported.")
-        self.__covariant__ = bool(covariant)
-        self.__contravariant__ = bool(contravariant)
-        if bound:
-            self.__bound__ = _type_check(bound, "Bound must be a type.")
-        else:
-            self.__bound__ = None
 
-    def __or__(self, right):
-        return Union[self, right]
-
-    def __ror__(self, left):
-        return Union[left, self]
-
-    def __repr__(self):
-        if self.__covariant__:
-            prefix = '+'
-        elif self.__contravariant__:
-            prefix = '-'
-        else:
-            prefix = '~'
-        return prefix + self.__name__
-
-    def __reduce__(self):
-        return self.__name__
-
-
-class TypeVar(_Final, _Immutable, _TypeVarLike, _root=True):
+class TypeVar(_Final, _Immutable, _root=True):
     """Type variable.
 
     Usage::
@@ -801,13 +605,20 @@
     def __init__(self, name, *constraints, bound=None,
                  covariant=False, contravariant=False):
         self.__name__ = name
-        super().__init__(bound, covariant, contravariant)
+        if covariant and contravariant:
+            raise ValueError("Bivariant types are not supported.")
+        self.__covariant__ = bool(covariant)
+        self.__contravariant__ = bool(contravariant)
         if constraints and bound is not None:
             raise TypeError("Constraints cannot be combined with bound=...")
         if constraints and len(constraints) == 1:
             raise TypeError("A single constraint is not allowed")
         msg = "TypeVar(name, constraint, ...): constraints must be types."
         self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
+        if bound:
+            self.__bound__ = _type_check(bound, "Bound must be a type.")
+        else:
+            self.__bound__ = None
         try:
             def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')  # for pickling
         except (AttributeError, ValueError):
@@ -815,121 +626,17 @@
         if def_mod != 'typing':
             self.__module__ = def_mod
 
-
-class ParamSpecArgs(_Final, _Immutable, _root=True):
-    """The args for a ParamSpec object.
-
-    Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
-
-    ParamSpecArgs objects have a reference back to their ParamSpec:
-
-       P.args.__origin__ is P
-
-    This type is meant for runtime introspection and has no special meaning to
-    static type checkers.
-    """
-    def __init__(self, origin):
-        self.__origin__ = origin
-
     def __repr__(self):
-        return f"{self.__origin__.__name__}.args"
+        if self.__covariant__:
+            prefix = '+'
+        elif self.__contravariant__:
+            prefix = '-'
+        else:
+            prefix = '~'
+        return prefix + self.__name__
 
-    def __eq__(self, other):
-        if not isinstance(other, ParamSpecArgs):
-            return NotImplemented
-        return self.__origin__ == other.__origin__
-
-
-class ParamSpecKwargs(_Final, _Immutable, _root=True):
-    """The kwargs for a ParamSpec object.
-
-    Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
-
-    ParamSpecKwargs objects have a reference back to their ParamSpec:
-
-       P.kwargs.__origin__ is P
-
-    This type is meant for runtime introspection and has no special meaning to
-    static type checkers.
-    """
-    def __init__(self, origin):
-        self.__origin__ = origin
-
-    def __repr__(self):
-        return f"{self.__origin__.__name__}.kwargs"
-
-    def __eq__(self, other):
-        if not isinstance(other, ParamSpecKwargs):
-            return NotImplemented
-        return self.__origin__ == other.__origin__
-
-
-class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
-    """Parameter specification variable.
-
-    Usage::
-
-       P = ParamSpec('P')
-
-    Parameter specification variables exist primarily for the benefit of static
-    type checkers.  They are used to forward the parameter types of one
-    callable to another callable, a pattern commonly found in higher order
-    functions and decorators.  They are only valid when used in ``Concatenate``,
-    or as the first argument to ``Callable``, or as parameters for user-defined
-    Generics.  See class Generic for more information on generic types.  An
-    example for annotating a decorator::
-
-       T = TypeVar('T')
-       P = ParamSpec('P')
-
-       def add_logging(f: Callable[P, T]) -> Callable[P, T]:
-           '''A type-safe decorator to add logging to a function.'''
-           def inner(*args: P.args, **kwargs: P.kwargs) -> T:
-               logging.info(f'{f.__name__} was called')
-               return f(*args, **kwargs)
-           return inner
-
-       @add_logging
-       def add_two(x: float, y: float) -> float:
-           '''Add two numbers together.'''
-           return x + y
-
-    Parameter specification variables defined with covariant=True or
-    contravariant=True can be used to declare covariant or contravariant
-    generic types.  These keyword arguments are valid, but their actual semantics
-    are yet to be decided.  See PEP 612 for details.
-
-    Parameter specification variables can be introspected. e.g.:
-
-       P.__name__ == 'P'
-       P.__bound__ == None
-       P.__covariant__ == False
-       P.__contravariant__ == False
-
-    Note that only parameter specification variables defined in global scope can
-    be pickled.
-    """
-
-    __slots__ = ('__name__', '__bound__', '__covariant__', '__contravariant__',
-                 '__dict__')
-
-    @property
-    def args(self):
-        return ParamSpecArgs(self)
-
-    @property
-    def kwargs(self):
-        return ParamSpecKwargs(self)
-
-    def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
-        self.__name__ = name
-        super().__init__(bound, covariant, contravariant)
-        try:
-            def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
-        except (AttributeError, ValueError):
-            def_mod = None
-        if def_mod != 'typing':
-            self.__module__ = def_mod
+    def __reduce__(self):
+        return self.__name__
 
 
 def _is_dunder(attr):
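The variance bookkeeping folded back into TypeVar.__init__ and __repr__ here is observable through the repr prefixes. A short check that holds on both the 3.9-era and 3.10-era implementations (variable names are illustrative):

    from typing import TypeVar

    T = TypeVar('T')
    T_co = TypeVar('T_co', covariant=True)
    T_contra = TypeVar('T_contra', contravariant=True)

    # '~' marks invariant, '+' covariant, '-' contravariant type variables
    assert repr(T) == '~T'
    assert repr(T_co) == '+T_co'
    assert repr(T_contra) == '-T_contra'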
@@ -974,18 +681,14 @@
         return tuple(res)
 
     def __getattr__(self, attr):
-        if attr in {'__name__', '__qualname__'}:
-            return self._name or self.__origin__.__name__
-
         # We are careful for copy and pickle.
-        # Also for simplicity we don't relay any dunder names
+        # Also for simplicity we just don't relay all dunder names
         if '__origin__' in self.__dict__ and not _is_dunder(attr):
             return getattr(self.__origin__, attr)
         raise AttributeError(attr)
 
     def __setattr__(self, attr, val):
-        if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
-                                        '_typevar_types', '_paramspec_tvars'}:
+        if _is_dunder(attr) or attr in ('_name', '_inst', '_nparams'):
             super().__setattr__(attr, val)
         else:
             setattr(self.__origin__, attr, val)
@@ -997,9 +700,6 @@
         raise TypeError("Subscripted generics cannot be used with"
                         " class and instance checks")
 
-    def __dir__(self):
-        return list(set(super().__dir__()
-                + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))
 
 # Special typing constructs Union, Optional, Generic, Callable and Tuple
 # use three special attributes for internal bookkeeping of generic types:
@@ -1013,18 +713,14 @@
 
 
 class _GenericAlias(_BaseGenericAlias, _root=True):
-    def __init__(self, origin, params, *, inst=True, name=None,
-                 _typevar_types=TypeVar,
-                 _paramspec_tvars=False):
+    def __init__(self, origin, params, *, inst=True, name=None):
         super().__init__(origin, inst=inst, name=name)
         if not isinstance(params, tuple):
             params = (params,)
         self.__args__ = tuple(... if a is _TypingEllipsis else
                               () if a is _TypingEmpty else
                               a for a in params)
-        self.__parameters__ = _collect_type_vars(params, typevar_types=_typevar_types)
-        self._typevar_types = _typevar_types
-        self._paramspec_tvars = _paramspec_tvars
+        self.__parameters__ = _collect_type_vars(params)
         if not name:
             self.__module__ = origin.__module__
 
@@ -1037,12 +733,6 @@
     def __hash__(self):
         return hash((self.__origin__, self.__args__))
 
-    def __or__(self, right):
-        return Union[self, right]
-
-    def __ror__(self, left):
-        return Union[left, self]
-
     @_tp_cache
     def __getitem__(self, params):
         if self.__origin__ in (Generic, Protocol):
@@ -1050,40 +740,25 @@
             raise TypeError(f"Cannot subscript already-subscripted {self}")
         if not isinstance(params, tuple):
             params = (params,)
-        params = tuple(_type_convert(p) for p in params)
-        if (self._paramspec_tvars
-                and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
-            params = _prepare_paramspec_params(self, params)
-        else:
-            _check_generic(self, params, len(self.__parameters__))
+        msg = "Parameters to generic types must be types."
+        params = tuple(_type_check(p, msg) for p in params)
+        _check_generic(self, params, len(self.__parameters__))
 
         subst = dict(zip(self.__parameters__, params))
         new_args = []
         for arg in self.__args__:
-            if isinstance(arg, self._typevar_types):
-                if isinstance(arg, ParamSpec):
-                    arg = subst[arg]
-                    if not _is_param_expr(arg):
-                        raise TypeError(f"Expected a list of types, an ellipsis, "
-                                        f"ParamSpec, or Concatenate. Got {arg}")
-                else:
-                    arg = subst[arg]
-            elif isinstance(arg, (_GenericAlias, GenericAlias, types.UnionType)):
+            if isinstance(arg, TypeVar):
+                arg = subst[arg]
+            elif isinstance(arg, (_GenericAlias, GenericAlias)):
                 subparams = arg.__parameters__
                 if subparams:
                     subargs = tuple(subst[x] for x in subparams)
                     arg = arg[subargs]
-            # Required to flatten out the args for CallableGenericAlias
-            if self.__origin__ == collections.abc.Callable and isinstance(arg, tuple):
-                new_args.extend(arg)
-            else:
-                new_args.append(arg)
+            new_args.append(arg)
         return self.copy_with(tuple(new_args))
 
     def copy_with(self, params):
-        return self.__class__(self.__origin__, params, name=self._name, inst=self._inst,
-                              _typevar_types=self._typevar_types,
-                              _paramspec_tvars=self._paramspec_tvars)
+        return self.__class__(self.__origin__, params, name=self._name, inst=self._inst)
 
     def __repr__(self):
         if self._name:
@@ -1104,9 +779,6 @@
         return operator.getitem, (origin, args)
 
     def __mro_entries__(self, bases):
-        if isinstance(self.__origin__, _SpecialForm):
-            raise TypeError(f"Cannot subclass {self!r}")
-
         if self._name:  # generic version of an ABC or built-in class
             return super().__mro_entries__(bases)
         if self.__origin__ is Generic:
@@ -1160,25 +832,19 @@
     def __reduce__(self):
         return self._name
 
-    def __or__(self, right):
-        return Union[self, right]
-
-    def __ror__(self, left):
-        return Union[left, self]
 
 class _CallableGenericAlias(_GenericAlias, _root=True):
     def __repr__(self):
         assert self._name == 'Callable'
-        args = self.__args__
-        if len(args) == 2 and _is_param_expr(args[0]):
+        if len(self.__args__) == 2 and self.__args__[0] is Ellipsis:
             return super().__repr__()
         return (f'typing.Callable'
-                f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
-                f'{_type_repr(args[-1])}]')
+                f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
+                f'{_type_repr(self.__args__[-1])}]')
 
     def __reduce__(self):
         args = self.__args__
-        if not (len(args) == 2 and _is_param_expr(args[0])):
+        if not (len(args) == 2 and args[0] is ...):
             args = list(args[:-1]), args[-1]
         return operator.getitem, (Callable, args)
 
@@ -1186,22 +852,20 @@
 class _CallableType(_SpecialGenericAlias, _root=True):
     def copy_with(self, params):
         return _CallableGenericAlias(self.__origin__, params,
-                                     name=self._name, inst=self._inst,
-                                     _typevar_types=(TypeVar, ParamSpec),
-                                     _paramspec_tvars=True)
+                                     name=self._name, inst=self._inst)
 
     def __getitem__(self, params):
         if not isinstance(params, tuple) or len(params) != 2:
             raise TypeError("Callable must be used as "
                             "Callable[[arg, ...], result].")
         args, result = params
-        # This relaxes what args can be on purpose to allow things like
-        # PEP 612 ParamSpec.  Responsibility for whether a user is using
-        # Callable[...] properly is deferred to static type checkers.
-        if isinstance(args, list):
-            params = (tuple(args), result)
+        if args is Ellipsis:
+            params = (Ellipsis, result)
         else:
-            params = (args, result)
+            if not isinstance(args, list):
+                raise TypeError(f"Callable[args, result]: args must be a list."
+                                f" Got {args}")
+            params = (tuple(args), result)
         return self.__getitem_inner__(params)
 
     @_tp_cache
@@ -1211,9 +875,8 @@
         result = _type_check(result, msg)
         if args is Ellipsis:
             return self.copy_with((_TypingEllipsis, result))
-        if not isinstance(args, tuple):
-            args = (args,)
-        args = tuple(_type_convert(arg) for arg in args)
+        msg = "Callable[[arg, ...], result]: each arg must be a type."
+        args = tuple(_type_check(arg, msg) for arg in args)
         params = args + (result,)
         return self.copy_with(params)
 
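The restored Callable subscription is stricter than the ParamSpec-aware version it replaces: the argument position must be a literal list of types or Ellipsis. A hedged sketch of the 3.9-era rules (newer interpreters deliberately relax the bare-type case, so the except branch may not trigger there):

    from typing import Callable

    ok_fixed = Callable[[int, str], bool]  # list of argument types
    ok_any = Callable[..., bool]           # Ellipsis: any signature

    try:
        Callable[int, bool]                # bare type instead of a list
    except TypeError as exc:
        print(exc)  # Callable[args, result]: args must be a list. Got <class 'int'>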
@@ -1239,7 +902,7 @@
         return Union[params]
 
     def __eq__(self, other):
-        if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
+        if not isinstance(other, _UnionGenericAlias):
             return NotImplemented
         return set(self.__args__) == set(other.__args__)
 
@@ -1255,18 +918,6 @@
                 return f'typing.Optional[{_type_repr(args[0])}]'
         return super().__repr__()
 
-    def __instancecheck__(self, obj):
-        return self.__subclasscheck__(type(obj))
-
-    def __subclasscheck__(self, cls):
-        for arg in self.__args__:
-            if issubclass(cls, arg):
-                return True
-
-    def __reduce__(self):
-        func, (origin, args) = super().__reduce__()
-        return func, (Union, args)
-
 
 def _value_and_type_iter(parameters):
     return ((p, type(p)) for p in parameters)
@@ -1284,18 +935,6 @@
         return hash(frozenset(_value_and_type_iter(self.__args__)))
 
 
-class _ConcatenateGenericAlias(_GenericAlias, _root=True):
-    def copy_with(self, params):
-        if isinstance(params[-1], (list, tuple)):
-            return (*params[:-1], *params[-1])
-        if isinstance(params[-1], _ConcatenateGenericAlias):
-            params = (*params[:-1], *params[-1].__args__)
-        elif not isinstance(params[-1], ParamSpec):
-            raise TypeError("The last parameter to Concatenate should be a "
-                            "ParamSpec variable.")
-        return super().copy_with(params)
-
-
 class Generic:
     """Abstract base class for generic types.
 
@@ -1326,25 +965,20 @@
         if not params and cls is not Tuple:
             raise TypeError(
                 f"Parameter list to {cls.__qualname__}[...] cannot be empty")
-        params = tuple(_type_convert(p) for p in params)
+        msg = "Parameters to generic types must be types."
+        params = tuple(_type_check(p, msg) for p in params)
         if cls in (Generic, Protocol):
             # Generic and Protocol can only be subscripted with unique type variables.
-            if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
+            if not all(isinstance(p, TypeVar) for p in params):
                 raise TypeError(
-                    f"Parameters to {cls.__name__}[...] must all be type variables "
-                    f"or parameter specification variables.")
+                    f"Parameters to {cls.__name__}[...] must all be type variables")
             if len(set(params)) != len(params):
                 raise TypeError(
                     f"Parameters to {cls.__name__}[...] must all be unique")
         else:
             # Subscripting a regular Generic subclass.
-            if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
-                params = _prepare_paramspec_params(cls, params)
-            else:
-                _check_generic(cls, params, len(cls.__parameters__))
-        return _GenericAlias(cls, params,
-                             _typevar_types=(TypeVar, ParamSpec),
-                             _paramspec_tvars=True)
+            _check_generic(cls, params, len(cls.__parameters__))
+        return _GenericAlias(cls, params)
 
     def __init_subclass__(cls, *args, **kwargs):
         super().__init_subclass__(*args, **kwargs)
@@ -1356,7 +990,7 @@
         if error:
             raise TypeError("Cannot inherit from plain Generic")
         if '__orig_bases__' in cls.__dict__:
-            tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
+            tvars = _collect_type_vars(cls.__orig_bases__)
             # Look for Generic[T1, ..., Tn].
             # If found, tvars must be a subset of it.
             # If not found, tvars is it.
@@ -1426,55 +1060,24 @@
     return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
 
 
-def _no_init_or_replace_init(self, *args, **kwargs):
-    cls = type(self)
-
-    if cls._is_protocol:
+def _no_init(self, *args, **kwargs):
+    if type(self)._is_protocol:
         raise TypeError('Protocols cannot be instantiated')
 
-    # Already using a custom `__init__`. No need to calculate correct
-    # `__init__` to call. This can lead to RecursionError. See bpo-45121.
-    if cls.__init__ is not _no_init_or_replace_init:
-        return
 
-    # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
-    # The first instantiation of the subclass will call `_no_init_or_replace_init` which
-    # searches for a proper new `__init__` in the MRO. The new `__init__`
-    # replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
-    # instantiation of the protocol subclass will thus use the new
-    # `__init__` and no longer call `_no_init_or_replace_init`.
-    for base in cls.__mro__:
-        init = base.__dict__.get('__init__', _no_init_or_replace_init)
-        if init is not _no_init_or_replace_init:
-            cls.__init__ = init
-            break
-    else:
-        # should not happen
-        cls.__init__ = object.__init__
-
-    cls.__init__(self, *args, **kwargs)
-
-
-def _caller(depth=1, default='__main__'):
-    try:
-        return sys._getframe(depth + 1).f_globals.get('__name__', default)
-    except (AttributeError, ValueError):  # For platforms without _getframe()
-        return None
-
-
-def _allow_reckless_class_checks(depth=3):
+def _allow_reckless_class_cheks():
     """Allow instance and class checks for special stdlib modules.
 
     The abc and functools modules indiscriminately call isinstance() and
     issubclass() on the whole MRO of a user class, which may contain protocols.
     """
     try:
-        return sys._getframe(depth).f_globals['__name__'] in ['abc', 'functools']
+        return sys._getframe(3).f_globals['__name__'] in ['abc', 'functools']
     except (AttributeError, ValueError):  # For platforms without _getframe().
         return True
 
 
-_PROTO_ALLOWLIST = {
+_PROTO_WHITELIST = {
     'collections.abc': [
         'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
         'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
@@ -1489,14 +1092,6 @@
     def __instancecheck__(cls, instance):
         # We need this method for situations where attributes are
         # assigned in __init__.
-        if (
-            getattr(cls, '_is_protocol', False) and
-            not getattr(cls, '_is_runtime_protocol', False) and
-            not _allow_reckless_class_checks(depth=2)
-        ):
-            raise TypeError("Instance and class checks can only be used with"
-                            " @runtime_checkable protocols")
-
         if ((not getattr(cls, '_is_protocol', False) or
                 _is_callable_members_only(cls)) and
                 issubclass(instance.__class__, cls)):
@@ -1559,12 +1154,12 @@
 
             # First, perform various sanity checks.
             if not getattr(cls, '_is_runtime_protocol', False):
-                if _allow_reckless_class_checks():
+                if _allow_reckless_class_cheks():
                     return NotImplemented
                 raise TypeError("Instance and class checks can only be used with"
                                 " @runtime_checkable protocols")
             if not _is_callable_members_only(cls):
-                if _allow_reckless_class_checks():
+                if _allow_reckless_class_cheks():
                     return NotImplemented
                 raise TypeError("Protocols with non-method members"
                                 " don't support issubclass()")
@@ -1601,12 +1196,12 @@
         # ... otherwise check consistency of bases, and prohibit instantiation.
         for base in cls.__bases__:
             if not (base in (object, Generic) or
-                    base.__module__ in _PROTO_ALLOWLIST and
-                    base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
+                    base.__module__ in _PROTO_WHITELIST and
+                    base.__name__ in _PROTO_WHITELIST[base.__module__] or
                     issubclass(base, Generic) and base._is_protocol):
                 raise TypeError('Protocols can only inherit from other'
                                 ' protocols, got %r' % base)
-        cls.__init__ = _no_init_or_replace_init
+        cls.__init__ = _no_init
 
 
 class _AnnotatedAlias(_GenericAlias, _root=True):
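The _no_init hook restored above is what keeps protocol classes non-instantiable, and @runtime_checkable is what unlocks the structural isinstance()/issubclass() path those checks guard. A self-contained sketch (Closable and File are illustrative names):

    from typing import Protocol, runtime_checkable

    @runtime_checkable
    class Closable(Protocol):
        def close(self) -> None: ...

    class File:
        def close(self) -> None:
            pass

    assert isinstance(File(), Closable)        # structural match on close()
    assert not isinstance(object(), Closable)  # no close() method

    try:
        Closable()
    except TypeError as exc:
        print(exc)  # Protocols cannot be instantiated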
@@ -1649,11 +1244,6 @@
     def __hash__(self):
         return hash((self.__origin__, self.__metadata__))
 
-    def __getattr__(self, attr):
-        if attr in {'__name__', '__qualname__'}:
-            return 'Annotated'
-        return super().__getattr__(attr)
-
 
 class Annotated:
     """Add context specific metadata to a type.
@@ -1698,7 +1288,7 @@
                             "with at least two arguments (a type and an "
                             "annotation).")
         msg = "Annotated[t, ...]: t must be a type."
-        origin = _type_check(params[0], msg, allow_special_forms=True)
+        origin = _type_check(params[0], msg)
         metadata = tuple(params[1:])
         return _AnnotatedAlias(origin, metadata)
 
@@ -1792,8 +1382,7 @@
     - If no dict arguments are passed, an attempt is made to use the
       globals from obj (or the respective module's globals for classes),
       and these are also used as the locals.  If the object does not appear
-      to have globals, an empty dictionary is used.  For classes, the search
-      order is globals first then locals.
+      to have globals, an empty dictionary is used.
 
     - If one dict argument is passed, it is used for both globals and
       locals.
@@ -1809,27 +1398,16 @@
         hints = {}
         for base in reversed(obj.__mro__):
             if globalns is None:
-                base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
+                base_globals = sys.modules[base.__module__].__dict__
             else:
                 base_globals = globalns
             ann = base.__dict__.get('__annotations__', {})
-            if isinstance(ann, types.GetSetDescriptorType):
-                ann = {}
-            base_locals = dict(vars(base)) if localns is None else localns
-            if localns is None and globalns is None:
-                # This is surprising, but required.  Before Python 3.10,
-                # get_type_hints only evaluated the globalns of
-                # a class.  To maintain backwards compatibility, we reverse
-                # the globalns and localns order so that eval() looks into
-                # *base_globals* first rather than *base_locals*.
-                # This only affects ForwardRefs.
-                base_globals, base_locals = base_locals, base_globals
             for name, value in ann.items():
                 if value is None:
                     value = type(None)
                 if isinstance(value, str):
-                    value = ForwardRef(value, is_argument=False, is_class=True)
-                value = _eval_type(value, base_globals, base_locals)
+                    value = ForwardRef(value, is_argument=False)
+                value = _eval_type(value, base_globals, localns)
                 hints[name] = value
         return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
 
@@ -1860,13 +1438,7 @@
         if value is None:
             value = type(None)
         if isinstance(value, str):
-            # class-level forward refs were handled above, this must be either
-            # a module-level annotation or a function argument annotation
-            value = ForwardRef(
-                value,
-                is_argument=not isinstance(obj, types.ModuleType),
-                is_class=False,
-            )
+            value = ForwardRef(value)
         value = _eval_type(value, globalns, localns)
         if name in defaults and defaults[name] is None:
             value = Optional[value]
@@ -1889,12 +1461,6 @@
         if stripped_args == t.__args__:
             return t
         return GenericAlias(t.__origin__, stripped_args)
-    if isinstance(t, types.UnionType):
-        stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
-        if stripped_args == t.__args__:
-            return t
-        return functools.reduce(operator.or_, stripped_args)
-
     return t
 
 
@@ -1911,17 +1477,13 @@
         get_origin(Generic[T]) is Generic
         get_origin(Union[T, int]) is Union
         get_origin(List[Tuple[T, T]][int]) == list
-        get_origin(P.args) is P
     """
     if isinstance(tp, _AnnotatedAlias):
         return Annotated
-    if isinstance(tp, (_BaseGenericAlias, GenericAlias,
-                       ParamSpecArgs, ParamSpecKwargs)):
+    if isinstance(tp, (_BaseGenericAlias, GenericAlias)):
         return tp.__origin__
     if tp is Generic:
         return Generic
-    if isinstance(tp, types.UnionType):
-        return types.UnionType
     return None
 
 
@@ -1938,31 +1500,16 @@
     """
     if isinstance(tp, _AnnotatedAlias):
         return (tp.__origin__,) + tp.__metadata__
-    if isinstance(tp, (_GenericAlias, GenericAlias)):
+    if isinstance(tp, _GenericAlias):
         res = tp.__args__
-        if (tp.__origin__ is collections.abc.Callable
-                and not (len(res) == 2 and _is_param_expr(res[0]))):
+        if tp.__origin__ is collections.abc.Callable and res[0] is not Ellipsis:
             res = (list(res[:-1]), res[-1])
         return res
-    if isinstance(tp, types.UnionType):
+    if isinstance(tp, GenericAlias):
         return tp.__args__
     return ()
 
 
-def is_typeddict(tp):
-    """Check if an annotation is a TypedDict class
-
-    For example::
-        class Film(TypedDict):
-            title: str
-            year: int
-
-        is_typeddict(Film)  # => True
-        is_typeddict(Union[list, str])  # => False
-    """
-    return isinstance(tp, _TypedDictMeta)
-
-
 def no_type_check(arg):
     """Decorator to indicate that annotations are not type hints.
 
@@ -2353,8 +1900,7 @@
         own_annotation_keys = set(own_annotations.keys())
         msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
         own_annotations = {
-            n: _type_check(tp, msg, module=tp_dict.__module__)
-            for n, tp in own_annotations.items()
+            n: _type_check(tp, msg) for n, tp in own_annotations.items()
         }
         required_keys = set()
         optional_keys = set()
@@ -2434,24 +1980,24 @@
         raise TypeError("TypedDict takes either a dict or keyword arguments,"
                         " but not both")
 
-    ns = {'__annotations__': dict(fields)}
+    ns = {'__annotations__': dict(fields), '__total__': total}
     try:
         # Setting correct module is necessary to make typed dict classes pickleable.
         ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
     except (AttributeError, ValueError):
         pass
 
-    return _TypedDictMeta(typename, (), ns, total=total)
+    return _TypedDictMeta(typename, (), ns)
 
 _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
 TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
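
Both versions of the functional form accept a dict of fields plus a total flag; they differ only in whether __total__ is stored by the metaclass or placed directly in the namespace. A usage sketch, assuming Python 3.8+; the Point name is illustrative:

    from typing import TypedDict

    Point = TypedDict('Point', {'x': int, 'y': int}, total=False)

    p: Point = {'x': 1}       # total=False makes keys optional for checkers
    print(Point.__total__)    # False on both sides of this hunk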
 
 
-class NewType:
+def NewType(name, tp):
     """NewType creates simple unique types with almost zero
     runtime overhead. NewType(name, tp) is considered a subtype of tp
     by static type checkers. At runtime, NewType(name, tp) returns
-    a dummy callable that simply returns its argument. Usage::
+    a dummy function that simply returns its argument. Usage::
 
         UserId = NewType('UserId', int)
 
@@ -2466,30 +2012,12 @@
         num = UserId(5) + 1     # type: int
     """
 
-    def __init__(self, name, tp):
-        self.__qualname__ = name
-        if '.' in name:
-            name = name.rpartition('.')[-1]
-        self.__name__ = name
-        self.__supertype__ = tp
-        def_mod = _caller()
-        if def_mod != 'typing':
-            self.__module__ = def_mod
-
-    def __repr__(self):
-        return f'{self.__module__}.{self.__qualname__}'
-
-    def __call__(self, x):
+    def new_type(x):
         return x
 
-    def __reduce__(self):
-        return self.__qualname__
-
-    def __or__(self, other):
-        return Union[self, other]
-
-    def __ror__(self, other):
-        return Union[other, self]
+    new_type.__name__ = name
+    new_type.__supertype__ = tp
+    return new_type
 
 
 # Python-version-specific alias (Python 2: unicode; Python 3: str)
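
Whether NewType is a class (the newer side) or a function factory (the reverted side), runtime usage is identical; only repr, pickling, and the | operator differ. A sketch assuming Python 3.8+:

    from typing import NewType

    UserId = NewType('UserId', int)

    uid = UserId(42)                    # at runtime this is just the int 42
    assert uid == 42
    assert UserId.__supertype__ is int  # set by both implementations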
diff --git a/common/py3-stdlib/unittest/_log.py b/common/py3-stdlib/unittest/_log.py
index 94868e5..94e7e75 100644
--- a/common/py3-stdlib/unittest/_log.py
+++ b/common/py3-stdlib/unittest/_log.py
@@ -26,11 +26,11 @@
 
 
 class _AssertLogsContext(_BaseTestCaseContext):
-    """A context manager for assertLogs() and assertNoLogs() """
+    """A context manager used to implement TestCase.assertLogs()."""
 
     LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
 
-    def __init__(self, test_case, logger_name, level, no_logs):
+    def __init__(self, test_case, logger_name, level):
         _BaseTestCaseContext.__init__(self, test_case)
         self.logger_name = logger_name
         if level:
@@ -38,7 +38,6 @@
         else:
             self.level = logging.INFO
         self.msg = None
-        self.no_logs = no_logs
 
     def __enter__(self):
         if isinstance(self.logger_name, logging.Logger):
@@ -47,7 +46,6 @@
             logger = self.logger = logging.getLogger(self.logger_name)
         formatter = logging.Formatter(self.LOGGING_FORMAT)
         handler = _CapturingHandler()
-        handler.setLevel(self.level)
         handler.setFormatter(formatter)
         self.watcher = handler.watcher
         self.old_handlers = logger.handlers[:]
@@ -56,31 +54,16 @@
         logger.handlers = [handler]
         logger.setLevel(self.level)
         logger.propagate = False
-        if self.no_logs:
-            return
         return handler.watcher
 
     def __exit__(self, exc_type, exc_value, tb):
         self.logger.handlers = self.old_handlers
         self.logger.propagate = self.old_propagate
         self.logger.setLevel(self.old_level)
-
         if exc_type is not None:
             # let unexpected exceptions pass through
             return False
-
-        if self.no_logs:
-            # assertNoLogs
-            if len(self.watcher.records) > 0:
-                self._raiseFailure(
-                    "Unexpected logs found: {!r}".format(
-                        self.watcher.output
-                    )
-                )
-
-        else:
-            # assertLogs
-            if len(self.watcher.records) == 0:
-                self._raiseFailure(
-                    "no logs of level {} or higher triggered on {}"
-                    .format(logging.getLevelName(self.level), self.logger.name))
+        if len(self.watcher.records) == 0:
+            self._raiseFailure(
+                "no logs of level {} or higher triggered on {}"
+                .format(logging.getLevelName(self.level), self.logger.name))
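
From the test author's side this context manager is reached through TestCase.assertLogs(); the records captured by the handler above surface as the manager's output attribute. A hedged usage sketch; class and logger names are illustrative:

    import logging
    import unittest

    class LogTests(unittest.TestCase):
        def test_logs(self):
            with self.assertLogs('foo', level='INFO') as cm:
                logging.getLogger('foo').info('first message')
            # LOGGING_FORMAT above yields 'LEVEL:name:message'
            self.assertEqual(cm.output, ['INFO:foo:first message'])

    if __name__ == '__main__':
        unittest.main()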
diff --git a/common/py3-stdlib/unittest/async_case.py b/common/py3-stdlib/unittest/async_case.py
index 2323119..1bc1312 100644
--- a/common/py3-stdlib/unittest/async_case.py
+++ b/common/py3-stdlib/unittest/async_case.py
@@ -4,6 +4,7 @@
 from .case import TestCase
 
 
+
 class IsolatedAsyncioTestCase(TestCase):
     # Names intentionally have a long prefix
     # to reduce a chance of clashing with user-defined attributes
@@ -51,7 +52,7 @@
         # We intentionally don't add inspect.iscoroutinefunction() check
         # for func argument because there is no way
         # to check for async function reliably:
-        # 1. It can be "async def func()" itself
+        # 1. It can be "async def func()" itself
         # 2. Class can implement "async def __call__()" method
         # 3. Regular "def func()" that returns awaitable object
         self.addCleanup(*(func, *args), **kwargs)
@@ -71,15 +72,15 @@
         self._callMaybeAsync(function, *args, **kwargs)
 
     def _callAsync(self, func, /, *args, **kwargs):
-        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
+        assert self._asyncioTestLoop is not None
         ret = func(*args, **kwargs)
-        assert inspect.isawaitable(ret), f'{func!r} returned non-awaitable'
+        assert inspect.isawaitable(ret)
         fut = self._asyncioTestLoop.create_future()
         self._asyncioCallsQueue.put_nowait((fut, ret))
         return self._asyncioTestLoop.run_until_complete(fut)
 
     def _callMaybeAsync(self, func, /, *args, **kwargs):
-        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
+        assert self._asyncioTestLoop is not None
         ret = func(*args, **kwargs)
         if inspect.isawaitable(ret):
             fut = self._asyncioTestLoop.create_future()
@@ -101,14 +102,14 @@
                 ret = await awaitable
                 if not fut.cancelled():
                     fut.set_result(ret)
-            except (SystemExit, KeyboardInterrupt):
+            except asyncio.CancelledError:
                 raise
-            except (BaseException, asyncio.CancelledError) as ex:
+            except Exception as ex:
                 if not fut.cancelled():
                     fut.set_exception(ex)
 
     def _setupAsyncioLoop(self):
-        assert self._asyncioTestLoop is None, 'asyncio test loop already initialized'
+        assert self._asyncioTestLoop is None
         loop = asyncio.new_event_loop()
         asyncio.set_event_loop(loop)
         loop.set_debug(True)
@@ -118,7 +119,7 @@
         loop.run_until_complete(fut)
 
     def _tearDownAsyncioLoop(self):
-        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
+        assert self._asyncioTestLoop is not None
         loop = self._asyncioTestLoop
         self._asyncioTestLoop = None
         self._asyncioCallsQueue.put_nowait(None)
@@ -134,7 +135,7 @@
                 task.cancel()
 
             loop.run_until_complete(
-                asyncio.gather(*to_cancel, return_exceptions=True))
+                asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
 
             for task in to_cancel:
                 if task.cancelled():
@@ -157,12 +158,3 @@
             return super().run(result)
         finally:
             self._tearDownAsyncioLoop()
-
-    def debug(self):
-        self._setupAsyncioLoop()
-        super().debug()
-        self._tearDownAsyncioLoop()
-
-    def __del__(self):
-        if self._asyncioTestLoop is not None:
-            self._tearDownAsyncioLoop()
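
Either version of the class runs each coroutine test on a private event loop; a minimal, self-contained usage sketch with illustrative names:

    import asyncio
    import unittest

    class Example(unittest.IsolatedAsyncioTestCase):
        async def asyncSetUp(self):
            self.ready = True

        async def test_sleep(self):
            await asyncio.sleep(0)      # runs on the case's private loop
            self.assertTrue(self.ready)

    if __name__ == '__main__':
        unittest.main()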
diff --git a/common/py3-stdlib/unittest/case.py b/common/py3-stdlib/unittest/case.py
index 61003d0..f8bc865 100644
--- a/common/py3-stdlib/unittest/case.py
+++ b/common/py3-stdlib/unittest/case.py
@@ -252,7 +252,7 @@
     def __enter__(self):
         # The __warningregistry__'s need to be in a pristine state for tests
         # to work properly.
-        for v in list(sys.modules.values()):
+        for v in sys.modules.values():
             if getattr(v, '__warningregistry__', None):
                 v.__warningregistry__ = {}
         self.warnings_manager = warnings.catch_warnings(record=True)
@@ -295,6 +295,7 @@
             self._raiseFailure("{} not triggered".format(exc_name))
 
 
+
 class _OrderedChainMap(collections.ChainMap):
     def __iter__(self):
         seen = set()
@@ -555,71 +556,73 @@
         function(*args, **kwargs)
 
     def run(self, result=None):
+        orig_result = result
         if result is None:
             result = self.defaultTestResult()
             startTestRun = getattr(result, 'startTestRun', None)
-            stopTestRun = getattr(result, 'stopTestRun', None)
             if startTestRun is not None:
                 startTestRun()
-        else:
-            stopTestRun = None
 
         result.startTest(self)
-        try:
-            testMethod = getattr(self, self._testMethodName)
-            if (getattr(self.__class__, "__unittest_skip__", False) or
-                getattr(testMethod, "__unittest_skip__", False)):
-                # If the class or method was skipped.
+
+        testMethod = getattr(self, self._testMethodName)
+        if (getattr(self.__class__, "__unittest_skip__", False) or
+            getattr(testMethod, "__unittest_skip__", False)):
+            # If the class or method was skipped.
+            try:
                 skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                             or getattr(testMethod, '__unittest_skip_why__', ''))
                 self._addSkip(result, self, skip_why)
-                return result
-
-            expecting_failure = (
-                getattr(self, "__unittest_expecting_failure__", False) or
-                getattr(testMethod, "__unittest_expecting_failure__", False)
-            )
-            outcome = _Outcome(result)
-            try:
-                self._outcome = outcome
-
-                with outcome.testPartExecutor(self):
-                    self._callSetUp()
-                if outcome.success:
-                    outcome.expecting_failure = expecting_failure
-                    with outcome.testPartExecutor(self, isTest=True):
-                        self._callTestMethod(testMethod)
-                    outcome.expecting_failure = False
-                    with outcome.testPartExecutor(self):
-                        self._callTearDown()
-
-                self.doCleanups()
-                for test, reason in outcome.skipped:
-                    self._addSkip(result, test, reason)
-                self._feedErrorsToResult(result, outcome.errors)
-                if outcome.success:
-                    if expecting_failure:
-                        if outcome.expectedFailure:
-                            self._addExpectedFailure(result, outcome.expectedFailure)
-                        else:
-                            self._addUnexpectedSuccess(result)
-                    else:
-                        result.addSuccess(self)
-                return result
             finally:
-                # explicitly break reference cycles:
-                # outcome.errors -> frame -> outcome -> outcome.errors
-                # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
-                outcome.errors.clear()
-                outcome.expectedFailure = None
+                result.stopTest(self)
+            return
+        expecting_failure_method = getattr(testMethod,
+                                           "__unittest_expecting_failure__", False)
+        expecting_failure_class = getattr(self,
+                                          "__unittest_expecting_failure__", False)
+        expecting_failure = expecting_failure_class or expecting_failure_method
+        outcome = _Outcome(result)
+        try:
+            self._outcome = outcome
 
-                # clear the outcome, no more needed
-                self._outcome = None
+            with outcome.testPartExecutor(self):
+                self._callSetUp()
+            if outcome.success:
+                outcome.expecting_failure = expecting_failure
+                with outcome.testPartExecutor(self, isTest=True):
+                    self._callTestMethod(testMethod)
+                outcome.expecting_failure = False
+                with outcome.testPartExecutor(self):
+                    self._callTearDown()
 
+            self.doCleanups()
+            for test, reason in outcome.skipped:
+                self._addSkip(result, test, reason)
+            self._feedErrorsToResult(result, outcome.errors)
+            if outcome.success:
+                if expecting_failure:
+                    if outcome.expectedFailure:
+                        self._addExpectedFailure(result, outcome.expectedFailure)
+                    else:
+                        self._addUnexpectedSuccess(result)
+                else:
+                    result.addSuccess(self)
+            return result
         finally:
             result.stopTest(self)
-            if stopTestRun is not None:
-                stopTestRun()
+            if orig_result is None:
+                stopTestRun = getattr(result, 'stopTestRun', None)
+                if stopTestRun is not None:
+                    stopTestRun()
+
+            # explicitly break reference cycles:
+            # outcome.errors -> frame -> outcome -> outcome.errors
+            # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
+            outcome.errors.clear()
+            outcome.expectedFailure = None
+
+            # clear the outcome, no more needed
+            self._outcome = None
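
Both shapes of run() accept an explicit result object and only synthesize a default one (calling startTestRun/stopTestRun around it) when none is passed; a short sketch with illustrative names:

    import unittest

    class T(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)

    result = unittest.TestResult()      # explicitly supplied result
    T('test_ok').run(result)
    print(result.testsRun, result.wasSuccessful())   # 1 True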
 
     def doCleanups(self):
         """Execute all cleanup functions. Normally called for you after
@@ -651,20 +654,12 @@
 
     def debug(self):
         """Run the test without collecting errors in a TestResult"""
-        testMethod = getattr(self, self._testMethodName)
-        if (getattr(self.__class__, "__unittest_skip__", False) or
-            getattr(testMethod, "__unittest_skip__", False)):
-            # If the class or method was skipped.
-            skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
-                        or getattr(testMethod, '__unittest_skip_why__', ''))
-            raise SkipTest(skip_why)
-
-        self._callSetUp()
-        self._callTestMethod(testMethod)
-        self._callTearDown()
+        self.setUp()
+        getattr(self, self._testMethodName)()
+        self.tearDown()
         while self._cleanups:
-            function, args, kwargs = self._cleanups.pop()
-            self._callCleanup(function, *args, **kwargs)
+            function, args, kwargs = self._cleanups.pop(-1)
+            function(*args, **kwargs)
 
     def skipTest(self, reason):
         """Skip this test."""
@@ -793,16 +788,7 @@
         """
         # Lazy import to avoid importing logging if it is not needed.
         from ._log import _AssertLogsContext
-        return _AssertLogsContext(self, logger, level, no_logs=False)
-
-    def assertNoLogs(self, logger=None, level=None):
-        """ Fail unless no log messages of level *level* or higher are emitted
-        on *logger_name* or its children.
-
-        This method must be used as a context manager.
-        """
-        from ._log import _AssertLogsContext
-        return _AssertLogsContext(self, logger, level, no_logs=True)
+        return _AssertLogsContext(self, logger, level)
 
     def _getAssertEqualityFunc(self, first, second):
         """Get a detailed comparison function for the types of the two args.
@@ -1146,8 +1132,7 @@
     def assertDictContainsSubset(self, subset, dictionary, msg=None):
         """Checks whether dictionary is a superset of subset."""
         warnings.warn('assertDictContainsSubset is deprecated',
-                      DeprecationWarning,
-                      stacklevel=2)
+                      DeprecationWarning)
         missing = []
         mismatched = []
         for key, value in subset.items():
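
The stacklevel=2 argument dropped here controls which frame the DeprecationWarning is attributed to; with it, the warning points at the caller of the deprecated method rather than at unittest itself. A generic illustration, not specific to unittest; the function name is illustrative:

    import warnings

    def deprecated_api():
        warnings.warn('deprecated', DeprecationWarning, stacklevel=2)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        deprecated_api()
    print(caught[0].filename)   # the caller's file, thanks to stacklevel=2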
diff --git a/common/py3-stdlib/unittest/mock.py b/common/py3-stdlib/unittest/mock.py
index 7152f86..b495a5f 100644
--- a/common/py3-stdlib/unittest/mock.py
+++ b/common/py3-stdlib/unittest/mock.py
@@ -36,10 +36,6 @@
 from functools import wraps, partial
 
 
-class InvalidSpecError(Exception):
-    """Indicates that an invalid value was used as a mock spec."""
-
-
 _builtins = {name for name in dir(builtins) if not name.startswith('_')}
 
 FILTER_DIR = True
@@ -410,7 +406,7 @@
             # Check if spec is an async object or function
             bound_args = _MOCK_SIG.bind_partial(cls, *args, **kw).arguments
             spec_arg = bound_args.get('spec_set', bound_args.get('spec'))
-            if spec_arg is not None and _is_async_obj(spec_arg):
+            if spec_arg and _is_async_obj(spec_arg):
                 bases = (AsyncMockMixin, cls)
         new = type(cls.__name__, bases, {'__doc__': cls.__doc__})
         instance = _safe_super(NonCallableMock, cls).__new__(new)
@@ -635,10 +631,9 @@
         elif _is_magic(name):
             raise AttributeError(name)
         if not self._mock_unsafe:
-            if name.startswith(('assert', 'assret', 'asert', 'aseert', 'assrt')):
-                raise AttributeError(
-                    f"{name!r} is not a valid assertion. Use a spec "
-                    f"for the mock if {name!r} is meant to be an attribute.")
+            if name.startswith(('assert', 'assret')):
+                raise AttributeError("Attributes cannot start with 'assert' "
+                                     "or 'assret'")
 
         result = self._mock_children.get(name)
         if result is _deleted:
@@ -657,17 +652,10 @@
             self._mock_children[name]  = result
 
         elif isinstance(result, _SpecState):
-            try:
-                result = create_autospec(
-                    result.spec, result.spec_set, result.instance,
-                    result.parent, result.name
-                )
-            except InvalidSpecError:
-                target_name = self.__dict__['_mock_name'] or self
-                raise InvalidSpecError(
-                    f'Cannot autospec attr {name!r} from target '
-                    f'{target_name!r} as it has already been mocked out. '
-                    f'[target={self!r}, attr={result.spec!r}]')
+            result = create_autospec(
+                result.spec, result.spec_set, result.instance,
+                result.parent, result.name
+            )
             self._mock_children[name]  = result
 
         return result
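
Both versions of the attribute check above reject names that look like misspelled assertions, though the newer one matches more prefixes and gives a longer message; the escape hatch in both is the unsafe flag. A sketch:

    from unittest import mock

    m = mock.Mock()
    try:
        m.assret_called_once_with()     # typo would otherwise pass silently
    except AttributeError as e:
        print('rejected:', e)

    m2 = mock.Mock(unsafe=True)
    m2.assret_called_once_with()        # permitted when unsafe=True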
@@ -1004,11 +992,6 @@
         if _new_name in self.__dict__['_spec_asyncs']:
             return AsyncMock(**kw)
 
-        if self._mock_sealed:
-            attribute = f".{kw['name']}" if "name" in kw else "()"
-            mock_name = self._extract_mock_name() + attribute
-            raise AttributeError(mock_name)
-
         _type = type(self)
         if issubclass(_type, MagicMock) and _new_name in _async_method_magics:
             # Any asynchronous magic becomes an AsyncMock
@@ -1027,6 +1010,12 @@
                 klass = Mock
         else:
             klass = _type.__mro__[1]
+
+        if self._mock_sealed:
+            attribute = "." + kw["name"] if "name" in kw else "()"
+            mock_name = self._extract_mock_name() + attribute
+            raise AttributeError(mock_name)
+
         return klass(**kw)
 
 
@@ -1252,17 +1241,6 @@
     return thing
 
 
-# _check_spec_arg_typos takes kwargs from commands like patch and checks that
-# they don't contain common misspellings of arguments related to autospeccing.
-def _check_spec_arg_typos(kwargs_to_check):
-    typos = ("autospect", "auto_spec", "set_spec")
-    for typo in typos:
-        if typo in kwargs_to_check:
-            raise RuntimeError(
-                f"{typo!r} might be a typo; use unsafe=True if this is intended"
-            )
-
-
 class _patch(object):
 
     attribute_name = None
@@ -1270,7 +1248,7 @@
 
     def __init__(
             self, getter, attribute, new, spec, create,
-            spec_set, autospec, new_callable, kwargs, *, unsafe=False
+            spec_set, autospec, new_callable, kwargs
         ):
         if new_callable is not None:
             if new is not DEFAULT:
@@ -1281,16 +1259,6 @@
                 raise ValueError(
                     "Cannot use 'autospec' and 'new_callable' together"
                 )
-        if not unsafe:
-            _check_spec_arg_typos(kwargs)
-        if _is_instance_mock(spec):
-            raise InvalidSpecError(
-                f'Cannot spec attr {attribute!r} as the spec '
-                f'has already been mocked out. [spec={spec!r}]')
-        if _is_instance_mock(spec_set):
-            raise InvalidSpecError(
-                f'Cannot spec attr {attribute!r} as the spec_set '
-                f'target has already been mocked out. [spec_set={spec_set!r}]')
 
         self.getter = getter
         self.attribute = attribute
@@ -1518,18 +1486,6 @@
             if autospec is True:
                 autospec = original
 
-            if _is_instance_mock(self.target):
-                raise InvalidSpecError(
-                    f'Cannot autospec attr {self.attribute!r} as the patch '
-                    f'target has already been mocked out. '
-                    f'[target={self.target!r}, attr={autospec!r}]')
-            if _is_instance_mock(autospec):
-                target_name = getattr(self.target, '__name__', self.target)
-                raise InvalidSpecError(
-                    f'Cannot autospec attr {self.attribute!r} from target '
-                    f'{target_name!r} as it has already been mocked out. '
-                    f'[target={self.target!r}, attr={autospec!r}]')
-
             new = create_autospec(autospec, spec_set=spec_set,
                                   _name=self.attribute, **kwargs)
         elif kwargs:
@@ -1602,9 +1558,9 @@
 def _get_target(target):
     try:
         target, attribute = target.rsplit('.', 1)
-    except (TypeError, ValueError, AttributeError):
-        raise TypeError(
-            f"Need a valid target to patch. You supplied: {target!r}")
+    except (TypeError, ValueError):
+        raise TypeError("Need a valid target to patch. You supplied: %r" %
+                        (target,))
     getter = lambda: _importer(target)
     return getter, attribute
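
_get_target() splits the patch target on its last dot, importing the left part and treating the right part as the attribute to replace; a self-contained sketch:

    import os.path
    from unittest import mock

    with mock.patch('os.path.exists', return_value=True) as fake_exists:
        assert os.path.exists('/definitely/not/there')
        fake_exists.assert_called_once_with('/definitely/not/there')

    try:
        mock.patch('nodots')            # no dot to split on
    except TypeError as e:
        print(e)                        # Need a valid target to patch. ...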
 
@@ -1612,7 +1568,7 @@
 def _patch_object(
         target, attribute, new=DEFAULT, spec=None,
         create=False, spec_set=None, autospec=None,
-        new_callable=None, *, unsafe=False, **kwargs
+        new_callable=None, **kwargs
     ):
     """
     patch the named member (`attribute`) on an object (`target`) with a mock
@@ -1634,7 +1590,7 @@
     getter = lambda: target
     return _patch(
         getter, attribute, new, spec, create,
-        spec_set, autospec, new_callable, kwargs, unsafe=unsafe
+        spec_set, autospec, new_callable, kwargs
     )
 
 
@@ -1689,7 +1645,7 @@
 
 def patch(
         target, new=DEFAULT, spec=None, create=False,
-        spec_set=None, autospec=None, new_callable=None, *, unsafe=False, **kwargs
+        spec_set=None, autospec=None, new_callable=None, **kwargs
     ):
     """
     `patch` acts as a function decorator, class decorator or a context
@@ -1751,10 +1707,6 @@
     use "as" then the patched object will be bound to the name after the
     "as"; very useful if `patch` is creating a mock object for you.
 
-    Patch will raise a `RuntimeError` if passed some common misspellings of
-    the arguments autospec and spec_set. Pass the argument `unsafe` with the
-    value True to disable that check.
-
     `patch` takes arbitrary keyword arguments. These will be passed to
     `AsyncMock` if the patched object is asynchronous, to `MagicMock`
     otherwise or to `new_callable` if specified.
@@ -1765,7 +1717,7 @@
     getter, attribute = _get_target(target)
     return _patch(
         getter, attribute, new, spec, create,
-        spec_set, autospec, new_callable, kwargs, unsafe=unsafe
+        spec_set, autospec, new_callable, kwargs
     )
 
 
@@ -2615,7 +2567,7 @@
 
 
 def create_autospec(spec, spec_set=False, instance=False, _parent=None,
-                    _name=None, *, unsafe=False, **kwargs):
+                    _name=None, **kwargs):
     """Create a mock object using another object as a spec. Attributes on the
     mock will use the corresponding attribute on the `spec` object as their
     spec.
@@ -2631,10 +2583,6 @@
     spec for an instance object by passing `instance=True`. The returned mock
     will only be callable if instances of the mock are callable.
 
-    `create_autospec` will raise a `RuntimeError` if passed some common
-    misspellings of the arguments autospec and spec_set. Pass the argument
-    `unsafe` with the value True to disable that check.
-
     `create_autospec` also takes arbitrary keyword arguments that are passed to
     the constructor of the created mock."""
     if _is_list(spec):
@@ -2643,9 +2591,6 @@
         spec = type(spec)
 
     is_type = isinstance(spec, type)
-    if _is_instance_mock(spec):
-        raise InvalidSpecError(f'Cannot autospec a Mock object. '
-                               f'[object={spec!r}]')
     is_async_func = _is_async_func(spec)
     _kwargs = {'spec': spec}
     if spec_set:
@@ -2655,8 +2600,6 @@
         _kwargs = {}
     if _kwargs and instance:
         _kwargs['_spec_as_instance'] = True
-    if not unsafe:
-        _check_spec_arg_typos(kwargs)
 
     _kwargs.update(kwargs)
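
As the docstring above says, create_autospec() constrains the mock to the spec's call signature; a short sketch with an illustrative function:

    from unittest import mock

    def add(a, b):
        return a + b

    fake = mock.create_autospec(add, return_value=99)
    assert fake(1, 2) == 99
    fake.assert_called_once_with(1, 2)

    try:
        fake(1, 2, 3)                   # extra argument rejected by the spec
    except TypeError as e:
        print('spec enforced:', e)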
 
@@ -2926,8 +2869,6 @@
             continue
         if not isinstance(m, NonCallableMock):
             continue
-        if isinstance(m._mock_children.get(attr), _SpecState):
-            continue
         if m._mock_new_parent is mock:
             seal(m)
 
diff --git a/common/py3-stdlib/unittest/result.py b/common/py3-stdlib/unittest/result.py
index 3da7005..111317b 100644
--- a/common/py3-stdlib/unittest/result.py
+++ b/common/py3-stdlib/unittest/result.py
@@ -173,10 +173,17 @@
     def _exc_info_to_string(self, err, test):
         """Converts a sys.exc_info()-style tuple of values into a string."""
         exctype, value, tb = err
-        tb = self._clean_tracebacks(exctype, value, tb, test)
+        # Skip test runner traceback levels
+        while tb and self._is_relevant_tb_level(tb):
+            tb = tb.tb_next
+
+        if exctype is test.failureException:
+            # Skip assert*() traceback levels
+            length = self._count_relevant_tb_levels(tb)
+        else:
+            length = None
         tb_e = traceback.TracebackException(
-            exctype, value, tb,
-            capture_locals=self.tb_locals, compact=True)
+            exctype, value, tb, limit=length, capture_locals=self.tb_locals)
         msgLines = list(tb_e.format())
 
         if self.buffer:
@@ -192,49 +199,16 @@
                 msgLines.append(STDERR_LINE % error)
         return ''.join(msgLines)
 
-    def _clean_tracebacks(self, exctype, value, tb, test):
-        ret = None
-        first = True
-        excs = [(exctype, value, tb)]
-        while excs:
-            (exctype, value, tb) = excs.pop()
-            # Skip test runner traceback levels
-            while tb and self._is_relevant_tb_level(tb):
-                tb = tb.tb_next
-
-            # Skip assert*() traceback levels
-            if exctype is test.failureException:
-                self._remove_unittest_tb_frames(tb)
-
-            if first:
-                ret = tb
-                first = False
-            else:
-                value.__traceback__ = tb
-
-            if value is not None:
-                for c in (value.__cause__, value.__context__):
-                    if c is not None:
-                        excs.append((type(c), c, c.__traceback__))
-        return ret
 
     def _is_relevant_tb_level(self, tb):
         return '__unittest' in tb.tb_frame.f_globals
 
-    def _remove_unittest_tb_frames(self, tb):
-        '''Truncates usercode tb at the first unittest frame.
-
-        If the first frame of the traceback is in user code,
-        the prefix up to the first unittest frame is returned.
-        If the first frame is already in the unittest module,
-        the traceback is not modified.
-        '''
-        prev = None
+    def _count_relevant_tb_levels(self, tb):
+        length = 0
         while tb and not self._is_relevant_tb_level(tb):
-            prev = tb
+            length += 1
             tb = tb.tb_next
-        if prev is not None:
-            prev.tb_next = None
+        return length
 
     def __repr__(self):
         return ("<%s run=%i errors=%i failures=%i>" %
diff --git a/common/py3-stdlib/unittest/runner.py b/common/py3-stdlib/unittest/runner.py
index caf1590..45e7e4c 100644
--- a/common/py3-stdlib/unittest/runner.py
+++ b/common/py3-stdlib/unittest/runner.py
@@ -59,7 +59,6 @@
         super(TextTestResult, self).addSuccess(test)
         if self.showAll:
             self.stream.writeln("ok")
-            self.stream.flush()
         elif self.dots:
             self.stream.write('.')
             self.stream.flush()
@@ -68,7 +67,6 @@
         super(TextTestResult, self).addError(test, err)
         if self.showAll:
             self.stream.writeln("ERROR")
-            self.stream.flush()
         elif self.dots:
             self.stream.write('E')
             self.stream.flush()
@@ -77,7 +75,6 @@
         super(TextTestResult, self).addFailure(test, err)
         if self.showAll:
             self.stream.writeln("FAIL")
-            self.stream.flush()
         elif self.dots:
             self.stream.write('F')
             self.stream.flush()
@@ -86,7 +83,6 @@
         super(TextTestResult, self).addSkip(test, reason)
         if self.showAll:
             self.stream.writeln("skipped {0!r}".format(reason))
-            self.stream.flush()
         elif self.dots:
             self.stream.write("s")
             self.stream.flush()
@@ -95,7 +91,6 @@
         super(TextTestResult, self).addExpectedFailure(test, err)
         if self.showAll:
             self.stream.writeln("expected failure")
-            self.stream.flush()
         elif self.dots:
             self.stream.write("x")
             self.stream.flush()
@@ -104,7 +99,6 @@
         super(TextTestResult, self).addUnexpectedSuccess(test)
         if self.showAll:
             self.stream.writeln("unexpected success")
-            self.stream.flush()
         elif self.dots:
             self.stream.write("u")
             self.stream.flush()
@@ -112,7 +106,6 @@
     def printErrors(self):
         if self.dots or self.showAll:
             self.stream.writeln()
-            self.stream.flush()
         self.printErrorList('ERROR', self.errors)
         self.printErrorList('FAIL', self.failures)
 
@@ -122,7 +115,6 @@
             self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
             self.stream.writeln(self.separator2)
             self.stream.writeln("%s" % err)
-            self.stream.flush()
 
 
 class TextTestRunner(object):
@@ -226,5 +218,4 @@
             self.stream.writeln(" (%s)" % (", ".join(infos),))
         else:
             self.stream.write("\n")
-        self.stream.flush()
         return result
diff --git a/common/py3-stdlib/unittest/suite.py b/common/py3-stdlib/unittest/suite.py
index 6f45b6f..41993f9 100644
--- a/common/py3-stdlib/unittest/suite.py
+++ b/common/py3-stdlib/unittest/suite.py
@@ -149,7 +149,6 @@
         if getattr(currentClass, "__unittest_skip__", False):
             return
 
-        failed = False
         try:
             currentClass._classSetupFailed = False
         except TypeError:
@@ -158,32 +157,27 @@
             pass
 
         setUpClass = getattr(currentClass, 'setUpClass', None)
-        doClassCleanups = getattr(currentClass, 'doClassCleanups', None)
         if setUpClass is not None:
             _call_if_exists(result, '_setupStdout')
             try:
-                try:
-                    setUpClass()
-                except Exception as e:
-                    if isinstance(result, _DebugResult):
-                        raise
-                    failed = True
-                    try:
-                        currentClass._classSetupFailed = True
-                    except TypeError:
-                        pass
-                    className = util.strclass(currentClass)
-                    self._createClassOrModuleLevelException(result, e,
-                                                            'setUpClass',
-                                                            className)
-                if failed and doClassCleanups is not None:
-                    doClassCleanups()
-                    for exc_info in currentClass.tearDown_exceptions:
-                        self._createClassOrModuleLevelException(
-                                result, exc_info[1], 'setUpClass', className,
-                                info=exc_info)
+                setUpClass()
+            except Exception as e:
+                if isinstance(result, _DebugResult):
+                    raise
+                currentClass._classSetupFailed = True
+                className = util.strclass(currentClass)
+                self._createClassOrModuleLevelException(result, e,
+                                                        'setUpClass',
+                                                        className)
             finally:
                 _call_if_exists(result, '_restoreStdout')
+                if currentClass._classSetupFailed is True:
+                    currentClass.doClassCleanups()
+                    if len(currentClass.tearDown_exceptions) > 0:
+                        for exc in currentClass.tearDown_exceptions:
+                            self._createClassOrModuleLevelException(
+                                    result, exc[1], 'setUpClass', className,
+                                    info=exc)
 
     def _get_previous_module(self, result):
         previousModule = None
@@ -211,22 +205,20 @@
         if setUpModule is not None:
             _call_if_exists(result, '_setupStdout')
             try:
+                setUpModule()
+            except Exception as e:
                 try:
-                    setUpModule()
-                except Exception as e:
-                    if isinstance(result, _DebugResult):
-                        raise
-                    result._moduleSetUpFailed = True
-                    self._createClassOrModuleLevelException(result, e,
+                    case.doModuleCleanups()
+                except Exception as exc:
+                    self._createClassOrModuleLevelException(result, exc,
                                                             'setUpModule',
                                                             currentModule)
-                if result._moduleSetUpFailed:
-                    try:
-                        case.doModuleCleanups()
-                    except Exception as e:
-                        self._createClassOrModuleLevelException(result, e,
-                                                                'setUpModule',
-                                                                currentModule)
+                if isinstance(result, _DebugResult):
+                    raise
+                result._moduleSetUpFailed = True
+                self._createClassOrModuleLevelException(result, e,
+                                                        'setUpModule',
+                                                        currentModule)
             finally:
                 _call_if_exists(result, '_restoreStdout')
 
@@ -259,33 +251,30 @@
         except KeyError:
             return
 
-        _call_if_exists(result, '_setupStdout')
-        try:
-            tearDownModule = getattr(module, 'tearDownModule', None)
-            if tearDownModule is not None:
-                try:
-                    tearDownModule()
-                except Exception as e:
-                    if isinstance(result, _DebugResult):
-                        raise
-                    self._createClassOrModuleLevelException(result, e,
-                                                            'tearDownModule',
-                                                            previousModule)
+        tearDownModule = getattr(module, 'tearDownModule', None)
+        if tearDownModule is not None:
+            _call_if_exists(result, '_setupStdout')
             try:
-                case.doModuleCleanups()
+                tearDownModule()
             except Exception as e:
                 if isinstance(result, _DebugResult):
                     raise
                 self._createClassOrModuleLevelException(result, e,
                                                         'tearDownModule',
                                                         previousModule)
-        finally:
-            _call_if_exists(result, '_restoreStdout')
+            finally:
+                _call_if_exists(result, '_restoreStdout')
+                try:
+                    case.doModuleCleanups()
+                except Exception as e:
+                    self._createClassOrModuleLevelException(result, e,
+                                                            'tearDownModule',
+                                                            previousModule)
 
     def _tearDownPreviousClass(self, test, result):
         previousClass = getattr(result, '_previousTestClass', None)
         currentClass = test.__class__
-        if currentClass == previousClass or previousClass is None:
+        if currentClass == previousClass:
             return
         if getattr(previousClass, '_classSetupFailed', False):
             return
@@ -295,34 +284,27 @@
             return
 
         tearDownClass = getattr(previousClass, 'tearDownClass', None)
-        doClassCleanups = getattr(previousClass, 'doClassCleanups', None)
-        if tearDownClass is None and doClassCleanups is None:
-            return
-
-        _call_if_exists(result, '_setupStdout')
-        try:
-            if tearDownClass is not None:
-                try:
-                    tearDownClass()
-                except Exception as e:
-                    if isinstance(result, _DebugResult):
-                        raise
-                    className = util.strclass(previousClass)
-                    self._createClassOrModuleLevelException(result, e,
-                                                            'tearDownClass',
-                                                            className)
-            if doClassCleanups is not None:
-                doClassCleanups()
-                for exc_info in previousClass.tearDown_exceptions:
-                    if isinstance(result, _DebugResult):
-                        raise exc_info[1]
-                    className = util.strclass(previousClass)
-                    self._createClassOrModuleLevelException(result, exc_info[1],
-                                                            'tearDownClass',
-                                                            className,
-                                                            info=exc_info)
-        finally:
-            _call_if_exists(result, '_restoreStdout')
+        if tearDownClass is not None:
+            _call_if_exists(result, '_setupStdout')
+            try:
+                tearDownClass()
+            except Exception as e:
+                if isinstance(result, _DebugResult):
+                    raise
+                className = util.strclass(previousClass)
+                self._createClassOrModuleLevelException(result, e,
+                                                        'tearDownClass',
+                                                        className)
+            finally:
+                _call_if_exists(result, '_restoreStdout')
+                previousClass.doClassCleanups()
+                if len(previousClass.tearDown_exceptions) > 0:
+                    for exc in previousClass.tearDown_exceptions:
+                        className = util.strclass(previousClass)
+                        self._createClassOrModuleLevelException(result, exc[1],
+                                                                'tearDownClass',
+                                                                className,
+                                                                info=exc)
 
 
 class _ErrorHolder(object):
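
The class- and module-level setup/teardown paths above are what drive setUpClass, tearDownClass, and addClassCleanup in user tests; a compact usage sketch with illustrative names:

    import unittest

    class DBTests(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            cls.resource = 'connected'
            # Cleanup runs after tearDownClass, driven by the suite logic above.
            cls.addClassCleanup(setattr, cls, 'resource', None)

        def test_uses_resource(self):
            self.assertEqual(self.resource, 'connected')

    if __name__ == '__main__':
        unittest.main()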
diff --git a/common/py3-stdlib/unittest/test/test_assertions.py b/common/py3-stdlib/unittest/test/test_assertions.py
index a0db342..f5e64d6 100644
--- a/common/py3-stdlib/unittest/test/test_assertions.py
+++ b/common/py3-stdlib/unittest/test/test_assertions.py
@@ -2,7 +2,6 @@
 import warnings
 import weakref
 import unittest
-from test.support import gc_collect
 from itertools import product
 
 
@@ -125,10 +124,8 @@
                     self.foo()
 
         Foo("test_functional").run()
-        gc_collect()  # For PyPy or other GCs.
         self.assertIsNone(wr())
         Foo("test_with").run()
-        gc_collect()  # For PyPy or other GCs.
         self.assertIsNone(wr())
 
     def testAssertNotRegex(self):
diff --git a/common/py3-stdlib/unittest/test/test_async_case.py b/common/py3-stdlib/unittest/test/test_async_case.py
index e46b99f..2db441d 100644
--- a/common/py3-stdlib/unittest/test/test_async_case.py
+++ b/common/py3-stdlib/unittest/test/test_async_case.py
@@ -1,10 +1,5 @@
 import asyncio
 import unittest
-from test import support
-
-
-class MyException(Exception):
-    pass
 
 
 def tearDownModule():
@@ -12,14 +7,9 @@
 
 
 class TestAsyncCase(unittest.TestCase):
-    maxDiff = None
-
-    def tearDown(self):
-        # Ensure that IsolatedAsyncioTestCase instances are destroyed before
-        # starting a new event loop
-        support.gc_collect()
-
     def test_full_cycle(self):
+        events = []
+
         class Test(unittest.IsolatedAsyncioTestCase):
             def setUp(self):
                 self.assertEqual(events, [])
@@ -28,13 +18,12 @@
             async def asyncSetUp(self):
                 self.assertEqual(events, ['setUp'])
                 events.append('asyncSetUp')
-                self.addAsyncCleanup(self.on_cleanup1)
 
             async def test_func(self):
                 self.assertEqual(events, ['setUp',
                                           'asyncSetUp'])
                 events.append('test')
-                self.addAsyncCleanup(self.on_cleanup2)
+                self.addAsyncCleanup(self.on_cleanup)
 
             async def asyncTearDown(self):
                 self.assertEqual(events, ['setUp',
@@ -49,48 +38,34 @@
                                           'asyncTearDown'])
                 events.append('tearDown')
 
-            async def on_cleanup1(self):
-                self.assertEqual(events, ['setUp',
-                                          'asyncSetUp',
-                                          'test',
-                                          'asyncTearDown',
-                                          'tearDown',
-                                          'cleanup2'])
-                events.append('cleanup1')
-
-            async def on_cleanup2(self):
+            async def on_cleanup(self):
                 self.assertEqual(events, ['setUp',
                                           'asyncSetUp',
                                           'test',
                                           'asyncTearDown',
                                           'tearDown'])
-                events.append('cleanup2')
+                events.append('cleanup')
 
-        events = []
         test = Test("test_func")
-        result = test.run()
-        self.assertEqual(result.errors, [])
-        self.assertEqual(result.failures, [])
-        expected = ['setUp', 'asyncSetUp', 'test',
-                    'asyncTearDown', 'tearDown', 'cleanup2', 'cleanup1']
-        self.assertEqual(events, expected)
-
-        events = []
-        test = Test("test_func")
-        test.debug()
-        self.assertEqual(events, expected)
-        test.doCleanups()
-        self.assertEqual(events, expected)
+        test.run()
+        self.assertEqual(events, ['setUp',
+                                  'asyncSetUp',
+                                  'test',
+                                  'asyncTearDown',
+                                  'tearDown',
+                                  'cleanup'])
 
     def test_exception_in_setup(self):
+        events = []
+
         class Test(unittest.IsolatedAsyncioTestCase):
             async def asyncSetUp(self):
                 events.append('asyncSetUp')
-                self.addAsyncCleanup(self.on_cleanup)
-                raise MyException()
+                raise Exception()
 
             async def test_func(self):
                 events.append('test')
+                self.addAsyncCleanup(self.on_cleanup)
 
             async def asyncTearDown(self):
                 events.append('asyncTearDown')
@@ -99,34 +74,21 @@
                 events.append('cleanup')
 
 
-        events = []
         test = Test("test_func")
-        result = test.run()
-        self.assertEqual(events, ['asyncSetUp', 'cleanup'])
-        self.assertIs(result.errors[0][0], test)
-        self.assertIn('MyException', result.errors[0][1])
-
-        events = []
-        test = Test("test_func")
-        try:
-            test.debug()
-        except MyException:
-            pass
-        else:
-            self.fail('Expected a MyException exception')
+        test.run()
         self.assertEqual(events, ['asyncSetUp'])
-        test.doCleanups()
-        self.assertEqual(events, ['asyncSetUp', 'cleanup'])
 
     def test_exception_in_test(self):
+        events = []
+
         class Test(unittest.IsolatedAsyncioTestCase):
             async def asyncSetUp(self):
                 events.append('asyncSetUp')
 
             async def test_func(self):
                 events.append('test')
+                raise Exception()
                 self.addAsyncCleanup(self.on_cleanup)
-                raise MyException()
 
             async def asyncTearDown(self):
                 events.append('asyncTearDown')
@@ -134,26 +96,35 @@
             async def on_cleanup(self):
                 events.append('cleanup')
 
-        events = []
         test = Test("test_func")
-        result = test.run()
-        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])
-        self.assertIs(result.errors[0][0], test)
-        self.assertIn('MyException', result.errors[0][1])
+        test.run()
+        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown'])
 
+    def test_exception_in_test_after_adding_cleanup(self):
         events = []
+
+        class Test(unittest.IsolatedAsyncioTestCase):
+            async def asyncSetUp(self):
+                events.append('asyncSetUp')
+
+            async def test_func(self):
+                events.append('test')
+                self.addAsyncCleanup(self.on_cleanup)
+                raise Exception()
+
+            async def asyncTearDown(self):
+                events.append('asyncTearDown')
+
+            async def on_cleanup(self):
+                events.append('cleanup')
+
         test = Test("test_func")
-        try:
-            test.debug()
-        except MyException:
-            pass
-        else:
-            self.fail('Expected a MyException exception')
-        self.assertEqual(events, ['asyncSetUp', 'test'])
-        test.doCleanups()
-        self.assertEqual(events, ['asyncSetUp', 'test', 'cleanup'])
+        test.run()
+        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])
 
     def test_exception_in_tear_down(self):
+        events = []
+
         class Test(unittest.IsolatedAsyncioTestCase):
             async def asyncSetUp(self):
                 events.append('asyncSetUp')
@@ -164,70 +135,37 @@
 
             async def asyncTearDown(self):
                 events.append('asyncTearDown')
-                raise MyException()
+                raise Exception()
 
             async def on_cleanup(self):
                 events.append('cleanup')
 
-        events = []
         test = Test("test_func")
-        result = test.run()
+        test.run()
         self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])
-        self.assertIs(result.errors[0][0], test)
-        self.assertIn('MyException', result.errors[0][1])
 
-        events = []
-        test = Test("test_func")
-        try:
-            test.debug()
-        except MyException:
-            pass
-        else:
-            self.fail('Expected a MyException exception')
-        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown'])
-        test.doCleanups()
-        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])
 
     def test_exception_in_tear_clean_up(self):
+        events = []
+
         class Test(unittest.IsolatedAsyncioTestCase):
             async def asyncSetUp(self):
                 events.append('asyncSetUp')
 
             async def test_func(self):
                 events.append('test')
-                self.addAsyncCleanup(self.on_cleanup1)
-                self.addAsyncCleanup(self.on_cleanup2)
+                self.addAsyncCleanup(self.on_cleanup)
 
             async def asyncTearDown(self):
                 events.append('asyncTearDown')
 
-            async def on_cleanup1(self):
-                events.append('cleanup1')
-                raise MyException('some error')
+            async def on_cleanup(self):
+                events.append('cleanup')
+                raise Exception()
 
-            async def on_cleanup2(self):
-                events.append('cleanup2')
-                raise MyException('other error')
-
-        events = []
         test = Test("test_func")
-        result = test.run()
-        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup2', 'cleanup1'])
-        self.assertIs(result.errors[0][0], test)
-        self.assertIn('MyException: other error', result.errors[0][1])
-        self.assertIn('MyException: some error', result.errors[1][1])
-
-        events = []
-        test = Test("test_func")
-        try:
-            test.debug()
-        except MyException:
-            pass
-        else:
-            self.fail('Expected a MyException exception')
-        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup2'])
-        test.doCleanups()
-        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup2', 'cleanup1'])
+        test.run()
+        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])
 
     def test_cleanups_interleave_order(self):
         events = []
@@ -252,95 +190,6 @@
                                   'async_cleanup 2',
                                   'sync_cleanup 1'])
 
-    def test_base_exception_from_async_method(self):
-        events = []
-        class Test(unittest.IsolatedAsyncioTestCase):
-            async def test_base(self):
-                events.append("test_base")
-                raise BaseException()
-                events.append("not it")
-
-            async def test_no_err(self):
-                events.append("test_no_err")
-
-            async def test_cancel(self):
-                raise asyncio.CancelledError()
-
-        test = Test("test_base")
-        output = test.run()
-        self.assertFalse(output.wasSuccessful())
-
-        test = Test("test_no_err")
-        test.run()
-        self.assertEqual(events, ['test_base', 'test_no_err'])
-
-        test = Test("test_cancel")
-        output = test.run()
-        self.assertFalse(output.wasSuccessful())
-
-    def test_cancellation_hanging_tasks(self):
-        cancelled = False
-        class Test(unittest.IsolatedAsyncioTestCase):
-            async def test_leaking_task(self):
-                async def coro():
-                    nonlocal cancelled
-                    try:
-                        await asyncio.sleep(1)
-                    except asyncio.CancelledError:
-                        cancelled = True
-                        raise
-
-                # Leave this running in the background
-                asyncio.create_task(coro())
-
-        test = Test("test_leaking_task")
-        output = test.run()
-        self.assertTrue(cancelled)
-
-    def test_debug_cleanup_same_loop(self):
-        class Test(unittest.IsolatedAsyncioTestCase):
-            async def asyncSetUp(self):
-                async def coro():
-                    await asyncio.sleep(0)
-                fut = asyncio.ensure_future(coro())
-                self.addAsyncCleanup(self.cleanup, fut)
-                events.append('asyncSetUp')
-
-            async def test_func(self):
-                events.append('test')
-                raise MyException()
-
-            async def asyncTearDown(self):
-                events.append('asyncTearDown')
-
-            async def cleanup(self, fut):
-                try:
-                    # Raises an exception if in different loop
-                    await asyncio.wait([fut])
-                    events.append('cleanup')
-                except:
-                    import traceback
-                    traceback.print_exc()
-                    raise
-
-        events = []
-        test = Test("test_func")
-        result = test.run()
-        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])
-        self.assertIn('MyException', result.errors[0][1])
-
-        events = []
-        test = Test("test_func")
-        try:
-            test.debug()
-        except MyException:
-            pass
-        else:
-            self.fail('Expected a MyException exception')
-        self.assertEqual(events, ['asyncSetUp', 'test'])
-        test.doCleanups()
-        self.assertEqual(events, ['asyncSetUp', 'test', 'cleanup'])
-
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/common/py3-stdlib/unittest/test/test_case.py b/common/py3-stdlib/unittest/test/test_case.py
index 9b3a598..f855c4d 100644
--- a/common/py3-stdlib/unittest/test/test_case.py
+++ b/common/py3-stdlib/unittest/test/test_case.py
@@ -8,7 +8,6 @@
 import warnings
 import weakref
 import inspect
-import types
 
 from copy import deepcopy
 from test import support
@@ -19,7 +18,7 @@
     TestEquality, TestHashing, LoggingResult, LegacyLoggingResult,
     ResultWithNoStartTestRunStopTestRun
 )
-from test.support import captured_stderr, gc_collect
+from test.support import captured_stderr
 
 
 log_foo = logging.getLogger('foo')
@@ -611,8 +610,6 @@
                  'Tests shortDescription() for a method with a longer '
                  'docstring.')
 
-    @unittest.skipIf(sys.flags.optimize >= 2,
-                     "Docstrings are omitted with -O2 and above")
     def testShortDescriptionWhitespaceTrimming(self):
         """
             Tests shortDescription() whitespace is trimmed, so that the first
@@ -708,10 +705,6 @@
             with self.assertRaises(self.failureException):
                 self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
 
-        with self.assertWarns(DeprecationWarning) as warninfo:
-            self.assertDictContainsSubset({}, {})
-        self.assertEqual(warninfo.warnings[0].filename, __file__)
-
     def testAssertEqual(self):
         equal_pairs = [
                 ((), ()),
@@ -1357,20 +1350,6 @@
             pass
         self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
 
-    def testAssertWarnsModifySysModules(self):
-        # bpo-29620: handle modified sys.modules during iteration
-        class Foo(types.ModuleType):
-            @property
-            def __warningregistry__(self):
-                sys.modules['@bar@'] = 'bar'
-
-        sys.modules['@foo@'] = Foo('foo')
-        try:
-            self.assertWarns(UserWarning, warnings.warn, 'expected')
-        finally:
-            del sys.modules['@foo@']
-            del sys.modules['@bar@']
-
     def testAssertRaisesRegexMismatch(self):
         def Stub():
             raise Exception('Unexpected')
@@ -1679,18 +1658,6 @@
                 with self.assertLogs(level='WARNING'):
                     log_foo.info("1")
 
-    def testAssertLogsFailureLevelTooHigh_FilterInRootLogger(self):
-        # Failure due to level too high - message propagated to root
-        with self.assertNoStderr():
-            oldLevel = log_foo.level
-            log_foo.setLevel(logging.INFO)
-            try:
-                with self.assertRaises(self.failureException):
-                    with self.assertLogs(level='WARNING'):
-                        log_foo.info("1")
-            finally:
-                log_foo.setLevel(oldLevel)
-
     def testAssertLogsFailureMismatchingLogger(self):
         # Failure due to mismatching logger (and the logged message is
         # passed through)
@@ -1699,81 +1666,6 @@
                 with self.assertLogs('foo'):
                     log_quux.error("1")
 
-    def testAssertLogsUnexpectedException(self):
-        # Check unexpected exception will go through.
-        with self.assertRaises(ZeroDivisionError):
-            with self.assertLogs():
-                raise ZeroDivisionError("Unexpected")
-
-    def testAssertNoLogsDefault(self):
-        with self.assertRaises(self.failureException) as cm:
-            with self.assertNoLogs():
-                log_foo.info("1")
-                log_foobar.debug("2")
-        self.assertEqual(
-            str(cm.exception),
-            "Unexpected logs found: ['INFO:foo:1']",
-        )
-
-    def testAssertNoLogsFailureFoundLogs(self):
-        with self.assertRaises(self.failureException) as cm:
-            with self.assertNoLogs():
-                log_quux.error("1")
-                log_foo.error("foo")
-
-        self.assertEqual(
-            str(cm.exception),
-            "Unexpected logs found: ['ERROR:quux:1', 'ERROR:foo:foo']",
-        )
-
-    def testAssertNoLogsPerLogger(self):
-        with self.assertNoStderr():
-            with self.assertLogs(log_quux):
-                with self.assertNoLogs(logger=log_foo):
-                    log_quux.error("1")
-
-    def testAssertNoLogsFailurePerLogger(self):
-        # Failure due to unexpected logs for the given logger or its
-        # children.
-        with self.assertRaises(self.failureException) as cm:
-            with self.assertLogs(log_quux):
-                with self.assertNoLogs(logger=log_foo):
-                    log_quux.error("1")
-                    log_foobar.info("2")
-        self.assertEqual(
-            str(cm.exception),
-            "Unexpected logs found: ['INFO:foo.bar:2']",
-        )
-
-    def testAssertNoLogsPerLevel(self):
-        # Check per-level filtering
-        with self.assertNoStderr():
-            with self.assertNoLogs(level="ERROR"):
-                log_foo.info("foo")
-                log_quux.debug("1")
-
-    def testAssertNoLogsFailurePerLevel(self):
-        # Failure due to unexpected logs at the specified level.
-        with self.assertRaises(self.failureException) as cm:
-            with self.assertNoLogs(level="DEBUG"):
-                log_foo.debug("foo")
-                log_quux.debug("1")
-        self.assertEqual(
-            str(cm.exception),
-            "Unexpected logs found: ['DEBUG:foo:foo', 'DEBUG:quux:1']",
-        )
-
-    def testAssertNoLogsUnexpectedException(self):
-        # Check unexpected exception will go through.
-        with self.assertRaises(ZeroDivisionError):
-            with self.assertNoLogs():
-                raise ZeroDivisionError("Unexpected")
-
-    def testAssertNoLogsYieldsNone(self):
-        with self.assertNoLogs() as value:
-            pass
-        self.assertIsNone(value)
-
     def testDeprecatedMethodNames(self):
         """
         Test that the deprecated methods raise a DeprecationWarning. See #9424.
@@ -1953,7 +1845,6 @@
         for method_name in ('test1', 'test2'):
             testcase = TestCase(method_name)
             testcase.run()
-            gc_collect()  # For PyPy or other GCs.
             self.assertEqual(MyException.ninstance, 0)
 
 
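
The largest removals above are the assertNoLogs tests, which only exist from Python 3.10. Its older counterpart assertLogs behaves as sketched below; this is a small standalone example (not taken from the deleted tests) reusing the document's log_foo naming:

    import logging
    import unittest

    log_foo = logging.getLogger('foo')

    class LoggingDemo(unittest.TestCase):
        def test_logs_captured(self):
            # assertLogs fails unless at least one matching record is emitted;
            # cm.output holds the records formatted as 'LEVEL:logger:message'.
            with self.assertLogs('foo', level='INFO') as cm:
                log_foo.info('first')
            self.assertEqual(cm.output, ['INFO:foo:first'])

    if __name__ == '__main__':
        unittest.main()
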
diff --git a/common/py3-stdlib/unittest/test/test_discovery.py b/common/py3-stdlib/unittest/test/test_discovery.py
index 9d502c5..16e081e 100644
--- a/common/py3-stdlib/unittest/test/test_discovery.py
+++ b/common/py3-stdlib/unittest/test/test_discovery.py
@@ -5,7 +5,6 @@
 import types
 import pickle
 from test import support
-from test.support import import_helper
 import test.test_importlib.util
 
 import unittest
@@ -849,7 +848,7 @@
 
         with unittest.mock.patch('builtins.__import__', _import):
             # Since loader.discover() can modify sys.path, restore it when done.
-            with import_helper.DirsOnSysPath():
+            with support.DirsOnSysPath():
                 # Make sure to remove 'package' from sys.modules when done.
                 with test.test_importlib.util.uncache('package'):
                     suite = loader.discover('package')
@@ -866,7 +865,7 @@
 
         with unittest.mock.patch('builtins.__import__', _import):
             # Since loader.discover() can modify sys.path, restore it when done.
-            with import_helper.DirsOnSysPath():
+            with support.DirsOnSysPath():
                 # Make sure to remove 'package' from sys.modules when done.
                 with test.test_importlib.util.uncache('package'):
                     with self.assertRaises(TypeError) as cm:
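
Both hunks above swap test.support.import_helper.DirsOnSysPath (the 3.10 location) back to test.support.DirsOnSysPath. The helper's job is only to make sys.path changes reversible around loader.discover(); a rough standalone equivalent, written here as an assumption for illustration rather than the helper's actual code:

    import sys
    from contextlib import contextmanager

    @contextmanager
    def dirs_on_sys_path(*paths):
        saved = sys.path[:]
        sys.path.extend(paths)
        try:
            yield
        finally:
            sys.path[:] = saved  # restore even if discovery raised
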
diff --git a/common/py3-stdlib/unittest/test/test_program.py b/common/py3-stdlib/unittest/test/test_program.py
index b7fbbc1..eef82ff 100644
--- a/common/py3-stdlib/unittest/test/test_program.py
+++ b/common/py3-stdlib/unittest/test/test_program.py
@@ -6,7 +6,6 @@
 from test import support
 import unittest
 import unittest.test
-from unittest.test.test_result import BufferedWriter
 
 
 class Test_TestProgram(unittest.TestCase):
@@ -58,9 +57,9 @@
 
     class FooBar(unittest.TestCase):
         def testPass(self):
-            pass
+            assert True
         def testFail(self):
-            raise AssertionError
+            assert False
 
     class FooBarLoader(unittest.TestLoader):
         """Test loader that returns a suite containing FooBar."""
@@ -105,39 +104,30 @@
                           program.testNames)
 
     def test_NonExit(self):
-        stream = BufferedWriter()
         program = unittest.main(exit=False,
                                 argv=["foobar"],
-                                testRunner=unittest.TextTestRunner(stream=stream),
+                                testRunner=unittest.TextTestRunner(stream=io.StringIO()),
                                 testLoader=self.FooBarLoader())
         self.assertTrue(hasattr(program, 'result'))
-        self.assertIn('\nFAIL: testFail ', stream.getvalue())
-        self.assertTrue(stream.getvalue().endswith('\n\nFAILED (failures=1)\n'))
 
 
     def test_Exit(self):
-        stream = BufferedWriter()
         self.assertRaises(
             SystemExit,
             unittest.main,
             argv=["foobar"],
-            testRunner=unittest.TextTestRunner(stream=stream),
+            testRunner=unittest.TextTestRunner(stream=io.StringIO()),
             exit=True,
             testLoader=self.FooBarLoader())
-        self.assertIn('\nFAIL: testFail ', stream.getvalue())
-        self.assertTrue(stream.getvalue().endswith('\n\nFAILED (failures=1)\n'))
 
 
     def test_ExitAsDefault(self):
-        stream = BufferedWriter()
         self.assertRaises(
             SystemExit,
             unittest.main,
             argv=["foobar"],
-            testRunner=unittest.TextTestRunner(stream=stream),
+            testRunner=unittest.TextTestRunner(stream=io.StringIO()),
             testLoader=self.FooBarLoader())
-        self.assertIn('\nFAIL: testFail ', stream.getvalue())
-        self.assertTrue(stream.getvalue().endswith('\n\nFAILED (failures=1)\n'))
 
 
 class InitialisableProgram(unittest.TestProgram):
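
The reverted hunks above drive unittest.main through an in-memory stream instead of the BufferedWriter helper. In isolation the io.StringIO pattern looks like this (FooBar here is a throwaway example case, not the one from the diff):

    import io
    import unittest

    class FooBar(unittest.TestCase):
        def testPass(self):
            assert True

    stream = io.StringIO()
    program = unittest.main(
        argv=['prog'],      # ignore the real command line
        exit=False,         # return the TestProgram instead of calling sys.exit()
        testRunner=unittest.TextTestRunner(stream=stream),
    )
    assert program.result.wasSuccessful()
    # stream.getvalue() now holds the runner's report, ending in 'OK'.
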
diff --git a/common/py3-stdlib/unittest/test/test_result.py b/common/py3-stdlib/unittest/test/test_result.py
index c5aaba0..0ffb87b 100644
--- a/common/py3-stdlib/unittest/test/test_result.py
+++ b/common/py3-stdlib/unittest/test/test_result.py
@@ -2,11 +2,10 @@
 import sys
 import textwrap
 
-from test.support import warnings_helper, captured_stdout, captured_stderr
+from test import support
 
 import traceback
 import unittest
-from unittest.util import strclass
 
 
 class MockTraceback(object):
@@ -23,32 +22,6 @@
     unittest.result.traceback = traceback
 
 
-def bad_cleanup1():
-    print('do cleanup1')
-    raise TypeError('bad cleanup1')
-
-
-def bad_cleanup2():
-    print('do cleanup2')
-    raise ValueError('bad cleanup2')
-
-
-class BufferedWriter:
-    def __init__(self):
-        self.result = ''
-        self.buffer = ''
-
-    def write(self, arg):
-        self.buffer += arg
-
-    def flush(self):
-        self.result += self.buffer
-        self.buffer = ''
-
-    def getvalue(self):
-        return self.result
-
-
 class Test_TestResult(unittest.TestCase):
     # Note: there are not separate tests for TestResult.wasSuccessful(),
     # TestResult.errors, TestResult.failures, TestResult.testsRun or
@@ -220,61 +193,6 @@
         self.assertIs(test_case, test)
         self.assertIsInstance(formatted_exc, str)
 
-    def test_addFailure_filter_traceback_frames(self):
-        class Foo(unittest.TestCase):
-            def test_1(self):
-                pass
-
-        test = Foo('test_1')
-        def get_exc_info():
-            try:
-                test.fail("foo")
-            except:
-                return sys.exc_info()
-
-        exc_info_tuple = get_exc_info()
-
-        full_exc = traceback.format_exception(*exc_info_tuple)
-
-        result = unittest.TestResult()
-        result.startTest(test)
-        result.addFailure(test, exc_info_tuple)
-        result.stopTest(test)
-
-        formatted_exc = result.failures[0][1]
-        dropped = [l for l in full_exc if l not in formatted_exc]
-        self.assertEqual(len(dropped), 1)
-        self.assertIn("raise self.failureException(msg)", dropped[0])
-
-    def test_addFailure_filter_traceback_frames_context(self):
-        class Foo(unittest.TestCase):
-            def test_1(self):
-                pass
-
-        test = Foo('test_1')
-        def get_exc_info():
-            try:
-                try:
-                    test.fail("foo")
-                except:
-                    raise ValueError(42)
-            except:
-                return sys.exc_info()
-
-        exc_info_tuple = get_exc_info()
-
-        full_exc = traceback.format_exception(*exc_info_tuple)
-
-        result = unittest.TestResult()
-        result.startTest(test)
-        result.addFailure(test, exc_info_tuple)
-        result.stopTest(test)
-
-        formatted_exc = result.failures[0][1]
-        dropped = [l for l in full_exc if l not in formatted_exc]
-        self.assertEqual(len(dropped), 1)
-        self.assertIn("raise self.failureException(msg)", dropped[0])
-
     # "addError(test, err)"
     # ...
     # "Called when the test case test raises an unexpected exception err
@@ -515,13 +433,10 @@
         self.assertTrue(result.shouldStop)
 
     def testFailFastSetByRunner(self):
-        stream = BufferedWriter()
-        runner = unittest.TextTestRunner(stream=stream, failfast=True)
+        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
         def test(result):
             self.assertTrue(result.failfast)
         result = runner.run(test)
-        stream.flush()
-        self.assertTrue(stream.getvalue().endswith('\n\nOK\n'))
 
 
 classDict = dict(unittest.TestResult.__dict__)
@@ -543,8 +458,8 @@
 class Test_OldTestResult(unittest.TestCase):
 
     def assertOldResultWarning(self, test, failures):
-        with warnings_helper.check_warnings(
-                ("TestResult has no add.+ method,", RuntimeWarning)):
+        with support.check_warnings(("TestResult has no add.+ method,",
+                                     RuntimeWarning)):
             result = OldResult()
             test.run(result)
             self.assertEqual(len(result.failures), failures)
@@ -718,320 +633,36 @@
             self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
             self.assertMultiLineEqual(message, expectedFullMessage)
 
-    def testBufferSetUp(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                print('set up')
-                1/0
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\nset up\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 1)
-        description = f'test_foo ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferTearDown(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def tearDown(self):
-                print('tear down')
-                1/0
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\ntear down\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 1)
-        description = f'test_foo ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferDoCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                print('set up')
-                self.addCleanup(bad_cleanup1)
-                self.addCleanup(bad_cleanup2)
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\nset up\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 2)
-        description = f'test_foo ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('TypeError: bad cleanup1', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferSetUp_DoCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                print('set up')
-                self.addCleanup(bad_cleanup1)
-                self.addCleanup(bad_cleanup2)
-                1/0
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\nset up\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 3)
-        description = f'test_foo ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[2]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('TypeError: bad cleanup1', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferTearDown_DoCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                print('set up')
-                self.addCleanup(bad_cleanup1)
-                self.addCleanup(bad_cleanup2)
-            def tearDown(self):
-                print('tear down')
-                1/0
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\nset up\ntear down\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 3)
-        description = f'test_foo ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[2]
-        self.assertEqual(str(test_case), description)
-        self.assertIn('TypeError: bad cleanup1', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
     def testBufferSetupClass(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
+        result = unittest.TestResult()
         result.buffer = True
 
         class Foo(unittest.TestCase):
             @classmethod
             def setUpClass(cls):
-                print('set up class')
                 1/0
             def test_foo(self):
                 pass
         suite = unittest.TestSuite([Foo('test_foo')])
         suite(result)
-        expected_out = '\nStdout:\nset up class\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
         self.assertEqual(len(result.errors), 1)
-        description = f'setUpClass ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
 
     def testBufferTearDownClass(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
+        result = unittest.TestResult()
         result.buffer = True
 
         class Foo(unittest.TestCase):
             @classmethod
             def tearDownClass(cls):
-                print('tear down class')
                 1/0
             def test_foo(self):
                 pass
         suite = unittest.TestSuite([Foo('test_foo')])
         suite(result)
-        expected_out = '\nStdout:\ntear down class\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
         self.assertEqual(len(result.errors), 1)
-        description = f'tearDownClass ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferDoClassCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                print('set up class')
-                cls.addClassCleanup(bad_cleanup1)
-                cls.addClassCleanup(bad_cleanup2)
-            @classmethod
-            def tearDownClass(cls):
-                print('tear down class')
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\ntear down class\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 2)
-        description = f'tearDownClass ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('TypeError: bad cleanup1', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferSetupClass_DoClassCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                print('set up class')
-                cls.addClassCleanup(bad_cleanup1)
-                cls.addClassCleanup(bad_cleanup2)
-                1/0
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\nset up class\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 3)
-        description = f'setUpClass ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn('\nStdout:\nset up class\n', formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[2]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('TypeError: bad cleanup1', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferTearDownClass_DoClassCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                print('set up class')
-                cls.addClassCleanup(bad_cleanup1)
-                cls.addClassCleanup(bad_cleanup2)
-            @classmethod
-            def tearDownClass(cls):
-                print('tear down class')
-                1/0
-            def test_foo(self):
-                pass
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\ntear down class\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 3)
-        description = f'tearDownClass ({strclass(Foo)})'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn('\nStdout:\ntear down class\n', formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-        test_case, formatted_exc = result.errors[2]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('TypeError: bad cleanup1', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
 
     def testBufferSetUpModule(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
+        result = unittest.TestResult()
         result.buffer = True
 
         class Foo(unittest.TestCase):
@@ -1040,7 +671,6 @@
         class Module(object):
             @staticmethod
             def setUpModule():
-                print('set up module')
                 1/0
 
         Foo.__module__ = 'Module'
@@ -1048,18 +678,10 @@
         self.addCleanup(sys.modules.pop, 'Module')
         suite = unittest.TestSuite([Foo('test_foo')])
         suite(result)
-        expected_out = '\nStdout:\nset up module\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
         self.assertEqual(len(result.errors), 1)
-        description = 'setUpModule (Module)'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
 
     def testBufferTearDownModule(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
+        result = unittest.TestResult()
         result.buffer = True
 
         class Foo(unittest.TestCase):
@@ -1068,7 +690,6 @@
         class Module(object):
             @staticmethod
             def tearDownModule():
-                print('tear down module')
                 1/0
 
         Foo.__module__ = 'Module'
@@ -1076,124 +697,7 @@
         self.addCleanup(sys.modules.pop, 'Module')
         suite = unittest.TestSuite([Foo('test_foo')])
         suite(result)
-        expected_out = '\nStdout:\ntear down module\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
         self.assertEqual(len(result.errors), 1)
-        description = 'tearDownModule (Module)'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferDoModuleCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def test_foo(self):
-                pass
-        class Module(object):
-            @staticmethod
-            def setUpModule():
-                print('set up module')
-                unittest.addModuleCleanup(bad_cleanup1)
-                unittest.addModuleCleanup(bad_cleanup2)
-
-        Foo.__module__ = 'Module'
-        sys.modules['Module'] = Module
-        self.addCleanup(sys.modules.pop, 'Module')
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 1)
-        description = 'tearDownModule (Module)'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferSetUpModule_DoModuleCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def test_foo(self):
-                pass
-        class Module(object):
-            @staticmethod
-            def setUpModule():
-                print('set up module')
-                unittest.addModuleCleanup(bad_cleanup1)
-                unittest.addModuleCleanup(bad_cleanup2)
-                1/0
-
-        Foo.__module__ = 'Module'
-        sys.modules['Module'] = Module
-        self.addCleanup(sys.modules.pop, 'Module')
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\nset up module\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 2)
-        description = 'setUpModule (Module)'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn('\nStdout:\nset up module\n', formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertIn(expected_out, formatted_exc)
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
-
-    def testBufferTearDownModule_DoModuleCleanups(self):
-        with captured_stdout() as stdout:
-            result = unittest.TestResult()
-        result.buffer = True
-
-        class Foo(unittest.TestCase):
-            def test_foo(self):
-                pass
-        class Module(object):
-            @staticmethod
-            def setUpModule():
-                print('set up module')
-                unittest.addModuleCleanup(bad_cleanup1)
-                unittest.addModuleCleanup(bad_cleanup2)
-            @staticmethod
-            def tearDownModule():
-                print('tear down module')
-                1/0
-
-        Foo.__module__ = 'Module'
-        sys.modules['Module'] = Module
-        self.addCleanup(sys.modules.pop, 'Module')
-        suite = unittest.TestSuite([Foo('test_foo')])
-        suite(result)
-        expected_out = '\nStdout:\ntear down module\ndo cleanup2\ndo cleanup1\n'
-        self.assertEqual(stdout.getvalue(), expected_out)
-        self.assertEqual(len(result.errors), 2)
-        description = 'tearDownModule (Module)'
-        test_case, formatted_exc = result.errors[0]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
-        self.assertNotIn('ValueError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn('\nStdout:\ntear down module\n', formatted_exc)
-        test_case, formatted_exc = result.errors[1]
-        self.assertEqual(test_case.description, description)
-        self.assertIn('ValueError: bad cleanup2', formatted_exc)
-        self.assertNotIn('ZeroDivisionError', formatted_exc)
-        self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
 
 
 if __name__ == '__main__':
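
Most of what the deleted testBuffer* cases above verify is the interaction of TestResult.buffer with fixture output: while buffering is on, stdout written by a test is captured, mirrored only on failure, and folded into the recorded traceback text. A compact sketch of that behaviour:

    import unittest

    class Foo(unittest.TestCase):
        def setUp(self):
            print('set up')
            1 / 0               # error during setUp
        def test_foo(self):
            pass

    result = unittest.TestResult()
    result.buffer = True
    unittest.TestSuite([Foo('test_foo')])(result)   # a suite is callable

    _, formatted_exc = result.errors[0]
    assert 'ZeroDivisionError' in formatted_exc
    assert 'set up' in formatted_exc    # captured stdout rides along
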
diff --git a/common/py3-stdlib/unittest/test/test_runner.py b/common/py3-stdlib/unittest/test/test_runner.py
index 453e6c3..dd9a1b6 100644
--- a/common/py3-stdlib/unittest/test/test_runner.py
+++ b/common/py3-stdlib/unittest/test/test_runner.py
@@ -222,42 +222,14 @@
         self.assertEqual(ordering,
                          ['setUpClass', 'test', 'tearDownClass', 'cleanup_good'])
 
-    def test_run_class_cleanUp_without_tearDownClass(self):
+    def test_debug_executes_classCleanUp(self):
         ordering = []
-        blowUp = True
 
         class TestableTest(unittest.TestCase):
             @classmethod
             def setUpClass(cls):
                 ordering.append('setUpClass')
                 cls.addClassCleanup(cleanup, ordering)
-                if blowUp:
-                    raise Exception()
-            def testNothing(self):
-                ordering.append('test')
-            @classmethod
-            @property
-            def tearDownClass(cls):
-                raise AttributeError
-
-        runTests(TestableTest)
-        self.assertEqual(ordering, ['setUpClass', 'cleanup_good'])
-
-        ordering = []
-        blowUp = False
-        runTests(TestableTest)
-        self.assertEqual(ordering,
-                         ['setUpClass', 'test', 'cleanup_good'])
-
-    def test_debug_executes_classCleanUp(self):
-        ordering = []
-        blowUp = False
-
-        class TestableTest(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                ordering.append('setUpClass')
-                cls.addClassCleanup(cleanup, ordering, blowUp=blowUp)
             def testNothing(self):
                 ordering.append('test')
             @classmethod
@@ -269,48 +241,6 @@
         self.assertEqual(ordering,
                          ['setUpClass', 'test', 'tearDownClass', 'cleanup_good'])
 
-        ordering = []
-        blowUp = True
-        suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
-        with self.assertRaises(Exception) as cm:
-            suite.debug()
-        self.assertEqual(str(cm.exception), 'CleanUpExc')
-        self.assertEqual(ordering,
-                         ['setUpClass', 'test', 'tearDownClass', 'cleanup_exc'])
-
-    def test_debug_executes_classCleanUp_when_teardown_exception(self):
-        ordering = []
-        blowUp = False
-
-        class TestableTest(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                ordering.append('setUpClass')
-                cls.addClassCleanup(cleanup, ordering, blowUp=blowUp)
-            def testNothing(self):
-                ordering.append('test')
-            @classmethod
-            def tearDownClass(cls):
-                raise Exception('TearDownClassExc')
-
-        suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
-        with self.assertRaises(Exception) as cm:
-            suite.debug()
-        self.assertEqual(str(cm.exception), 'TearDownClassExc')
-        self.assertEqual(ordering, ['setUpClass', 'test'])
-        self.assertTrue(TestableTest._class_cleanups)
-        TestableTest._class_cleanups.clear()
-
-        ordering = []
-        blowUp = True
-        suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
-        with self.assertRaises(Exception) as cm:
-            suite.debug()
-        self.assertEqual(str(cm.exception), 'TearDownClassExc')
-        self.assertEqual(ordering, ['setUpClass', 'test'])
-        self.assertTrue(TestableTest._class_cleanups)
-        TestableTest._class_cleanups.clear()
-
     def test_doClassCleanups_with_errors_addClassCleanUp(self):
         class TestableTest(unittest.TestCase):
             def testNothing(self):
@@ -402,7 +332,6 @@
         self.assertEqual(ordering,
                          ['setUpClass', 'setUp', 'test',
                           'tearDownClass', 'cleanup_exc'])
-
         ordering = []
         class_blow_up = True
         method_blow_up = False
@@ -426,26 +355,6 @@
                          ['setUpClass', 'setUp', 'tearDownClass',
                           'cleanup_exc'])
 
-    def test_with_errors_in_tearDownClass(self):
-        ordering = []
-        class TestableTest(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                ordering.append('setUpClass')
-                cls.addClassCleanup(cleanup, ordering)
-            def testNothing(self):
-                ordering.append('test')
-            @classmethod
-            def tearDownClass(cls):
-                ordering.append('tearDownClass')
-                raise Exception('TearDownExc')
-
-        result = runTests(TestableTest)
-        self.assertEqual(result.errors[0][1].splitlines()[-1],
-                         'Exception: TearDownExc')
-        self.assertEqual(ordering,
-                         ['setUpClass', 'test', 'tearDownClass', 'cleanup_good'])
-
 
 class TestModuleCleanUp(unittest.TestCase):
     def test_add_and_do_ModuleCleanup(self):
@@ -623,69 +532,13 @@
                           'tearDownModule2', 'cleanup_good'])
         self.assertEqual(unittest.case._module_cleanups, [])
 
-    def test_run_module_cleanUp_without_teardown(self):
-        ordering = []
-        class Module(object):
-            @staticmethod
-            def setUpModule():
-                ordering.append('setUpModule')
-                unittest.addModuleCleanup(cleanup, ordering)
-
-        class TestableTest(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                ordering.append('setUpClass')
-            def testNothing(self):
-                ordering.append('test')
-            @classmethod
-            def tearDownClass(cls):
-                ordering.append('tearDownClass')
-
-        TestableTest.__module__ = 'Module'
-        sys.modules['Module'] = Module
-        runTests(TestableTest)
-        self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test',
-                                    'tearDownClass', 'cleanup_good'])
-        self.assertEqual(unittest.case._module_cleanups, [])
-
-    def test_run_module_cleanUp_when_teardown_exception(self):
-        ordering = []
-        class Module(object):
-            @staticmethod
-            def setUpModule():
-                ordering.append('setUpModule')
-                unittest.addModuleCleanup(cleanup, ordering)
-            @staticmethod
-            def tearDownModule():
-                raise Exception('CleanUpExc')
-
-        class TestableTest(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                ordering.append('setUpClass')
-            def testNothing(self):
-                ordering.append('test')
-            @classmethod
-            def tearDownClass(cls):
-                ordering.append('tearDownClass')
-
-        TestableTest.__module__ = 'Module'
-        sys.modules['Module'] = Module
-        result = runTests(TestableTest)
-        self.assertEqual(result.errors[0][1].splitlines()[-1],
-                         'Exception: CleanUpExc')
-        self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test',
-                                    'tearDownClass', 'cleanup_good'])
-        self.assertEqual(unittest.case._module_cleanups, [])
-
     def test_debug_module_executes_cleanUp(self):
         ordering = []
-        blowUp = False
         class Module(object):
             @staticmethod
             def setUpModule():
                 ordering.append('setUpModule')
-                unittest.addModuleCleanup(cleanup, ordering, blowUp=blowUp)
+                unittest.addModuleCleanup(cleanup, ordering)
             @staticmethod
             def tearDownModule():
                 ordering.append('tearDownModule')
@@ -709,60 +562,6 @@
                           'tearDownModule', 'cleanup_good'])
         self.assertEqual(unittest.case._module_cleanups, [])
 
-        ordering = []
-        blowUp = True
-        suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
-        with self.assertRaises(Exception) as cm:
-            suite.debug()
-        self.assertEqual(str(cm.exception), 'CleanUpExc')
-        self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test',
-                                    'tearDownClass', 'tearDownModule', 'cleanup_exc'])
-        self.assertEqual(unittest.case._module_cleanups, [])
-
-    def test_debug_module_cleanUp_when_teardown_exception(self):
-        ordering = []
-        blowUp = False
-        class Module(object):
-            @staticmethod
-            def setUpModule():
-                ordering.append('setUpModule')
-                unittest.addModuleCleanup(cleanup, ordering, blowUp=blowUp)
-            @staticmethod
-            def tearDownModule():
-                raise Exception('TearDownModuleExc')
-
-        class TestableTest(unittest.TestCase):
-            @classmethod
-            def setUpClass(cls):
-                ordering.append('setUpClass')
-            def testNothing(self):
-                ordering.append('test')
-            @classmethod
-            def tearDownClass(cls):
-                ordering.append('tearDownClass')
-
-        TestableTest.__module__ = 'Module'
-        sys.modules['Module'] = Module
-        suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
-        with self.assertRaises(Exception) as cm:
-            suite.debug()
-        self.assertEqual(str(cm.exception), 'TearDownModuleExc')
-        self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test',
-                                    'tearDownClass'])
-        self.assertTrue(unittest.case._module_cleanups)
-        unittest.case._module_cleanups.clear()
-
-        ordering = []
-        blowUp = True
-        suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
-        with self.assertRaises(Exception) as cm:
-            suite.debug()
-        self.assertEqual(str(cm.exception), 'TearDownModuleExc')
-        self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test',
-                                    'tearDownClass'])
-        self.assertTrue(unittest.case._module_cleanups)
-        unittest.case._module_cleanups.clear()
-
     def test_addClassCleanup_arg_errors(self):
         cleanups = []
         def cleanup(*args, **kwargs):
@@ -918,9 +717,9 @@
         method_blow_up = False
         result = runTests(TestableTest)
         self.assertEqual(result.errors[0][1].splitlines()[-1],
-                         'Exception: ModuleExc')
-        self.assertEqual(result.errors[1][1].splitlines()[-1],
                          'Exception: CleanUpExc')
+        self.assertEqual(result.errors[1][1].splitlines()[-1],
+                         'Exception: ModuleExc')
         self.assertEqual(ordering, ['setUpModule', 'cleanup_exc'])
 
         ordering = []
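
The class- and module-cleanup tests removed above all pivot on one ordering rule: cleanups registered via addClassCleanup (Python 3.8+) run last-in-first-out after tearDownClass, whether the suite is run or debugged. A minimal check of that ordering, mirroring the removed tests' event lists:

    import unittest

    ordering = []

    class TestableTest(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            ordering.append('setUpClass')
            cls.addClassCleanup(ordering.append, 'cleanup')

        def testNothing(self):
            ordering.append('test')

        @classmethod
        def tearDownClass(cls):
            ordering.append('tearDownClass')

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
    suite.run(unittest.TestResult())
    assert ordering == ['setUpClass', 'test', 'tearDownClass', 'cleanup']
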
diff --git a/common/py3-stdlib/unittest/test/test_skipping.py b/common/py3-stdlib/unittest/test/test_skipping.py
index 7cb9d33..1c178a9 100644
--- a/common/py3-stdlib/unittest/test/test_skipping.py
+++ b/common/py3-stdlib/unittest/test/test_skipping.py
@@ -7,50 +7,30 @@
 
     def test_skipping(self):
         class Foo(unittest.TestCase):
-            def defaultTestResult(self):
-                return LoggingResult(events)
             def test_skip_me(self):
                 self.skipTest("skip")
         events = []
         result = LoggingResult(events)
         test = Foo("test_skip_me")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
         self.assertEqual(result.skipped, [(test, "skip")])
 
-        events = []
-        result = test.run()
-        self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
-                                  'stopTest', 'stopTestRun'])
-        self.assertEqual(result.skipped, [(test, "skip")])
-        self.assertEqual(result.testsRun, 1)
-
         # Try letting setUp skip the test now.
         class Foo(unittest.TestCase):
-            def defaultTestResult(self):
-                return LoggingResult(events)
             def setUp(self):
                 self.skipTest("testing")
             def test_nothing(self): pass
         events = []
         result = LoggingResult(events)
         test = Foo("test_nothing")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
         self.assertEqual(result.skipped, [(test, "testing")])
         self.assertEqual(result.testsRun, 1)
 
-        events = []
-        result = test.run()
-        self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
-                                  'stopTest', 'stopTestRun'])
-        self.assertEqual(result.skipped, [(test, "testing")])
-        self.assertEqual(result.testsRun, 1)
-
     def test_skipping_subtests(self):
         class Foo(unittest.TestCase):
-            def defaultTestResult(self):
-                return LoggingResult(events)
             def test_skip_me(self):
                 with self.subTest(a=1):
                     with self.subTest(b=2):
@@ -60,7 +40,7 @@
         events = []
         result = LoggingResult(events)
         test = Foo("test_skip_me")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events, ['startTest', 'addSkip', 'addSkip',
                                   'addSkip', 'stopTest'])
         self.assertEqual(len(result.skipped), 3)
@@ -74,22 +54,11 @@
         self.assertIsNot(subtest, test)
         self.assertEqual(result.skipped[2], (test, "skip 3"))
 
-        events = []
-        result = test.run()
-        self.assertEqual(events,
-                         ['startTestRun', 'startTest', 'addSkip', 'addSkip',
-                          'addSkip', 'stopTest', 'stopTestRun'])
-        self.assertEqual([msg for subtest, msg in result.skipped],
-                         ['skip 1', 'skip 2', 'skip 3'])
-
     def test_skipping_decorators(self):
         op_table = ((unittest.skipUnless, False, True),
                     (unittest.skipIf, True, False))
         for deco, do_skip, dont_skip in op_table:
             class Foo(unittest.TestCase):
-                def defaultTestResult(self):
-                    return LoggingResult(events)
-
                 @deco(do_skip, "testing")
                 def test_skip(self): pass
 
@@ -97,11 +66,10 @@
                 def test_dont_skip(self): pass
             test_do_skip = Foo("test_skip")
             test_dont_skip = Foo("test_dont_skip")
-
             suite = unittest.TestSuite([test_do_skip, test_dont_skip])
             events = []
             result = LoggingResult(events)
-            self.assertIs(suite.run(result), result)
+            suite.run(result)
             self.assertEqual(len(result.skipped), 1)
             expected = ['startTest', 'addSkip', 'stopTest',
                         'startTest', 'addSuccess', 'stopTest']
@@ -110,39 +78,16 @@
             self.assertEqual(result.skipped, [(test_do_skip, "testing")])
             self.assertTrue(result.wasSuccessful())
 
-            events = []
-            result = test_do_skip.run()
-            self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
-                                      'stopTest', 'stopTestRun'])
-            self.assertEqual(result.skipped, [(test_do_skip, "testing")])
-
-            events = []
-            result = test_dont_skip.run()
-            self.assertEqual(events, ['startTestRun', 'startTest', 'addSuccess',
-                                      'stopTest', 'stopTestRun'])
-            self.assertEqual(result.skipped, [])
-
     def test_skip_class(self):
         @unittest.skip("testing")
         class Foo(unittest.TestCase):
-            def defaultTestResult(self):
-                return LoggingResult(events)
             def test_1(self):
                 record.append(1)
-        events = []
         record = []
-        result = LoggingResult(events)
+        result = unittest.TestResult()
         test = Foo("test_1")
         suite = unittest.TestSuite([test])
-        self.assertIs(suite.run(result), result)
-        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
-        self.assertEqual(result.skipped, [(test, "testing")])
-        self.assertEqual(record, [])
-
-        events = []
-        result = test.run()
-        self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
-                                  'stopTest', 'stopTestRun'])
+        suite.run(result)
         self.assertEqual(result.skipped, [(test, "testing")])
         self.assertEqual(record, [])
 
@@ -157,62 +102,10 @@
         result = unittest.TestResult()
         test = Foo("test_1")
         suite = unittest.TestSuite([test])
-        self.assertIs(suite.run(result), result)
+        suite.run(result)
         self.assertEqual(result.skipped, [(test, "testing")])
         self.assertEqual(record, [])
 
-    def test_skip_in_setup(self):
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                self.skipTest("skip")
-            def test_skip_me(self):
-                self.fail("shouldn't come here")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_skip_me")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
-        self.assertEqual(result.skipped, [(test, "skip")])
-
-    def test_skip_in_cleanup(self):
-        class Foo(unittest.TestCase):
-            def test_skip_me(self):
-                pass
-            def tearDown(self):
-                self.skipTest("skip")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_skip_me")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
-        self.assertEqual(result.skipped, [(test, "skip")])
-
-    def test_failure_and_skip_in_cleanup(self):
-        class Foo(unittest.TestCase):
-            def test_skip_me(self):
-                self.fail("fail")
-            def tearDown(self):
-                self.skipTest("skip")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_skip_me")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events, ['startTest', 'addSkip', 'addFailure', 'stopTest'])
-        self.assertEqual(result.skipped, [(test, "skip")])
-
-    def test_skipping_and_fail_in_cleanup(self):
-        class Foo(unittest.TestCase):
-            def test_skip_me(self):
-                self.skipTest("skip")
-            def tearDown(self):
-                self.fail("fail")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_skip_me")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events, ['startTest', 'addSkip', 'addFailure', 'stopTest'])
-        self.assertEqual(result.skipped, [(test, "skip")])
-
     def test_expected_failure(self):
         class Foo(unittest.TestCase):
             @unittest.expectedFailure
@@ -221,12 +114,10 @@
         events = []
         result = LoggingResult(events)
         test = Foo("test_die")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events,
                          ['startTest', 'addExpectedFailure', 'stopTest'])
-        self.assertFalse(result.failures)
         self.assertEqual(result.expectedFailures[0][0], test)
-        self.assertFalse(result.unexpectedSuccesses)
         self.assertTrue(result.wasSuccessful())
 
     def test_expected_failure_with_wrapped_class(self):
@@ -238,12 +129,10 @@
         events = []
         result = LoggingResult(events)
         test = Foo("test_1")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events,
                          ['startTest', 'addExpectedFailure', 'stopTest'])
-        self.assertFalse(result.failures)
         self.assertEqual(result.expectedFailures[0][0], test)
-        self.assertFalse(result.unexpectedSuccesses)
         self.assertTrue(result.wasSuccessful())
 
     def test_expected_failure_with_wrapped_subclass(self):
@@ -258,12 +147,10 @@
         events = []
         result = LoggingResult(events)
         test = Bar("test_1")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events,
                          ['startTest', 'addExpectedFailure', 'stopTest'])
-        self.assertFalse(result.failures)
         self.assertEqual(result.expectedFailures[0][0], test)
-        self.assertFalse(result.unexpectedSuccesses)
         self.assertTrue(result.wasSuccessful())
 
     def test_expected_failure_subtests(self):
@@ -283,52 +170,12 @@
         events = []
         result = LoggingResult(events)
         test = Foo("test_die")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events,
                          ['startTest', 'addSubTestSuccess',
                           'addExpectedFailure', 'stopTest'])
-        self.assertFalse(result.failures)
         self.assertEqual(len(result.expectedFailures), 1)
         self.assertIs(result.expectedFailures[0][0], test)
-        self.assertFalse(result.unexpectedSuccesses)
-        self.assertTrue(result.wasSuccessful())
-
-    def test_expected_failure_and_fail_in_cleanup(self):
-        class Foo(unittest.TestCase):
-            @unittest.expectedFailure
-            def test_die(self):
-                self.fail("help me!")
-            def tearDown(self):
-                self.fail("bad tearDown")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_die")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events,
-                         ['startTest', 'addFailure', 'stopTest'])
-        self.assertEqual(len(result.failures), 1)
-        self.assertIn('AssertionError: bad tearDown', result.failures[0][1])
-        self.assertFalse(result.expectedFailures)
-        self.assertFalse(result.unexpectedSuccesses)
-        self.assertFalse(result.wasSuccessful())
-
-    def test_expected_failure_and_skip_in_cleanup(self):
-        class Foo(unittest.TestCase):
-            @unittest.expectedFailure
-            def test_die(self):
-                self.fail("help me!")
-            def tearDown(self):
-                self.skipTest("skip")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_die")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events,
-                         ['startTest', 'addSkip', 'stopTest'])
-        self.assertFalse(result.failures)
-        self.assertFalse(result.expectedFailures)
-        self.assertFalse(result.unexpectedSuccesses)
-        self.assertEqual(result.skipped, [(test, "skip")])
         self.assertTrue(result.wasSuccessful())
 
     def test_unexpected_success(self):
@@ -339,11 +186,10 @@
         events = []
         result = LoggingResult(events)
         test = Foo("test_die")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events,
                          ['startTest', 'addUnexpectedSuccess', 'stopTest'])
         self.assertFalse(result.failures)
-        self.assertFalse(result.expectedFailures)
         self.assertEqual(result.unexpectedSuccesses, [test])
         self.assertFalse(result.wasSuccessful())
 
@@ -362,54 +208,15 @@
         events = []
         result = LoggingResult(events)
         test = Foo("test_die")
-        self.assertIs(test.run(result), result)
+        test.run(result)
         self.assertEqual(events,
                          ['startTest',
                           'addSubTestSuccess', 'addSubTestSuccess',
                           'addUnexpectedSuccess', 'stopTest'])
         self.assertFalse(result.failures)
-        self.assertFalse(result.expectedFailures)
         self.assertEqual(result.unexpectedSuccesses, [test])
         self.assertFalse(result.wasSuccessful())
 
-    def test_unexpected_success_and_fail_in_cleanup(self):
-        class Foo(unittest.TestCase):
-            @unittest.expectedFailure
-            def test_die(self):
-                pass
-            def tearDown(self):
-                self.fail("bad tearDown")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_die")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events,
-                         ['startTest', 'addFailure', 'stopTest'])
-        self.assertEqual(len(result.failures), 1)
-        self.assertIn('AssertionError: bad tearDown', result.failures[0][1])
-        self.assertFalse(result.expectedFailures)
-        self.assertFalse(result.unexpectedSuccesses)
-        self.assertFalse(result.wasSuccessful())
-
-    def test_unexpected_success_and_skip_in_cleanup(self):
-        class Foo(unittest.TestCase):
-            @unittest.expectedFailure
-            def test_die(self):
-                pass
-            def tearDown(self):
-                self.skipTest("skip")
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test_die")
-        self.assertIs(test.run(result), result)
-        self.assertEqual(events,
-                         ['startTest', 'addSkip', 'stopTest'])
-        self.assertFalse(result.failures)
-        self.assertFalse(result.expectedFailures)
-        self.assertFalse(result.unexpectedSuccesses)
-        self.assertEqual(result.skipped, [(test, "skip")])
-        self.assertTrue(result.wasSuccessful())
-
     def test_skip_doesnt_run_setup(self):
         class Foo(unittest.TestCase):
             wasSetUp = False
@@ -425,7 +232,7 @@
         result = unittest.TestResult()
         test = Foo("test_1")
         suite = unittest.TestSuite([test])
-        self.assertIs(suite.run(result), result)
+        suite.run(result)
         self.assertEqual(result.skipped, [(test, "testing")])
         self.assertFalse(Foo.wasSetUp)
         self.assertFalse(Foo.wasTornDown)
@@ -445,7 +252,7 @@
         result = unittest.TestResult()
         test = Foo("test_1")
         suite = unittest.TestSuite([test])
-        self.assertIs(suite.run(result), result)
+        suite.run(result)
         self.assertEqual(result.skipped, [(test, "testing")])
 
     def test_skip_without_reason(self):
@@ -457,74 +264,8 @@
         result = unittest.TestResult()
         test = Foo("test_1")
         suite = unittest.TestSuite([test])
-        self.assertIs(suite.run(result), result)
+        suite.run(result)
         self.assertEqual(result.skipped, [(test, "")])
 
-    def test_debug_skipping(self):
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                events.append("setUp")
-            def tearDown(self):
-                events.append("tearDown")
-            def test1(self):
-                self.skipTest('skipping exception')
-                events.append("test1")
-            @unittest.skip("skipping decorator")
-            def test2(self):
-                events.append("test2")
-
-        events = []
-        test = Foo("test1")
-        with self.assertRaises(unittest.SkipTest) as cm:
-            test.debug()
-        self.assertIn("skipping exception", str(cm.exception))
-        self.assertEqual(events, ["setUp"])
-
-        events = []
-        test = Foo("test2")
-        with self.assertRaises(unittest.SkipTest) as cm:
-            test.debug()
-        self.assertIn("skipping decorator", str(cm.exception))
-        self.assertEqual(events, [])
-
-    def test_debug_skipping_class(self):
-        @unittest.skip("testing")
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                events.append("setUp")
-            def tearDown(self):
-                events.append("tearDown")
-            def test(self):
-                events.append("test")
-
-        events = []
-        test = Foo("test")
-        with self.assertRaises(unittest.SkipTest) as cm:
-            test.debug()
-        self.assertIn("testing", str(cm.exception))
-        self.assertEqual(events, [])
-
-    def test_debug_skipping_subtests(self):
-        class Foo(unittest.TestCase):
-            def setUp(self):
-                events.append("setUp")
-            def tearDown(self):
-                events.append("tearDown")
-            def test(self):
-                with self.subTest(a=1):
-                    events.append('subtest')
-                    self.skipTest("skip subtest")
-                    events.append('end subtest')
-                events.append('end test')
-
-        events = []
-        result = LoggingResult(events)
-        test = Foo("test")
-        with self.assertRaises(unittest.SkipTest) as cm:
-            test.debug()
-        self.assertIn("skip subtest", str(cm.exception))
-        self.assertEqual(events, ['setUp', 'subtest'])
-
-
 if __name__ == "__main__":
     unittest.main()
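
For reference, a minimal sketch (illustrative Demo class, stock unittest) of what the dropped assertIs(test.run(result), result) checks cover: the newer stdlib makes TestCase.run() always return the TestResult it used, even on the skip path, while the stdlib restored by this revert returns None there, hence the plain test.run(result) calls.

    import unittest

    class Demo(unittest.TestCase):
        @unittest.skip("demo")
        def test_skipped(self):
            pass

    result = unittest.TestResult()
    returned = Demo("test_skipped").run(result)
    # Newer stdlib: True (run() hands back its TestResult even for a
    # skipped test).  Restored stdlib: False (the skip path returns
    # None), which is why the assertIs() checks are dropped above.
    print(returned is result)
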
diff --git a/common/py3-stdlib/unittest/test/testmock/testasync.py b/common/py3-stdlib/unittest/test/testmock/testasync.py
index e1866a3..690ca4f 100644
--- a/common/py3-stdlib/unittest/test/testmock/testasync.py
+++ b/common/py3-stdlib/unittest/test/testmock/testasync.py
@@ -199,9 +199,9 @@
         with self.assertRaises(RuntimeError):
             create_autospec(async_func, instance=True)
 
-    @unittest.skip('Broken test from https://bugs.python.org/issue37251')
     def test_create_autospec_awaitable_class(self):
-        self.assertIsInstance(create_autospec(AwaitableClass), AsyncMock)
+        awaitable_mock = create_autospec(spec=AwaitableClass())
+        self.assertIsInstance(create_autospec(awaitable_mock), AsyncMock)
 
     def test_create_autospec(self):
         spec = create_autospec(async_func_args)
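
For reference, a minimal sketch (illustrative fetch coroutine) of the autospec behavior around this hunk: an autospecced coroutine function still registers as a coroutine function on both sides; only the awaitable-class case flagged by bpo-37251 differs between the skipped and rewritten versions of the test above.

    import asyncio
    from unittest.mock import create_autospec

    async def fetch(url):
        return url

    spec = create_autospec(fetch)
    # True on both sides of this revert; the bpo-37251 breakage is
    # specific to autospeccing a class whose instances are awaitable.
    print(asyncio.iscoroutinefunction(spec))
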
diff --git a/common/py3-stdlib/unittest/test/testmock/testmock.py b/common/py3-stdlib/unittest/test/testmock/testmock.py
index fdba543..ce674e7 100644
--- a/common/py3-stdlib/unittest/test/testmock/testmock.py
+++ b/common/py3-stdlib/unittest/test/testmock/testmock.py
@@ -11,7 +11,7 @@
     call, DEFAULT, patch, sentinel,
     MagicMock, Mock, NonCallableMock,
     NonCallableMagicMock, AsyncMock, _Call, _CallList,
-    create_autospec, InvalidSpecError
+    create_autospec
 )
 
 
@@ -38,12 +38,6 @@
     def smeth(a, b, c, d=None): pass
 
 
-class Typos():
-    autospect = None
-    auto_spec = None
-    set_spec = None
-
-
 def something(a): pass
 
 
@@ -205,28 +199,6 @@
         self.assertRaisesRegex(ValueError, 'Bazinga!', mock)
 
 
-    def test_autospec_mock(self):
-        class A(object):
-            class B(object):
-                C = None
-
-        with mock.patch.object(A, 'B'):
-            with self.assertRaisesRegex(InvalidSpecError,
-                                        "Cannot autospec attr 'B' from target <MagicMock spec='A'"):
-                create_autospec(A).B
-            with self.assertRaisesRegex(InvalidSpecError,
-                                        "Cannot autospec attr 'B' from target 'A'"):
-                mock.patch.object(A, 'B', autospec=True).start()
-            with self.assertRaisesRegex(InvalidSpecError,
-                                        "Cannot autospec attr 'C' as the patch target "):
-                mock.patch.object(A.B, 'C', autospec=True).start()
-            with self.assertRaisesRegex(InvalidSpecError,
-                                        "Cannot spec attr 'B' as the spec "):
-                mock.patch.object(A, 'B', spec=A.B).start()
-            with self.assertRaisesRegex(InvalidSpecError,
-                                        "Cannot spec attr 'B' as the spec_set "):
-                mock.patch.object(A, 'B', spec_set=A.B).start()
-
     def test_reset_mock(self):
         parent = Mock()
         spec = ["something"]
@@ -1626,23 +1598,14 @@
     #Issue21238
     def test_mock_unsafe(self):
         m = Mock()
-        msg = "is not a valid assertion. Use a spec for the mock"
+        msg = "Attributes cannot start with 'assert' or 'assret'"
         with self.assertRaisesRegex(AttributeError, msg):
             m.assert_foo_call()
         with self.assertRaisesRegex(AttributeError, msg):
             m.assret_foo_call()
-        with self.assertRaisesRegex(AttributeError, msg):
-            m.asert_foo_call()
-        with self.assertRaisesRegex(AttributeError, msg):
-            m.aseert_foo_call()
-        with self.assertRaisesRegex(AttributeError, msg):
-            m.assrt_foo_call()
         m = Mock(unsafe=True)
         m.assert_foo_call()
         m.assret_foo_call()
-        m.asert_foo_call()
-        m.aseert_foo_call()
-        m.assrt_foo_call()
 
     #Issue21262
     def test_assert_not_called(self):
@@ -2193,62 +2156,6 @@
                 obj = mock(spec=Something)
                 self.assertIsInstance(obj, Something)
 
-    def test_bool_not_called_when_passing_spec_arg(self):
-        class Something:
-            def __init__(self):
-                self.obj_with_bool_func = unittest.mock.MagicMock()
-
-        obj = Something()
-        with unittest.mock.patch.object(obj, 'obj_with_bool_func', spec=object): pass
-
-        self.assertEqual(obj.obj_with_bool_func.__bool__.call_count, 0)
-
-    def test_misspelled_arguments(self):
-        class Foo():
-            one = 'one'
-        # patch, patch.object and create_autospec need to check for misspelled
-        # arguments explicitly and throw a RuntimError if found.
-        with self.assertRaises(RuntimeError):
-            with patch(f'{__name__}.Something.meth', autospect=True): pass
-        with self.assertRaises(RuntimeError):
-            with patch.object(Foo, 'one', autospect=True): pass
-        with self.assertRaises(RuntimeError):
-            with patch(f'{__name__}.Something.meth', auto_spec=True): pass
-        with self.assertRaises(RuntimeError):
-            with patch.object(Foo, 'one', auto_spec=True): pass
-        with self.assertRaises(RuntimeError):
-            with patch(f'{__name__}.Something.meth', set_spec=True): pass
-        with self.assertRaises(RuntimeError):
-            with patch.object(Foo, 'one', set_spec=True): pass
-        with self.assertRaises(RuntimeError):
-            m = create_autospec(Foo, set_spec=True)
-        # patch.multiple, on the other hand, should flag misspelled arguments
-        # through an AttributeError, when trying to find the keys from kwargs
-        # as attributes on the target.
-        with self.assertRaises(AttributeError):
-            with patch.multiple(
-                f'{__name__}.Something', meth=DEFAULT, autospect=True): pass
-        with self.assertRaises(AttributeError):
-            with patch.multiple(
-                f'{__name__}.Something', meth=DEFAULT, auto_spec=True): pass
-        with self.assertRaises(AttributeError):
-            with patch.multiple(
-                f'{__name__}.Something', meth=DEFAULT, set_spec=True): pass
-
-        with patch(f'{__name__}.Something.meth', unsafe=True, autospect=True):
-            pass
-        with patch.object(Foo, 'one', unsafe=True, autospect=True): pass
-        with patch(f'{__name__}.Something.meth', unsafe=True, auto_spec=True):
-            pass
-        with patch.object(Foo, 'one', unsafe=True, auto_spec=True): pass
-        with patch(f'{__name__}.Something.meth', unsafe=True, set_spec=True):
-            pass
-        with patch.object(Foo, 'one', unsafe=True, set_spec=True): pass
-        m = create_autospec(Foo, set_spec=True, unsafe=True)
-        with patch.multiple(
-            f'{__name__}.Typos', autospect=True, set_spec=True, auto_spec=True):
-            pass
-
 
 if __name__ == '__main__':
     unittest.main()
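
A minimal sketch (stock unittest.mock) of the guard behind the test_mock_unsafe hunk: attribute names that look like misspelled assertions raise AttributeError unless the mock is created with unsafe=True; the two sides differ only in how many spellings are rejected and in the message text.

    from unittest.mock import Mock

    m = Mock()
    try:
        m.assret_foo_call()        # misspelled assert_* attribute
    except AttributeError as exc:
        print(exc)                 # rejected on both sides; wording differs

    m = Mock(unsafe=True)          # opts out of the guard
    m.assret_foo_call()            # now just an ordinary child-mock call
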
diff --git a/common/py3-stdlib/unittest/test/testmock/testpatch.py b/common/py3-stdlib/unittest/test/testmock/testpatch.py
index 8ab63a1..d8c1515 100644
--- a/common/py3-stdlib/unittest/test/testmock/testpatch.py
+++ b/common/py3-stdlib/unittest/test/testmock/testpatch.py
@@ -1875,10 +1875,9 @@
             self.assertEqual(foo(), 1)
         self.assertEqual(foo(), 0)
 
-        orig_doc = foo.__doc__
         with patch.object(foo, '__doc__', "FUN"):
             self.assertEqual(foo.__doc__, "FUN")
-        self.assertEqual(foo.__doc__, orig_doc)
+        self.assertEqual(foo.__doc__, "TEST")
 
         with patch.object(foo, '__module__', "testpatch2"):
             self.assertEqual(foo.__module__, "testpatch2")
@@ -1933,13 +1932,8 @@
 
 
     def test_invalid_target(self):
-        class Foo:
-            pass
-
-        for target in ['', 12, Foo()]:
-            with self.subTest(target=target):
-                with self.assertRaises(TypeError):
-                    patch(target)
+        with self.assertRaises(TypeError):
+            patch('')
 
 
     def test_cant_set_kwargs_when_passing_a_mock(self):
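
A minimal sketch (stock unittest.mock) of the check that survives the test_invalid_target hunk: an empty patch target raises TypeError on both sides; the dropped subtests additionally covered non-string targets such as 12 or a plain instance.

    from unittest.mock import patch

    try:
        patch('')
    except TypeError as exc:
        print(exc)   # rejected on both sides
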
diff --git a/common/py3-stdlib/unittest/test/testmock/testsealable.py b/common/py3-stdlib/unittest/test/testmock/testsealable.py
index daba2b4..59f5233 100644
--- a/common/py3-stdlib/unittest/test/testmock/testsealable.py
+++ b/common/py3-stdlib/unittest/test/testmock/testsealable.py
@@ -128,7 +128,7 @@
             m.attr_sample2
 
     def test_integration_with_spec_method_definition(self):
-        """You need to define the methods, even if they are in the spec"""
+        """You need to defin the methods, even if they are in the spec"""
         m = mock.Mock(SampleObject)
 
         m.method_sample1.return_value = 1
@@ -171,67 +171,6 @@
             m.test1().test2.test3().test4()
         self.assertIn("mock.test1().test2.test3().test4", str(cm.exception))
 
-    def test_seal_with_autospec(self):
-        # https://bugs.python.org/issue45156
-        class Foo:
-            foo = 0
-            def bar1(self):
-                return 1
-            def bar2(self):
-                return 2
-
-            class Baz:
-                baz = 3
-                def ban(self):
-                    return 4
-
-        for spec_set in (True, False):
-            with self.subTest(spec_set=spec_set):
-                foo = mock.create_autospec(Foo, spec_set=spec_set)
-                foo.bar1.return_value = 'a'
-                foo.Baz.ban.return_value = 'b'
-
-                mock.seal(foo)
-
-                self.assertIsInstance(foo.foo, mock.NonCallableMagicMock)
-                self.assertIsInstance(foo.bar1, mock.MagicMock)
-                self.assertIsInstance(foo.bar2, mock.MagicMock)
-                self.assertIsInstance(foo.Baz, mock.MagicMock)
-                self.assertIsInstance(foo.Baz.baz, mock.NonCallableMagicMock)
-                self.assertIsInstance(foo.Baz.ban, mock.MagicMock)
-
-                self.assertEqual(foo.bar1(), 'a')
-                foo.bar1.return_value = 'new_a'
-                self.assertEqual(foo.bar1(), 'new_a')
-                self.assertEqual(foo.Baz.ban(), 'b')
-                foo.Baz.ban.return_value = 'new_b'
-                self.assertEqual(foo.Baz.ban(), 'new_b')
-
-                with self.assertRaises(TypeError):
-                    foo.foo()
-                with self.assertRaises(AttributeError):
-                    foo.bar = 1
-                with self.assertRaises(AttributeError):
-                    foo.bar2()
-
-                foo.bar2.return_value = 'bar2'
-                self.assertEqual(foo.bar2(), 'bar2')
-
-                with self.assertRaises(AttributeError):
-                    foo.missing_attr
-                with self.assertRaises(AttributeError):
-                    foo.missing_attr = 1
-                with self.assertRaises(AttributeError):
-                    foo.missing_method()
-                with self.assertRaises(TypeError):
-                    foo.Baz.baz()
-                with self.assertRaises(AttributeError):
-                    foo.Baz.missing_attr
-                with self.assertRaises(AttributeError):
-                    foo.Baz.missing_attr = 1
-                with self.assertRaises(AttributeError):
-                    foo.Baz.missing_method()
-
 
 if __name__ == "__main__":
     unittest.main()
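
For reference, a minimal sketch of mock.seal(), which the dropped bpo-45156 test exercised against create_autospec(): sealing freezes the mock's attribute tree, so attributes configured beforehand keep working while new ones raise AttributeError.

    from unittest import mock

    m = mock.Mock()
    m.method.return_value = 1
    mock.seal(m)          # no new attributes past this point

    print(m.method())     # configured before sealing: still 1
    try:
        m.other
    except AttributeError as exc:
        print(exc)
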
diff --git a/common/py3-stdlib/urllib/parse.py b/common/py3-stdlib/urllib/parse.py
index b35997b..ea897c3 100644
--- a/common/py3-stdlib/urllib/parse.py
+++ b/common/py3-stdlib/urllib/parse.py
@@ -78,9 +78,6 @@
                 '0123456789'
                 '+-.')
 
-# Unsafe bytes to be removed per WHATWG spec
-_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']
-
 # XXX: Consider replacing with functools.lru_cache
 MAX_CACHE_SIZE = 20
 _parse_cache = {}
@@ -456,11 +453,6 @@
     """
 
     url, scheme, _coerce_result = _coerce_args(url, scheme)
-
-    for b in _UNSAFE_URL_BYTES_TO_REMOVE:
-        url = url.replace(b, "")
-        scheme = scheme.replace(b, "")
-
     allow_fragments = bool(allow_fragments)
     key = url, scheme, allow_fragments, type(url), type(scheme)
     cached = _parse_cache.get(key, None)
@@ -670,7 +662,7 @@
 
 
 def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-             encoding='utf-8', errors='replace', max_num_fields=None, separator='&'):
+             encoding='utf-8', errors='replace', max_num_fields=None):
     """Parse a query given as a string argument.
 
         Arguments:
@@ -694,15 +686,12 @@
         max_num_fields: int. If set, then throws a ValueError if there
             are more than n fields read by parse_qsl().
 
-        separator: str. The symbol to use for separating the query arguments.
-            Defaults to &.
-
         Returns a dictionary.
     """
     parsed_result = {}
     pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
                       encoding=encoding, errors=errors,
-                      max_num_fields=max_num_fields, separator=separator)
+                      max_num_fields=max_num_fields)
     for name, value in pairs:
         if name in parsed_result:
             parsed_result[name].append(value)
@@ -712,7 +701,7 @@
 
 
 def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-              encoding='utf-8', errors='replace', max_num_fields=None, separator='&'):
+              encoding='utf-8', errors='replace', max_num_fields=None):
     """Parse a query given as a string argument.
 
         Arguments:
@@ -735,27 +724,21 @@
         max_num_fields: int. If set, then throws a ValueError
             if there are more than n fields read by parse_qsl().
 
-        separator: str. The symbol to use for separating the query arguments.
-            Defaults to &.
-
         Returns a list, as G-d intended.
     """
     qs, _coerce_result = _coerce_args(qs)
-    separator, _ = _coerce_args(separator)
-
-    if not separator or (not isinstance(separator, (str, bytes))):
-        raise ValueError("Separator must be of type string or bytes.")
 
     # If max_num_fields is defined then check that the number of fields
     # is less than max_num_fields. This prevents a memory exhaustion DOS
     # attack via post bodies with many fields.
     if max_num_fields is not None:
-        num_fields = 1 + qs.count(separator)
+        num_fields = 1 + qs.count('&') + qs.count(';')
         if max_num_fields < num_fields:
             raise ValueError('Max number of fields exceeded')
 
+    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
     r = []
-    for name_value in qs.split(separator):
+    for name_value in pairs:
         if not name_value and not strict_parsing:
             continue
         nv = name_value.split('=', 1)
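
A minimal sketch (stock urllib.parse) of the query-splitting behavior this hunk reverts: the restored code treats both '&' and ';' as field separators, while the dropped code splits only on the separator argument (default '&') and rejects empty or non-string separators.

    from urllib.parse import parse_qs

    qs = 'a=1;b=2&c=3'
    # Restored stdlib: {'a': ['1'], 'b': ['2'], 'c': ['3']}
    # Dropped stdlib:  {'a': ['1;b=2'], 'c': ['3']} with the default '&'
    print(parse_qs(qs))
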
diff --git a/common/py3-stdlib/urllib/request.py b/common/py3-stdlib/urllib/request.py
index 34b1b0b..a8c870b 100644
--- a/common/py3-stdlib/urllib/request.py
+++ b/common/py3-stdlib/urllib/request.py
@@ -64,7 +64,7 @@
 # install it
 urllib.request.install_opener(opener)
 
-f = urllib.request.urlopen('https://www.python.org/')
+f = urllib.request.urlopen('http://www.python.org/')
 """
 
 # XXX issues:
@@ -202,8 +202,6 @@
         context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH,
                                              cafile=cafile,
                                              capath=capath)
-        # send ALPN extension to indicate HTTP/1.1 protocol
-        context.set_alpn_protocols(['http/1.1'])
         https_handler = HTTPSHandler(context=context)
         opener = build_opener(https_handler)
     elif context:
@@ -773,11 +771,7 @@
             raise ValueError("proxy URL with no authority: %r" % proxy)
         # We have an authority, so for RFC 3986-compliant URLs (by ss 3.
         # and 3.3.), path is empty or starts with '/'
-        if '@' in r_scheme:
-            host_separator = r_scheme.find('@')
-            end = r_scheme.find("/", host_separator)
-        else:
-            end = r_scheme.find("/", 2)
+        end = r_scheme.find("/", 2)
         if end == -1:
             end = None
         authority = r_scheme[2:end]
@@ -889,10 +883,10 @@
             return True
         if base[0] != test[0]:
             return False
-        prefix = base[1]
-        if prefix[-1:] != '/':
-            prefix += '/'
-        return test[1].startswith(prefix)
+        common = posixpath.commonprefix((base[1], test[1]))
+        if len(common) == len(base[1]):
+            return True
+        return False
 
 
 class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
@@ -945,7 +939,7 @@
     # (single quotes are a violation of the RFC, but appear in the wild)
     rx = re.compile('(?:^|,)'   # start of the string or ','
                     '[ \t]*'    # optional whitespaces
-                    '([^ \t,]+)' # scheme like "Basic"
+                    '([^ \t]+)' # scheme like "Basic"
                     '[ \t]+'    # mandatory whitespaces
                     # realm=xxx
                     # realm='xxx'
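
A minimal sketch of the URI-matching difference restored in the password-manager hunk above: posixpath.commonprefix() compares characters, not path segments, so the restored check treats /foo/spam.eggs as falling under /foo/spam, whereas the dropped code required a '/'-terminated prefix and would not.

    import posixpath

    base, test = '/foo/spam', '/foo/spam.eggs'
    # True under the restored commonprefix() check; the dropped
    # prefix-with-'/' check would reject this pair.
    print(posixpath.commonprefix((base, test)) == base)
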
diff --git a/common/py3-stdlib/weakref.py b/common/py3-stdlib/weakref.py
index 994ea8a..5fa851d 100644
--- a/common/py3-stdlib/weakref.py
+++ b/common/py3-stdlib/weakref.py
@@ -2,7 +2,7 @@
 
 This module is an implementation of PEP 205:
 
-https://www.python.org/dev/peps/pep-0205/
+http://www.python.org/dev/peps/pep-0205/
 """
 
 # Naming convention: Variables named "wr" are weak reference objects;
@@ -119,17 +119,14 @@
         self.data = {}
         self.update(other, **kw)
 
-    def _commit_removals(self, _atomic_removal=_remove_dead_weakref):
-        pop = self._pending_removals.pop
+    def _commit_removals(self):
+        l = self._pending_removals
         d = self.data
         # We shouldn't encounter any KeyError, because this method should
         # always be called *before* mutating the dict.
-        while True:
-            try:
-                key = pop()
-            except IndexError:
-                return
-            _atomic_removal(d, key)
+        while l:
+            key = l.pop()
+            _remove_dead_weakref(d, key)
 
     def __getitem__(self, key):
         if self._pending_removals:
@@ -373,10 +370,7 @@
                 if self._iterating:
                     self._pending_removals.append(k)
                 else:
-                    try:
-                        del self.data[k]
-                    except KeyError:
-                        pass
+                    del self.data[k]
         self._remove = remove
         # A list of dead weakrefs (keys to be removed)
         self._pending_removals = []
@@ -390,16 +384,11 @@
         # because a dead weakref never compares equal to a live weakref,
         # even if they happened to refer to equal objects.
         # However, it means keys may already have been removed.
-        pop = self._pending_removals.pop
+        l = self._pending_removals
         d = self.data
-        while True:
+        while l:
             try:
-                key = pop()
-            except IndexError:
-                return
-
-            try:
-                del d[key]
+                del d[l.pop()]
             except KeyError:
                 pass
 
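
A stand-in sketch of the restored drain loop (a plain list and dict in place of the weak-reference machinery): pop pending keys and delete them from the backing dict, tolerating keys that are already gone, as the KeyError branch above does.

    def commit_removals(pending, data):
        # Mirrors the restored "while l: del d[l.pop()]" shape.
        while pending:
            try:
                del data[pending.pop()]
            except KeyError:
                pass

    pending, data = ['a', 'b'], {'a': 1, 'c': 3}
    commit_removals(pending, data)
    print(data)   # {'c': 3}
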
diff --git a/common/py3-stdlib/webbrowser.py b/common/py3-stdlib/webbrowser.py
index ec3cece..6023c1e 100755
--- a/common/py3-stdlib/webbrowser.py
+++ b/common/py3-stdlib/webbrowser.py
@@ -1,5 +1,5 @@
 #! /usr/bin/env python3
-"""Interfaces for launching and remotely controlling web browsers."""
+"""Interfaces for launching and remotely controlling Web browsers."""
 # Maintained by Georg Brandl.
 
 import os
@@ -532,10 +532,6 @@
         # OS X can use below Unix support (but we prefer using the OS X
         # specific stuff)
 
-    if sys.platform == "serenityos":
-        # SerenityOS webbrowser, simply called "Browser".
-        register("Browser", None, BackgroundBrowser("Browser"))
-
     if sys.platform[:3] == "win":
         # First try to use the default Windows browser
         register("windows-default", WindowsDefault)
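
For reference, a sketch of the registration hook the dropped SerenityOS block uses; the 'demo' name and command string below are illustrative, not shipped entries.

    import webbrowser

    webbrowser.register('demo', None,
                        webbrowser.BackgroundBrowser('demo-browser'))
    print(webbrowser.get('demo'))   # the BackgroundBrowser instance
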
diff --git a/common/py3-stdlib/wsgiref/validate.py b/common/py3-stdlib/wsgiref/validate.py
index 6e16578..48ac007 100644
--- a/common/py3-stdlib/wsgiref/validate.py
+++ b/common/py3-stdlib/wsgiref/validate.py
@@ -137,7 +137,7 @@
 
     """
     When applied between a WSGI server and a WSGI application, this
-    middleware will check for WSGI compliance on a number of levels.
+    middleware will check for WSGI compliancy on a number of levels.
     This middleware does not modify the request or response in any
     way, but will raise an AssertionError if anything seems off
     (except for a failure to close the application iterator, which
diff --git a/common/py3-stdlib/xml/etree/ElementInclude.py b/common/py3-stdlib/xml/etree/ElementInclude.py
index 40a9b22..5303062 100644
--- a/common/py3-stdlib/xml/etree/ElementInclude.py
+++ b/common/py3-stdlib/xml/etree/ElementInclude.py
@@ -42,7 +42,7 @@
 # --------------------------------------------------------------------
 
 # Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
+# See http://www.python.org/psf/license for licensing details.
 
 ##
 # Limited XInclude support for the ElementTree package.
diff --git a/common/py3-stdlib/xml/etree/ElementPath.py b/common/py3-stdlib/xml/etree/ElementPath.py
index a1170b5..d318e65 100644
--- a/common/py3-stdlib/xml/etree/ElementPath.py
+++ b/common/py3-stdlib/xml/etree/ElementPath.py
@@ -48,7 +48,7 @@
 # --------------------------------------------------------------------
 
 # Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
+# See http://www.python.org/psf/license for licensing details.
 
 ##
 # Implementation module for XPath support.  There's usually no reason
@@ -65,9 +65,8 @@
     r"//?|"
     r"\.\.|"
     r"\(\)|"
-    r"!=|"
     r"[/.*:\[\]\(\)@=])|"
-    r"((?:\{[^}]+\})?[^/\[\]\(\)@!=\s]+)|"
+    r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
     r"\s+"
     )
 
@@ -254,19 +253,15 @@
                 if elem.get(key) is not None:
                     yield elem
         return select
-    if signature == "@-='" or signature == "@-!='":
-        # [@attribute='value'] or [@attribute!='value']
+    if signature == "@-='":
+        # [@attribute='value']
         key = predicate[1]
         value = predicate[-1]
         def select(context, result):
             for elem in result:
                 if elem.get(key) == value:
                     yield elem
-        def select_negated(context, result):
-            for elem in result:
-                if (attr_value := elem.get(key)) is not None and attr_value != value:
-                    yield elem
-        return select_negated if '!=' in signature else select
+        return select
     if signature == "-" and not re.match(r"\-?\d+$", predicate[0]):
         # [tag]
         tag = predicate[0]
@@ -275,10 +270,8 @@
                 if elem.find(tag) is not None:
                     yield elem
         return select
-    if signature == ".='" or signature == ".!='" or (
-            (signature == "-='" or signature == "-!='")
-            and not re.match(r"\-?\d+$", predicate[0])):
-        # [.='value'] or [tag='value'] or [.!='value'] or [tag!='value']
+    if signature == ".='" or (signature == "-='" and not re.match(r"\-?\d+$", predicate[0])):
+        # [.='value'] or [tag='value']
         tag = predicate[0]
         value = predicate[-1]
         if tag:
@@ -288,22 +281,12 @@
                         if "".join(e.itertext()) == value:
                             yield elem
                             break
-            def select_negated(context, result):
-                for elem in result:
-                    for e in elem.iterfind(tag):
-                        if "".join(e.itertext()) != value:
-                            yield elem
-                            break
         else:
             def select(context, result):
                 for elem in result:
                     if "".join(elem.itertext()) == value:
                         yield elem
-            def select_negated(context, result):
-                for elem in result:
-                    if "".join(elem.itertext()) != value:
-                        yield elem
-        return select_negated if '!=' in signature else select
+        return select
     if signature == "-" or signature == "-()" or signature == "-()-":
         # [index] or [last()] or [last()-index]
         if signature == "-":
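
A minimal sketch (stock xml.etree.ElementTree) of the predicate support this hunk reverts. With the restored tokenizer, '!' is a legal name character, so [@x!='1'] parses as an equality test on an attribute literally named "x!" and silently matches nothing; the dropped code parses != as an operator.

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<r><a x="1"/><a x="2"/></r>')
    print(root.findall(".//a[@x='1']"))    # one match on both sides
    # Dropped parser: the x="2" element.  Restored parser: [] (the
    # query quietly looks for an attribute named "x!").
    print(root.findall(".//a[@x!='1']"))
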
diff --git a/common/py3-stdlib/xml/etree/ElementTree.py b/common/py3-stdlib/xml/etree/ElementTree.py
index 07be860..7a26900 100644
--- a/common/py3-stdlib/xml/etree/ElementTree.py
+++ b/common/py3-stdlib/xml/etree/ElementTree.py
@@ -35,7 +35,7 @@
 
 #---------------------------------------------------------------------
 # Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
+# See http://www.python.org/psf/license for licensing details.
 #
 # ElementTree
 # Copyright (c) 1999-2008 by Fredrik Lundh.  All rights reserved.
@@ -252,7 +252,7 @@
         """
         for element in elements:
             self._assert_is_element(element)
-            self._children.append(element)
+        self._children.extend(elements)
 
     def insert(self, index, subelement):
         """Insert *subelement* at position *index*."""
@@ -1248,14 +1248,8 @@
     # Use the internal, undocumented _parser argument for now; When the
     # parser argument of iterparse is removed, this can be killed.
     pullparser = XMLPullParser(events=events, _parser=parser)
-
-    def iterator(source):
-        close_source = False
+    def iterator():
         try:
-            if not hasattr(source, "read"):
-                source = open(source, "rb")
-                close_source = True
-            yield None
             while True:
                 yield from pullparser.read_events()
                 # load event buffer
@@ -1271,12 +1265,16 @@
                 source.close()
 
     class IterParseIterator(collections.abc.Iterator):
-        __next__ = iterator(source).__next__
+        __next__ = iterator().__next__
     it = IterParseIterator()
     it.root = None
     del iterator, IterParseIterator
 
-    next(it)
+    close_source = False
+    if not hasattr(source, "read"):
+        source = open(source, "rb")
+        close_source = True
+
     return it
 
 
@@ -1285,7 +1283,7 @@
     def __init__(self, events=None, *, _parser=None):
         # The _parser argument is for internal use only and must not be relied
         # upon in user code. It will be removed in a future release.
-        # See https://bugs.python.org/issue17741 for more details.
+        # See http://bugs.python.org/issue17741 for more details.
 
         self._events_queue = collections.deque()
         self._parser = _parser or XMLParser(target=TreeBuilder())
@@ -1562,6 +1560,7 @@
         # Configure pyexpat: buffering, new-style attribute handling.
         parser.buffer_text = 1
         parser.ordered_attributes = 1
+        parser.specified_attributes = 1
         self._doctype = None
         self.entity = {}
         try:
@@ -1581,6 +1580,7 @@
         for event_name in events_to_report:
             if event_name == "start":
                 parser.ordered_attributes = 1
+                parser.specified_attributes = 1
                 def handler(tag, attrib_in, event=event_name, append=append,
                             start=self._start):
                     append((event, start(tag, attrib_in)))
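
For reference, a minimal iterparse() usage sketch; it reads the same on both sides, since the hunks above only move where a filename source is opened (inside the event generator in the dropped code, tying cleanup to the iterator's lifetime, versus inside iterparse() itself in the restored code) and restore the specified_attributes=1 expat setting.

    import io
    import xml.etree.ElementTree as ET

    src = io.StringIO('<root><child/></root>')
    for event, elem in ET.iterparse(src, events=('end',)):
        print(event, elem.tag)   # "end child", then "end root"
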
diff --git a/common/py3-stdlib/xml/etree/__init__.py b/common/py3-stdlib/xml/etree/__init__.py
index e2ec534..27fd8f6 100644
--- a/common/py3-stdlib/xml/etree/__init__.py
+++ b/common/py3-stdlib/xml/etree/__init__.py
@@ -30,4 +30,4 @@
 # --------------------------------------------------------------------
 
 # Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
+# See http://www.python.org/psf/license for licensing details.
diff --git a/common/py3-stdlib/xml/sax/handler.py b/common/py3-stdlib/xml/sax/handler.py
index e8d417e..481733d 100644
--- a/common/py3-stdlib/xml/sax/handler.py
+++ b/common/py3-stdlib/xml/sax/handler.py
@@ -340,48 +340,3 @@
                   property_xml_string,
                   property_encoding,
                   property_interning_dict]
-
-
-class LexicalHandler:
-    """Optional SAX2 handler for lexical events.
-
-    This handler is used to obtain lexical information about an XML
-    document, that is, information about how the document was encoded
-    (as opposed to what it contains, which is reported to the
-    ContentHandler), such as comments and CDATA marked section
-    boundaries.
-
-    To set the LexicalHandler of an XMLReader, use the setProperty
-    method with the property identifier
-    'http://xml.org/sax/properties/lexical-handler'."""
-
-    def comment(self, content):
-        """Reports a comment anywhere in the document (including the
-        DTD and outside the document element).
-
-        content is a string that holds the contents of the comment."""
-
-    def startDTD(self, name, public_id, system_id):
-        """Report the start of the DTD declarations, if the document
-        has an associated DTD.
-
-        A startEntity event will be reported before declaration events
-        from the external DTD subset are reported, and this can be
-        used to infer from which subset DTD declarations derive.
-
-        name is the name of the document element type, public_id the
-        public identifier of the DTD (or None if none were supplied)
-        and system_id the system identfier of the external subset (or
-        None if none were supplied)."""
-
-    def endDTD(self):
-        """Signals the end of DTD declarations."""
-
-    def startCDATA(self):
-        """Reports the beginning of a CDATA marked section.
-
-        The contents of the CDATA marked section will be reported
-        through the characters event."""
-
-    def endCDATA(self):
-        """Reports the end of a CDATA marked section."""
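
For reference, a minimal sketch of wiring a lexical handler through setProperty(), matching the interface of the removed stub class. This assumes the default expat reader; only comment events fire for this input, but the reader wires the CDATA callbacks eagerly, so stubs for them are still needed.

    import io
    import xml.sax
    from xml.sax.handler import ContentHandler, property_lexical_handler

    class Lex:
        def comment(self, content):
            print('comment:', content)
        def startCDATA(self): pass
        def endCDATA(self): pass
        def startDTD(self, name, public_id, system_id): pass
        def endDTD(self): pass

    parser = xml.sax.make_parser()
    parser.setContentHandler(ContentHandler())
    parser.setProperty(property_lexical_handler, Lex())
    parser.parse(io.StringIO('<r><!-- hello --></r>'))
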
diff --git a/common/py3-stdlib/xmlrpc/client.py b/common/py3-stdlib/xmlrpc/client.py
index a614cef..d15d60d 100644
--- a/common/py3-stdlib/xmlrpc/client.py
+++ b/common/py3-stdlib/xmlrpc/client.py
@@ -264,22 +264,16 @@
 
 # Issue #13305: different format codes across platforms
 _day0 = datetime(1, 1, 1)
-def _try(fmt):
-    try:
-        return _day0.strftime(fmt) == '0001'
-    except ValueError:
-        return False
-if _try('%Y'):      # Mac OS X
+if _day0.strftime('%Y') == '0001':      # Mac OS X
     def _iso8601_format(value):
         return value.strftime("%Y%m%dT%H:%M:%S")
-elif _try('%4Y'):   # Linux
+elif _day0.strftime('%4Y') == '0001':   # Linux
     def _iso8601_format(value):
         return value.strftime("%4Y%m%dT%H:%M:%S")
 else:
     def _iso8601_format(value):
         return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
 del _day0
-del _try
 
 
 def _strftime(value):
@@ -1427,13 +1421,11 @@
         # establish a "logical" server connection
 
         # get the url
-        p = urllib.parse.urlsplit(uri)
+        p = urllib.parse.urlparse(uri)
         if p.scheme not in ("http", "https"):
             raise OSError("unsupported XML-RPC protocol")
         self.__host = p.netloc
-        self.__handler = urllib.parse.urlunsplit(["", "", *p[2:]])
-        if not self.__handler:
-            self.__handler = "/RPC2"
+        self.__handler = p.path or "/RPC2"
 
         if transport is None:
             if p.scheme == "https":
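
A minimal sketch (stock urllib.parse; the URI below is illustrative) of the handler-derivation difference in this hunk: urlsplit() keeps path parameters and the query in the handler, while the restored urlparse().path drops both.

    from urllib.parse import urlsplit, urlunsplit, urlparse

    uri = 'http://example.com/RPC2;param?q=1'
    p = urlsplit(uri)
    print(urlunsplit(["", "", *p[2:]]) or "/RPC2")  # '/RPC2;param?q=1'
    print(urlparse(uri).path or "/RPC2")            # '/RPC2'
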
diff --git a/common/py3-stdlib/xmlrpc/server.py b/common/py3-stdlib/xmlrpc/server.py
index 69a260f..287e324 100644
--- a/common/py3-stdlib/xmlrpc/server.py
+++ b/common/py3-stdlib/xmlrpc/server.py
@@ -750,7 +750,7 @@
                 url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif pep:
-                url = 'https://www.python.org/dev/peps/pep-%04d/' % int(pep)
+                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                 results.append('<a href="%s">%s</a>' % (url, escape(all)))
             elif text[end:end+1] == '(':
                 results.append(self.namelink(name, methods, funcs, classes))
diff --git a/common/py3-stdlib/zipfile.py b/common/py3-stdlib/zipfile.py
index 67cfdfb..816f858 100644
--- a/common/py3-stdlib/zipfile.py
+++ b/common/py3-stdlib/zipfile.py
@@ -16,7 +16,6 @@
 import threading
 import time
 import contextlib
-import pathlib
 
 try:
     import zlib # We may need its compression method
@@ -1121,15 +1120,8 @@
     def write(self, data):
         if self.closed:
             raise ValueError('I/O operation on closed file.')
-
-        # Accept any data that supports the buffer protocol
-        if isinstance(data, (bytes, bytearray)):
-            nbytes = len(data)
-        else:
-            data = memoryview(data)
-            nbytes = data.nbytes
+        nbytes = len(data)
         self._file_size += nbytes
-
         self._crc = crc32(data, self._crc)
         if self._compressor:
             data = self._compressor.compress(data)
@@ -2205,12 +2197,13 @@
         if not isinstance(source, ZipFile):
             return cls(source)
 
-        # Only allow for FastLookup when supplied zipfile is read-only
+        # Only allow for FastPath when supplied zipfile is read-only
         if 'r' not in source.mode:
             cls = CompleteDirs
 
-        source.__class__ = cls
-        return source
+        res = cls.__new__(cls)
+        vars(res).update(vars(source))
+        return res
 
 
 class FastLookup(CompleteDirs):
@@ -2218,7 +2211,6 @@
     ZipFile subclass to ensure implicit
     dirs exist and are resolved rapidly.
     """
-
     def namelist(self):
         with contextlib.suppress(AttributeError):
             return self.__names
@@ -2250,7 +2242,7 @@
     >>> zf.writestr('a.txt', 'content of a')
     >>> zf.writestr('b/c.txt', 'content of c')
     >>> zf.writestr('b/d/e.txt', 'content of e')
-    >>> zf.filename = 'mem/abcde.zip'
+    >>> zf.filename = 'abcde.zip'
 
     Path accepts the zipfile object itself or a filename
 
@@ -2262,9 +2254,9 @@
 
     >>> a, b = root.iterdir()
     >>> a
-    Path('mem/abcde.zip', 'a.txt')
+    Path('abcde.zip', 'a.txt')
     >>> b
-    Path('mem/abcde.zip', 'b/')
+    Path('abcde.zip', 'b/')
 
     name property:
 
@@ -2275,7 +2267,7 @@
 
     >>> c = b / 'c.txt'
     >>> c
-    Path('mem/abcde.zip', 'b/c.txt')
+    Path('abcde.zip', 'b/c.txt')
     >>> c.name
     'c.txt'
 
@@ -2293,68 +2285,36 @@
 
     Coercion to string:
 
-    >>> import os
-    >>> str(c).replace(os.sep, posixpath.sep)
-    'mem/abcde.zip/b/c.txt'
-
-    At the root, ``name``, ``filename``, and ``parent``
-    resolve to the zipfile. Note these attributes are not
-    valid and will raise a ``ValueError`` if the zipfile
-    has no filename.
-
-    >>> root.name
-    'abcde.zip'
-    >>> str(root.filename).replace(os.sep, posixpath.sep)
-    'mem/abcde.zip'
-    >>> str(root.parent)
-    'mem'
+    >>> str(c)
+    'abcde.zip/b/c.txt'
     """
 
     __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
 
     def __init__(self, root, at=""):
-        """
-        Construct a Path from a ZipFile or filename.
-
-        Note: When the source is an existing ZipFile object,
-        its type (__class__) will be mutated to a
-        specialized type. If the caller wishes to retain the
-        original type, the caller should either create a
-        separate ZipFile object or pass a filename.
-        """
         self.root = FastLookup.make(root)
         self.at = at
 
-    def open(self, mode='r', *args, pwd=None, **kwargs):
+    def open(self, mode='r', *args, **kwargs):
         """
         Open this entry as text or binary following the semantics
         of ``pathlib.Path.open()`` by passing arguments through
         to io.TextIOWrapper().
         """
-        if self.is_dir():
-            raise IsADirectoryError(self)
+        pwd = kwargs.pop('pwd', None)
         zip_mode = mode[0]
-        if not self.exists() and zip_mode == 'r':
-            raise FileNotFoundError(self)
         stream = self.root.open(self.at, zip_mode, pwd=pwd)
         if 'b' in mode:
             if args or kwargs:
                 raise ValueError("encoding args invalid for binary operation")
             return stream
-        else:
-            kwargs["encoding"] = io.text_encoding(kwargs.get("encoding"))
         return io.TextIOWrapper(stream, *args, **kwargs)
 
     @property
     def name(self):
-        return pathlib.Path(self.at).name or self.filename.name
-
-    @property
-    def filename(self):
-        return pathlib.Path(self.root.filename).joinpath(self.at)
+        return posixpath.basename(self.at.rstrip("/"))
 
     def read_text(self, *args, **kwargs):
-        kwargs["encoding"] = io.text_encoding(kwargs.get("encoding"))
         with self.open('r', *args, **kwargs) as strm:
             return strm.read()
 
@@ -2366,13 +2326,13 @@
         return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
 
     def _next(self, at):
-        return self.__class__(self.root, at)
+        return Path(self.root, at)
 
     def is_dir(self):
         return not self.at or self.at.endswith("/")
 
     def is_file(self):
-        return self.exists() and not self.is_dir()
+        return not self.is_dir()
 
     def exists(self):
         return self.at in self.root._name_set()
@@ -2389,16 +2349,14 @@
     def __repr__(self):
         return self.__repr.format(self=self)
 
-    def joinpath(self, *other):
-        next = posixpath.join(self.at, *other)
+    def joinpath(self, add):
+        next = posixpath.join(self.at, add)
         return self._next(self.root.resolve_dir(next))
 
     __truediv__ = joinpath
 
     @property
     def parent(self):
-        if not self.at:
-            return self.filename.parent
         parent_at = posixpath.dirname(self.at.rstrip('/'))
         if parent_at:
             parent_at += '/'
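
For reference, a minimal zipfile.Path sketch that behaves the same on both sides of these hunks (in-memory archive; names illustrative); the differences above concern edge cases such as multi-part joinpath(), missing-file errors, and the name/parent of the root entry.

    import io
    import zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('b/c.txt', 'content of c')

    root = zipfile.Path(zipfile.ZipFile(buf))
    c = root / 'b' / 'c.txt'
    print(c.name, c.read_text())   # c.txt content of c
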
diff --git a/common/py3-stdlib/zipimport.py b/common/py3-stdlib/zipimport.py
index 25eaee9..5ef0a17 100644
--- a/common/py3-stdlib/zipimport.py
+++ b/common/py3-stdlib/zipimport.py
@@ -22,7 +22,6 @@
 import marshal  # for loads
 import sys  # for modules
 import time  # for mktime
-import _warnings  # For warn()
 
 __all__ = ['ZipImportError', 'zipimporter']
 
@@ -43,7 +42,7 @@
 STRING_END_ARCHIVE = b'PK\x05\x06'
 MAX_COMMENT_LEN = (1 << 16) - 1
 
-class zipimporter(_bootstrap_external._LoaderBasics):
+class zipimporter:
     """zipimporter(archivepath) -> zipimporter object
 
     Create a new zipimporter instance. 'archivepath' must be a path to
@@ -116,12 +115,7 @@
         full path name if it's possibly a portion of a namespace package,
         or None otherwise. The optional 'path' argument is ignored -- it's
         there for compatibility with the importer protocol.
-
-        Deprecated since Python 3.10. Use find_spec() instead.
         """
-        _warnings.warn("zipimporter.find_loader() is deprecated and slated for "
-                       "removal in Python 3.12; use find_spec() instead",
-                       DeprecationWarning)
         mi = _get_module_info(self, fullname)
         if mi is not None:
             # This is a module or package.
@@ -152,46 +146,15 @@
         instance itself if the module was found, or None if it wasn't.
         The optional 'path' argument is ignored -- it's there for compatibility
         with the importer protocol.
-
-        Deprecated since Python 3.10. Use find_spec() instead.
         """
-        _warnings.warn("zipimporter.find_module() is deprecated and slated for "
-                       "removal in Python 3.12; use find_spec() instead",
-                       DeprecationWarning)
         return self.find_loader(fullname, path)[0]
 
-    def find_spec(self, fullname, target=None):
-        """Create a ModuleSpec for the specified module.
-
-        Returns None if the module cannot be found.
-        """
-        module_info = _get_module_info(self, fullname)
-        if module_info is not None:
-            return _bootstrap.spec_from_loader(fullname, self, is_package=module_info)
-        else:
-            # Not a module or regular package. See if this is a directory, and
-            # therefore possibly a portion of a namespace package.
-
-            # We're only interested in the last path component of fullname
-            # earlier components are recorded in self.prefix.
-            modpath = _get_module_path(self, fullname)
-            if _is_dir(self, modpath):
-                # This is possibly a portion of a namespace
-                # package. Return the string representing its path,
-                # without a trailing separator.
-                path = f'{self.archive}{path_sep}{modpath}'
-                spec = _bootstrap.ModuleSpec(name=fullname, loader=None,
-                                             is_package=True)
-                spec.submodule_search_locations.append(path)
-                return spec
-            else:
-                return None
 
     def get_code(self, fullname):
         """get_code(fullname) -> code object.
 
         Return the code object for the specified module. Raise ZipImportError
-        if the module couldn't be imported.
+        if the module couldn't be found.
         """
         code, ispackage, modpath = _get_module_code(self, fullname)
         return code
@@ -221,8 +184,7 @@
     def get_filename(self, fullname):
         """get_filename(fullname) -> filename string.
 
-        Return the filename for the specified module or raise ZipImportError
-        if it couldn't be imported.
+        Return the filename for the specified module.
         """
         # Deciding the filename requires working out where the code
         # would come from if the module was actually loaded
@@ -274,13 +236,8 @@
 
         Load the module specified by 'fullname'. 'fullname' must be the
         fully qualified (dotted) module name. It returns the imported
-        module, or raises ZipImportError if it could not be imported.
-
-        Deprecated since Python 3.10. Use exec_module() instead.
+        module, or raises ZipImportError if it wasn't found.
         """
-        msg = ("zipimport.zipimporter.load_module() is deprecated and slated for "
-               "removal in Python 3.12; use exec_module() instead")
-        _warnings.warn(msg, DeprecationWarning)
         code, ispackage, modpath = _get_module_code(self, fullname)
         mod = sys.modules.get(fullname)
         if mod is None or not isinstance(mod, _module_type):
@@ -323,18 +280,11 @@
                 return None
         except ZipImportError:
             return None
-        from importlib.readers import ZipReader
-        return ZipReader(self, fullname)
-
-
-    def invalidate_caches(self):
-        """Reload the file data of the archive path."""
-        try:
-            self._files = _read_directory(self.archive)
-            _zip_directory_cache[self.archive] = self._files
-        except ZipImportError:
-            _zip_directory_cache.pop(self.archive, None)
-            self._files = {}
+        if not _ZipImportResourceReader._registered:
+            from importlib.abc import ResourceReader
+            ResourceReader.register(_ZipImportResourceReader)
+            _ZipImportResourceReader._registered = True
+        return _ZipImportResourceReader(self, fullname)
 
 
     def __repr__(self):
@@ -630,15 +580,20 @@
 
 
 # Given the contents of a .py[co] file, unmarshal the data
-# and return the code object. Raises ImportError it the magic word doesn't
-# match, or if the recorded .py[co] metadata does not match the source.
+# and return the code object. Return None if it the magic word doesn't
+# match, or if the recorded .py[co] metadata does not match the source,
+# (we do this instead of raising an exception as we fall back
+# to .py if available and we don't want to mask other errors).
 def _unmarshal_code(self, pathname, fullpath, fullname, data):
     exc_details = {
         'name': fullname,
         'path': fullpath,
     }
 
-    flags = _bootstrap_external._classify_pyc(data, fullname, exc_details)
+    try:
+        flags = _bootstrap_external._classify_pyc(data, fullname, exc_details)
+    except ImportError:
+        return None
 
     hash_based = flags & 0b1 != 0
     if hash_based:
@@ -652,8 +607,11 @@
                     source_bytes,
                 )
 
-                _bootstrap_external._validate_hash_pyc(
-                    data, source_hash, fullname, exc_details)
+                try:
+                    _bootstrap_external._validate_hash_pyc(
+                        data, source_hash, fullname, exc_details)
+                except ImportError:
+                    return None
     else:
         source_mtime, source_size = \
             _get_mtime_and_size_of_source(self, fullpath)
@@ -739,7 +697,6 @@
 # 'fullname'.
 def _get_module_code(self, fullname):
     path = _get_module_path(self, fullname)
-    import_error = None
     for suffix, isbytecode, ispackage in _zip_searchorder:
         fullpath = path + suffix
         _bootstrap._verbose_message('trying {}{}{}', self.archive, path_sep, fullpath, verbosity=2)
@@ -750,12 +707,8 @@
         else:
             modpath = toc_entry[0]
             data = _get_data(self.archive, toc_entry)
-            code = None
             if isbytecode:
-                try:
-                    code = _unmarshal_code(self, modpath, fullpath, fullname, data)
-                except ImportError as exc:
-                    import_error = exc
+                code = _unmarshal_code(self, modpath, fullpath, fullname, data)
             else:
                 code = _compile_source(modpath, data)
             if code is None:
@@ -765,8 +718,75 @@
             modpath = toc_entry[0]
             return code, ispackage, modpath
     else:
-        if import_error:
-            msg = f"module load failed: {import_error}"
-            raise ZipImportError(msg, name=fullname) from import_error
-        else:
-            raise ZipImportError(f"can't find module {fullname!r}", name=fullname)
+        raise ZipImportError(f"can't find module {fullname!r}", name=fullname)
+
+
+class _ZipImportResourceReader:
+    """Private class used to support ZipImport.get_resource_reader().
+
+    This class is allowed to reference all the innards and private parts of
+    the zipimporter.
+    """
+    _registered = False
+
+    def __init__(self, zipimporter, fullname):
+        self.zipimporter = zipimporter
+        self.fullname = fullname
+
+    def open_resource(self, resource):
+        fullname_as_path = self.fullname.replace('.', '/')
+        path = f'{fullname_as_path}/{resource}'
+        from io import BytesIO
+        try:
+            return BytesIO(self.zipimporter.get_data(path))
+        except OSError:
+            raise FileNotFoundError(path)
+
+    def resource_path(self, resource):
+        # All resources are in the zip file, so there is no path to the file.
+        # Raising FileNotFoundError tells the higher level API to extract the
+        # binary data and create a temporary file.
+        raise FileNotFoundError
+
+    def is_resource(self, name):
+        # Maybe we could do better, but if we can get the data, it's a
+        # resource.  Otherwise it isn't.
+        fullname_as_path = self.fullname.replace('.', '/')
+        path = f'{fullname_as_path}/{name}'
+        try:
+            self.zipimporter.get_data(path)
+        except OSError:
+            return False
+        return True
+
+    def contents(self):
+        # This is a bit convoluted, because fullname will be a module path,
+        # but _files is a list of file names relative to the top of the
+        # archive's namespace.  We want to compare file paths to find all the
+        # names of things inside the module represented by fullname.  So we
+        # turn the module path of fullname into a file path relative to the
+        # top of the archive, and then we iterate through _files looking for
+        # names inside that "directory".
+        from pathlib import Path
+        fullname_path = Path(self.zipimporter.get_filename(self.fullname))
+        relative_path = fullname_path.relative_to(self.zipimporter.archive)
+        # Don't forget that fullname names a package, so its path will include
+        # __init__.py, which we want to ignore.
+        assert relative_path.name == '__init__.py'
+        package_path = relative_path.parent
+        subdirs_seen = set()
+        for filename in self.zipimporter._files:
+            try:
+                relative = Path(filename).relative_to(package_path)
+            except ValueError:
+                continue
+            # If the path of the file (which is relative to the top of the zip
+            # namespace), relative to the package given when the resource
+            # reader was created, has a parent, then it's a name in a
+            # subdirectory and thus we skip it.
+            parent_name = relative.parent.name
+            if len(parent_name) == 0:
+                yield relative.name
+            elif parent_name not in subdirs_seen:
+                subdirs_seen.add(parent_name)
+                yield parent_name
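
For context, a minimal sketch of driving zipimporter directly (temporary archive; names illustrative): load_module() works on both sides of this revert, although the dropped code deprecates it, along with find_module() and find_loader(), in favor of find_spec() and exec_module().

    import os
    import tempfile
    import zipfile
    import zipimport

    path = os.path.join(tempfile.mkdtemp(), 'mods.zip')
    with zipfile.ZipFile(path, 'w') as zf:
        zf.writestr('hello.py', 'GREETING = "hi"\n')

    zi = zipimport.zipimporter(path)
    mod = zi.load_module('hello')   # warns on the dropped side only
    print(mod.GREETING)             # hi
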
diff --git a/darwin-x86/bin/openssl b/darwin-x86/bin/openssl
index bb81a57..c6c6bdb 100755
--- a/darwin-x86/bin/openssl
+++ b/darwin-x86/bin/openssl
Binary files differ
diff --git a/darwin-x86/bin/py2-cmd b/darwin-x86/bin/py2-cmd
index c98dbe2..72d6b67 100755
--- a/darwin-x86/bin/py2-cmd
+++ b/darwin-x86/bin/py2-cmd
Binary files differ
diff --git a/darwin-x86/bin/py3-cmd b/darwin-x86/bin/py3-cmd
index 0f64784..9d333db 100755
--- a/darwin-x86/bin/py3-cmd
+++ b/darwin-x86/bin/py3-cmd
Binary files differ
diff --git a/darwin-x86/bin/py3-launcher-autorun64 b/darwin-x86/bin/py3-launcher-autorun64
index 748d463..d714e56 100755
--- a/darwin-x86/bin/py3-launcher-autorun64
+++ b/darwin-x86/bin/py3-launcher-autorun64
Binary files differ
diff --git a/darwin-x86/bin/py3-launcher64 b/darwin-x86/bin/py3-launcher64
index 2f1fbcd..8b43883 100755
--- a/darwin-x86/bin/py3-launcher64
+++ b/darwin-x86/bin/py3-launcher64
Binary files differ
diff --git a/linux-x86/bin/openssl b/linux-x86/bin/openssl
index 39d259a..8ff7907 100755
--- a/linux-x86/bin/openssl
+++ b/linux-x86/bin/openssl
Binary files differ
diff --git a/linux-x86/bin/py3-cmd b/linux-x86/bin/py3-cmd
index 5451cc9..0904311 100755
--- a/linux-x86/bin/py3-cmd
+++ b/linux-x86/bin/py3-cmd
Binary files differ
diff --git a/linux-x86/bin/py3-launcher-autorun64 b/linux-x86/bin/py3-launcher-autorun64
index 996c034..af3df5e 100755
--- a/linux-x86/bin/py3-launcher-autorun64
+++ b/linux-x86/bin/py3-launcher-autorun64
Binary files differ
diff --git a/linux-x86/bin/py3-launcher64 b/linux-x86/bin/py3-launcher64
index af07028..1705135 100755
--- a/linux-x86/bin/py3-launcher64
+++ b/linux-x86/bin/py3-launcher64
Binary files differ
diff --git a/linux_musl-x86/bin/openssl b/linux_musl-x86/bin/openssl
index 43cd826..efd8157 100755
--- a/linux_musl-x86/bin/openssl
+++ b/linux_musl-x86/bin/openssl
Binary files differ
diff --git a/linux_musl-x86/bin/py3-cmd b/linux_musl-x86/bin/py3-cmd
index 469f237..982c41e 100755
--- a/linux_musl-x86/bin/py3-cmd
+++ b/linux_musl-x86/bin/py3-cmd
Binary files differ
diff --git a/linux_musl-x86/bin/py3-launcher-autorun-static64 b/linux_musl-x86/bin/py3-launcher-autorun-static64
index b9dbb5b..b744124 100755
--- a/linux_musl-x86/bin/py3-launcher-autorun-static64
+++ b/linux_musl-x86/bin/py3-launcher-autorun-static64
Binary files differ
diff --git a/linux_musl-x86/bin/py3-launcher-autorun64 b/linux_musl-x86/bin/py3-launcher-autorun64
index 6ddb171..74635d9 100755
--- a/linux_musl-x86/bin/py3-launcher-autorun64
+++ b/linux_musl-x86/bin/py3-launcher-autorun64
Binary files differ
diff --git a/linux_musl-x86/bin/py3-launcher-static64 b/linux_musl-x86/bin/py3-launcher-static64
index dc37783..94fc036 100755
--- a/linux_musl-x86/bin/py3-launcher-static64
+++ b/linux_musl-x86/bin/py3-launcher-static64
Binary files differ
diff --git a/linux_musl-x86/bin/py3-launcher64 b/linux_musl-x86/bin/py3-launcher64
index dddd671..8841d22 100755
--- a/linux_musl-x86/bin/py3-launcher64
+++ b/linux_musl-x86/bin/py3-launcher64
Binary files differ
diff --git a/manifest.xml b/manifest.xml
index 026382f..e9bcb5f 100644
--- a/manifest.xml
+++ b/manifest.xml
@@ -5,13 +5,13 @@
 
   <default remote="aosp" revision="build-tools-release" sync-j="4" />
 
-  <project name="platform/build" path="build/make" revision="a732827c69b1f0bed6346505700acbe673df4f45" upstream="build-tools-release">
+  <project name="platform/build" path="build/make" revision="12f80f0c34752030f67d11d2497543b40af725e1" upstream="build-tools-release">
     <linkfile dest="build/tools" src="tools" />
 </project>
 
   <project name="platform/build/blueprint" path="build/blueprint" revision="534ae5cc77676e734f9a6658f48f6b55357c29fa" upstream="build-tools-release" />
 
-  <project name="platform/build/soong" path="build/soong" revision="51efa0bc413bcf22cbaedf627421947077d0d19f" upstream="build-tools-release">
+  <project name="platform/build/soong" path="build/soong" revision="81801da24fc8a1873a785d9e19581fed6725b34e" upstream="build-tools-release">
     <linkfile dest="Android.bp" src="root.bp" />
 
     <linkfile dest="bootstrap.bash" src="bootstrap.bash" />
@@ -19,11 +19,11 @@
 
   <project name="platform/external/golang-protobuf" path="external/golang-protobuf" revision="1d4a1b807962dbec87fc593e68b0ed20619504ce" upstream="build-tools-release" />
 
-  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="4c35c8228a5caa4a46b7ff9b3cb3bb5664b7b186" upstream="build-tools-release" />
+  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="a3471315830206a36b9e4f7c79392233723513af" upstream="build-tools-release" />
 
   <project clone-depth="1" groups="pdk" name="platform/prebuilts/remoteexecution-client" path="prebuilts/remoteexecution-client" revision="5dcada3170c979d617c46192c44c55a7762aabdb" upstream="build-tools-release" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="e9ac327a18797eff95f581f8868990b63fede493" upstream="build-tools-release" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="e18e8609b72d001a36ed210ec3895fc55c89db82" upstream="build-tools-release" />
 
   <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" revision="7454fb3d3249c268b7e61a6551e4a17dfd031ea9" upstream="build-tools-release" />
 
@@ -35,7 +35,7 @@
 
   <project clone-depth="1" groups="linux" name="platform/prebuilts/ninja/linux-x86" path="prebuilts/ninja/linux-x86" revision="cfaa2ca50f534ec2945c2587ffa119519268efad" upstream="build-tools-release" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="23822d84a740a310cbb90bc6e3aa4397022a6409" upstream="build-tools-release" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="531b1668cffbc49b5a0705d16521ab9d2318784b" upstream="build-tools-release" />
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/headers" path="prebuilts/gcc/darwin-x86/host/headers" revision="4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68" upstream="build-tools-release" />
 
@@ -45,7 +45,7 @@
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/ninja/darwin-x86" path="prebuilts/ninja/darwin-x86" revision="2433f0ca209251e67a89bbaf6c34da9fe54edeac" upstream="build-tools-release" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/windows-x86" path="prebuilts/clang/host/windows-x86" revision="6403bfef08d6a381529bfeab3bb6aeb64fa2a495" upstream="build-tools-release" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/windows-x86" path="prebuilts/clang/host/windows-x86" revision="6113c650a597695ccc3ff7a8d7cf99a74a842fea" upstream="build-tools-release" />
 
   <project clone-depth="1" groups="linux" name="platform/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8" revision="394eb6414b099b017e355ceae71d5bd22938c46e" upstream="build-tools-release" />
 
@@ -57,13 +57,13 @@
 
   <project clone-depth="1" name="platform/prebuilts/clang-tools" path="prebuilts/clang-tools" revision="acf69ef3af49f628d230a3075f861b2d624a43a8" upstream="build-tools-release" />
 
-  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="586429b23a3196f27108b6891402831a88dafd7b" upstream="build-tools-release" />
+  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="e7c40b8cf082b2a3f832c17af3e81ff87d5fd8e9" upstream="build-tools-release" />
 
   <project clone-depth="1" groups="pdk,tools" name="platform/prebuilts/tools" path="prebuilts/tools" revision="c3c160abf40cad1f0b12ae8ecba5acecb99e9674" upstream="build-tools-release" />
 
-  <project name="platform/bionic" path="bionic" revision="bcdc1834b5d812ce22456f8e0ff10cab3ecc8a86" upstream="build-tools-release" />
+  <project name="platform/bionic" path="bionic" revision="ffb60205090aa3d688f1420a8528acf31d3f70c5" upstream="build-tools-release" />
 
-  <project name="platform/development" path="development" revision="5fa095a623ae1102d81487c9b34a3e8b20fada11" upstream="build-tools-release" />
+  <project name="platform/development" path="development" revision="8614161df8e891bdd1ec09a2a401b91fee80387c" upstream="build-tools-release" />
 
   <project name="platform/external/arm-optimized-routines" path="external/arm-optimized-routines" revision="1c9813378c5dd9770241459eb61a4a039e026c6b" upstream="build-tools-release" />
 
@@ -107,7 +107,7 @@
 
   <project name="platform/external/google-java-format" path="external/google-java-format" revision="2e807a09ae0a6425158ce9f6dd5c973a18000c69" upstream="build-tools-release" />
 
-  <project name="platform/external/googletest" path="external/googletest" revision="d2d9eae964f8a1c9431c908d4a5dc502801c42ed" upstream="build-tools-release" />
+  <project name="platform/external/googletest" path="external/googletest" revision="23658bb9f8c596f2e2cc57082f59d117b46d123e" upstream="build-tools-release" />
 
   <project name="platform/external/guava" path="external/guava" revision="cea8b35c5ef589dc9ceff02bc8e85f6e8b721fb9" upstream="build-tools-release" />
 
@@ -157,7 +157,7 @@
 
   <project name="platform/external/zopfli" path="external/zopfli" revision="cf3f34689a36a959d4578e82adb46532e38c2eaa" upstream="build-tools-release" />
 
-  <project name="platform/system/core" path="system/core" revision="1e74fb535906d47e59ec05b91e18bbad46de1680" upstream="build-tools-release" />
+  <project name="platform/system/core" path="system/core" revision="0c9a655738d44648293801900b9c8ac5c485474c" upstream="build-tools-release" />
 
   <project name="platform/system/libbase" path="system/libbase" revision="a6c75ad3b52211a9368fda383b535c30f5c5ca58" upstream="build-tools-release" />
 
@@ -169,11 +169,11 @@
 
   <project name="platform/system/unwinding" path="system/unwinding" revision="c16e46be3ab8d4878f414a0b316406690d946a22" upstream="build-tools-release" />
 
-  <project name="platform/system/tools/xsdc" path="system/tools/xsdc" revision="ad69f814f6456bf07799cb0576ed7d4d3f554a71" upstream="build-tools-release" />
+  <project name="platform/system/tools/xsdc" path="system/tools/xsdc" revision="3ba8aa48392ab0559b4cb4e2c440eec0ae39ae00" upstream="build-tools-release" />
 
-  <project name="platform/test/app_compat/csuite" path="test/app_compat/csuite" revision="5387d15c213eb1a3366bae2545ad8d3f790643d3" upstream="build-tools-release" />
+  <project name="platform/test/app_compat/csuite" path="test/app_compat/csuite" revision="4b95fc80b1f366280b2e1a62f6a8a31b850b7e8b" upstream="build-tools-release" />
 
-  <project name="platform/art" path="art" revision="b40387e8d4dcaf13c8b4b5037988546b9c8129cd" upstream="build-tools-release" />
+  <project name="platform/art" path="art" revision="f814566140865b64ac4e084d3f2f0bd619a607da" upstream="build-tools-release" />
 
   <project name="platform/build/kati" path="build/kati" revision="6ad4268491b968b4cb257aeeb8c6a605ea8af8bf" upstream="build-tools-release" />
 
@@ -197,11 +197,11 @@
 
   <project name="platform/external/one-true-awk" path="external/one-true-awk" revision="539f77df4407fddf23dce84e20a59b1475b20e3f" upstream="build-tools-release" />
 
-  <project name="platform/external/openssl" path="external/openssl" revision="1998e09b0111afe2318e3c9b47be510c9ce6f07a" upstream="build-tools-release" />
+  <project name="platform/external/openssl" path="external/openssl" revision="005e88f9deb1e50b98694404bb85a688e119abeb" upstream="build-tools-release" />
 
   <project name="platform/external/python/cpython2" path="external/python/cpython2" revision="66a1bd5b30cf3915a4e087c40aaceb5561ff9e79" upstream="build-tools-release" />
 
-  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="69d927acd83d5b74e046bc7b55a18423b5a049d8" upstream="build-tools-release" />
+  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="711bec18e70680e202068d42885052f8b47257b3" upstream="build-tools-release" />
 
   <project name="platform/external/toybox" path="external/toybox" revision="5b39beb4a0780e107d42c46e78c42b1d9399953a" upstream="build-tools-release" />