Issue #19925: Added tests for the spwd module. Original patch by Vajrasky Kok.
diff --git a/.hgignore b/.hgignore
index c67ffb8..4dc93c4 100644
--- a/.hgignore
+++ b/.hgignore
@@ -18,6 +18,7 @@
 platform$
 pyconfig.h$
 python$
+python.bat$
 python.exe$
 python-config$
 python-config.py$
diff --git a/.hgtouch b/.hgtouch
index 7e3a5e7..00efbef 100644
--- a/.hgtouch
+++ b/.hgtouch
@@ -4,6 +4,8 @@
 
 Python/importlib.h: Lib/importlib/_bootstrap.py Modules/_freeze_importlib.c
 
+Include/opcode.h:  Lib/opcode.py Tools/scripts/generate_opcode_h.py
+
 Include/Python-ast.h: Parser/Python.asdl Parser/asdl.py Parser/asdl_c.py
 Python/Python-ast.c: Include/Python-ast.h
 
diff --git a/Doc/README.txt b/Doc/README.txt
index 6df12ab..fed1f74 100644
--- a/Doc/README.txt
+++ b/Doc/README.txt
@@ -3,7 +3,7 @@
 
 This directory contains the reStructuredText (reST) sources to the Python
 documentation.  You don't need to build them yourself, prebuilt versions are
-available at <https://docs.python.org/3.4/download.html>.
+available at <https://docs.python.org/dev/download.html>.
 
 Documentation on authoring Python documentation, including information about
 both style and markup, is available in the "Documenting Python" chapter of the
diff --git a/Doc/c-api/memory.rst b/Doc/c-api/memory.rst
index a82e1c2..ec5f691 100644
--- a/Doc/c-api/memory.rst
+++ b/Doc/c-api/memory.rst
@@ -92,8 +92,8 @@
 need to be held.
 
 The default raw memory block allocator uses the following functions:
-:c:func:`malloc`, :c:func:`realloc` and :c:func:`free`; call ``malloc(1)`` when
-requesting zero bytes.
+:c:func:`malloc`, :c:func:`calloc`, :c:func:`realloc` and :c:func:`free`; call
+``malloc(1)`` (or ``calloc(1, 1)``) when requesting zero bytes.
 
 .. versionadded:: 3.4
 
@@ -106,6 +106,17 @@
    been initialized in any way.
 
 
+.. c:function:: void* PyMem_RawCalloc(size_t nelem, size_t elsize)
+
+   Allocates *nelem* elements, each of size *elsize* bytes, and returns
+   a pointer of type :c:type:`void\*` to the allocated memory, or *NULL* if the
+   request fails. The memory is initialized to zeros. Requesting zero elements
+   or elements of size zero bytes returns a distinct non-*NULL* pointer if
+   possible, as if ``PyMem_RawCalloc(1, 1)`` had been called instead.
+
+   .. versionadded:: 3.5
+
+
 .. c:function:: void* PyMem_RawRealloc(void *p, size_t n)
 
    Resizes the memory block pointed to by *p* to *n* bytes. The contents will
@@ -136,8 +147,8 @@
 memory from the Python heap.
 
 The default memory block allocator uses the following functions:
-:c:func:`malloc`, :c:func:`realloc` and :c:func:`free`; call ``malloc(1)`` when
-requesting zero bytes.
+:c:func:`malloc`, :c:func:`calloc`, :c:func:`realloc` and :c:func:`free`; call
+``malloc(1)`` (or ``calloc(1, 1)``) when requesting zero bytes.
 
 .. warning::
 
@@ -152,6 +163,17 @@
    been called instead. The memory will not have been initialized in any way.
 
 
+.. c:function:: void* PyMem_Calloc(size_t nelem, size_t elsize)
+
+   Allocates *nelem* elements, each of size *elsize* bytes, and returns
+   a pointer of type :c:type:`void\*` to the allocated memory, or *NULL* if the
+   request fails. The memory is initialized to zeros. Requesting zero elements
+   or elements of size zero bytes returns a distinct non-*NULL* pointer if
+   possible, as if ``PyMem_Calloc(1, 1)`` had been called instead.
+
+   .. versionadded:: 3.5
+
+
 .. c:function:: void* PyMem_Realloc(void *p, size_t n)
 
    Resizes the memory block pointed to by *p* to *n* bytes. The contents will be
@@ -222,11 +244,17 @@
    +----------------------------------------------------------+---------------------------------------+
    | ``void* malloc(void *ctx, size_t size)``                 | allocate a memory block               |
    +----------------------------------------------------------+---------------------------------------+
+   | ``void* calloc(void *ctx, size_t nelem, size_t elsize)`` | allocate a memory block initialized   |
+   |                                                          | with zeros                            |
+   +----------------------------------------------------------+---------------------------------------+
    | ``void* realloc(void *ctx, void *ptr, size_t new_size)`` | allocate or resize a memory block     |
    +----------------------------------------------------------+---------------------------------------+
    | ``void free(void *ctx, void *ptr)``                      | free a memory block                   |
    +----------------------------------------------------------+---------------------------------------+
 
+   .. versionchanged:: 3.5
+      Added the ``calloc`` field.
+
 .. c:type:: PyMemAllocatorDomain
 
    Enum used to identify an allocator domain. Domains:
diff --git a/Doc/c-api/number.rst b/Doc/c-api/number.rst
index 21951c3..9bcb649 100644
--- a/Doc/c-api/number.rst
+++ b/Doc/c-api/number.rst
@@ -30,6 +30,14 @@
    the equivalent of the Python expression ``o1 * o2``.
 
 
+.. c:function:: PyObject* PyNumber_MatrixMultiply(PyObject *o1, PyObject *o2)
+
+   Returns the result of matrix multiplication on *o1* and *o2*, or *NULL* on
+   failure.  This is the equivalent of the Python expression ``o1 @ o2``.
+
+   .. versionadded:: 3.5
+
+
 .. c:function:: PyObject* PyNumber_FloorDivide(PyObject *o1, PyObject *o2)
 
    Return the floor of *o1* divided by *o2*, or *NULL* on failure.  This is
@@ -146,6 +154,15 @@
    the Python statement ``o1 *= o2``.
 
 
+.. c:function:: PyObject* PyNumber_InPlaceMatrixMultiply(PyObject *o1, PyObject *o2)
+
+   Returns the result of matrix multiplication on *o1* and *o2*, or *NULL* on
+   failure.  The operation is done *in-place* when *o1* supports it.  This is
+   the equivalent of the Python statement ``o1 @= o2``.
+
+   .. versionadded:: 3.5
+
+
 .. c:function:: PyObject* PyNumber_InPlaceFloorDivide(PyObject *o1, PyObject *o2)
 
    Returns the mathematical floor of dividing *o1* by *o2*, or *NULL* on failure.
diff --git a/Doc/c-api/typeobj.rst b/Doc/c-api/typeobj.rst
index 3a64724..f21d058 100644
--- a/Doc/c-api/typeobj.rst
+++ b/Doc/c-api/typeobj.rst
@@ -1139,6 +1139,9 @@
             binaryfunc nb_inplace_true_divide;
 
             unaryfunc nb_index;
+
+            binaryfunc nb_matrix_multiply;
+            binaryfunc nb_inplace_matrix_multiply;
        } PyNumberMethods;
 
    .. note::
diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst
index e1357fa..4c38a07 100644
--- a/Doc/distutils/apiref.rst
+++ b/Doc/distutils/apiref.rst
@@ -1099,13 +1099,13 @@
    during the build of Python), not the OS version of the current system.
 
    For universal binary builds on Mac OS X the architecture value reflects
-   the univeral binary status instead of the architecture of the current
+   the universal binary status instead of the architecture of the current
    processor. For 32-bit universal binaries the architecture is ``fat``,
    for 64-bit universal binaries the architecture is ``fat64``, and
    for 4-way universal binaries the architecture is ``universal``. Starting
    from Python 2.7 and Python 3.2 the architecture ``fat3`` is used for
    a 3-way universal build (ppc, i386, x86_64) and ``intel`` is used for
-   a univeral build with the i386 and x86_64 architectures
+   a universal build with the i386 and x86_64 architectures
 
    Examples of returned values on Mac OS X:
 
diff --git a/Doc/distutils/builtdist.rst b/Doc/distutils/builtdist.rst
index 83c68ae..a67c68e 100644
--- a/Doc/distutils/builtdist.rst
+++ b/Doc/distutils/builtdist.rst
@@ -355,7 +355,7 @@
 would create a 64bit installation executable on your 32bit version of Windows.
 
 To cross-compile, you must download the Python source code and cross-compile
-Python itself for the platform you are targetting - it is not possible from a
+Python itself for the platform you are targeting - it is not possible from a
 binary installation of Python (as the .lib etc file for other platforms are
 not included.)  In practice, this means the user of a 32 bit operating
 system will need to use Visual Studio 2008 to open the
diff --git a/Doc/howto/clinic.rst b/Doc/howto/clinic.rst
index 750ddbe..ca8e1cb 100644
--- a/Doc/howto/clinic.rst
+++ b/Doc/howto/clinic.rst
@@ -886,7 +886,7 @@
 Advanced converters
 -------------------
 
-Remeber those format units you skipped for your first
+Remember those format units you skipped for your first
 time because they were advanced?  Here's how to handle those too.
 
 The trick is, all those format units take arguments--either
@@ -1020,12 +1020,12 @@
 the ``"as"`` should come before the return converter.)
 
 There's one additional complication when using return converters: how do you
-indicate an error has occured?  Normally, a function returns a valid (non-``NULL``)
+indicate an error has occurred?  Normally, a function returns a valid (non-``NULL``)
 pointer for success, and ``NULL`` for failure.  But if you use an integer return converter,
 all integers are valid.  How can Argument Clinic detect an error?  Its solution: each return
 converter implicitly looks for a special value that indicates an error.  If you return
 that value, and an error has been set (``PyErr_Occurred()`` returns a true
-value), then the generated code will propogate the error.  Otherwise it will
+value), then the generated code will propagate the error.  Otherwise it will
 encode the value you return like normal.
 
 Currently Argument Clinic supports only a few return converters::
@@ -1573,7 +1573,7 @@
 ``line_prefix`` is a string that will be prepended to every line of Clinic's output;
 ``line_suffix`` is a string that will be appended to every line of Clinic's output.
 
-Both of these suport two format strings:
+Both of these support two format strings:
 
   ``{block comment start}``
     Turns into the string ``/*``, the start-comment text sequence for C files.
diff --git a/Doc/howto/pyporting.rst b/Doc/howto/pyporting.rst
index 9d7e859..17fc81b 100644
--- a/Doc/howto/pyporting.rst
+++ b/Doc/howto/pyporting.rst
@@ -60,7 +60,7 @@
 `trove classifiers`_ to signify what versions of Python it **currently**
 supports. At minimum you should specify the major version(s), e.g.
 ``Programming Language :: Python :: 2`` if your project currently only supports
-Python 2. It is preferrable that you be as specific as possible by listing every
+Python 2. It is preferable that you be as specific as possible by listing every
 major/minor version of Python that you support, e.g. if your project supports
 Python 2.6 and 2.7, then you want the classifiers of::
 
diff --git a/Doc/howto/regex.rst b/Doc/howto/regex.rst
index fbe763b..9ae04d7 100644
--- a/Doc/howto/regex.rst
+++ b/Doc/howto/regex.rst
@@ -852,7 +852,7 @@
 problem.  Both of them use a common syntax for regular expression extensions, so
 we'll look at that first.
 
-Perl 5 is well-known for its powerful additions to standard regular expressions.
+Perl 5 is well known for its powerful additions to standard regular expressions.
 For these new features the Perl developers couldn't choose new single-keystroke metacharacters
 or new special sequences beginning with ``\`` without making Perl's regular
 expressions confusingly different from standard REs.  If they chose ``&`` as a
diff --git a/Doc/howto/sockets.rst b/Doc/howto/sockets.rst
index 820beb5..b482c57 100644
--- a/Doc/howto/sockets.rst
+++ b/Doc/howto/sockets.rst
@@ -234,7 +234,7 @@
 following message. You'll need to put that aside and hold onto it, until it's
 needed.
 
-Prefixing the message with it's length (say, as 5 numeric characters) gets more
+Prefixing the message with its length (say, as 5 numeric characters) gets more
 complex, because (believe it or not), you may not get all 5 characters in one
 ``recv``. In playing around, you'll get away with it; but in high network loads,
 your code will very quickly break unless you use two ``recv`` loops - the first
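
An illustrative sketch of the length-prefix scheme discussed above, using the two ``recv`` loops it calls for (the helper names and the 5-character length field are assumptions made for the example)::

    def recv_exactly(sock, count):
        # Loop: a single recv() may return fewer bytes than requested.
        chunks = []
        while count:
            data = sock.recv(count)
            if not data:
                raise ConnectionError("socket closed mid-message")
            chunks.append(data)
            count -= len(data)
        return b"".join(chunks)

    def recv_message(sock):
        # First loop: read the fixed 5-character length prefix.
        length = int(recv_exactly(sock, 5))
        # Second loop: read the message body itself.
        return recv_exactly(sock, length)
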
diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst
index b1b5135..36dc5f3 100644
--- a/Doc/library/argparse.rst
+++ b/Doc/library/argparse.rst
@@ -1873,7 +1873,7 @@
 
    Arguments that are read from a file (see the *fromfile_prefix_chars*
    keyword argument to the :class:`ArgumentParser` constructor) are read one
-   argument per line. :meth:`convert_arg_line_to_args` can be overriden for
+   argument per line. :meth:`convert_arg_line_to_args` can be overridden for
    fancier reading.
 
    This method takes a single argument *arg_line* which is a string read from
diff --git a/Doc/library/cmd.rst b/Doc/library/cmd.rst
index 9722928..6d57b77 100644
--- a/Doc/library/cmd.rst
+++ b/Doc/library/cmd.rst
@@ -252,7 +252,7 @@
             'Move turtle to an absolute position with changing orientation.  GOTO 100 200'
             goto(*parse(arg))
         def do_home(self, arg):
-            'Return turtle to the home postion:  HOME'
+            'Return turtle to the home position:  HOME'
             home()
         def do_circle(self, arg):
             'Draw circle with given radius an options extent and steps:  CIRCLE 50'
diff --git a/Doc/library/code.rst b/Doc/library/code.rst
index 5b5d7cc..99bdedc 100644
--- a/Doc/library/code.rst
+++ b/Doc/library/code.rst
@@ -4,6 +4,7 @@
 .. module:: code
    :synopsis: Facilities to implement read-eval-print loops.
 
+**Source code:** :source:`Lib/code.py`
 
 The ``code`` module provides facilities to implement read-eval-print loops in
 Python.  Two classes and convenience functions are included which can be used to
@@ -165,4 +166,3 @@
    newline.  When the user enters the EOF key sequence, :exc:`EOFError` is raised.
    The base implementation reads from ``sys.stdin``; a subclass may replace this
    with a different implementation.
-
diff --git a/Doc/library/codecs.rst b/Doc/library/codecs.rst
index fb3af3b..36144e9 100644
--- a/Doc/library/codecs.rst
+++ b/Doc/library/codecs.rst
@@ -7,6 +7,7 @@
 .. sectionauthor:: Marc-André Lemburg <mal@lemburg.com>
 .. sectionauthor:: Martin v. Löwis <martin@v.loewis.de>
 
+**Source code:** :source:`Lib/codecs.py`
 
 .. index::
    single: Unicode
@@ -22,10 +23,9 @@
 
 It defines the following functions:
 
-.. function:: encode(obj, [encoding[, errors]])
+.. function:: encode(obj, encoding='utf-8', errors='strict')
 
-   Encodes *obj* using the codec registered for *encoding*. The default
-   encoding is ``utf-8``.
+   Encodes *obj* using the codec registered for *encoding*.
 
    *Errors* may be given to set the desired error handling scheme. The
    default error handler is ``strict`` meaning that encoding errors raise
@@ -33,10 +33,9 @@
    :exc:`UnicodeEncodeError`). Refer to :ref:`codec-base-classes` for more
    information on codec error handling.
 
-.. function:: decode(obj, [encoding[, errors]])
+.. function:: decode(obj, encoding='utf-8', errors='strict')
 
-   Decodes *obj* using the codec registered for *encoding*. The default
-   encoding is ``utf-8``.
+   Decodes *obj* using the codec registered for *encoding*.
 
    *Errors* may be given to set the desired error handling scheme. The
    default error handler is ``strict`` meaning that decoding errors raise
@@ -1420,4 +1419,3 @@
 BOM will be prepended to the UTF-8 encoded bytes. For the stateful encoder this
 is only done once (on the first write to the byte stream).  For decoding an
 optional UTF-8 encoded BOM at the start of the data will be skipped.
-
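
A brief sketch of the defaults documented above for :func:`encode` and :func:`decode` (``'utf-8'`` encoding, ``'strict'`` error handling); the sample string is arbitrary::

    import codecs

    data = codecs.encode('café')          # encoding defaults to 'utf-8'
    print(data)                           # b'caf\xc3\xa9'
    print(codecs.decode(data))            # café
    print(codecs.decode(b'\xff', errors='replace'))   # '\ufffd' rather than an error
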
diff --git a/Doc/library/collections.abc.rst b/Doc/library/collections.abc.rst
index 356f473..efa922b 100644
--- a/Doc/library/collections.abc.rst
+++ b/Doc/library/collections.abc.rst
@@ -179,7 +179,7 @@
 (3)
    The :class:`Set` mixin provides a :meth:`_hash` method to compute a hash value
    for the set; however, :meth:`__hash__` is not defined because not all sets
-   are hashable or immutable.  To add set hashabilty using mixins,
+   are hashable or immutable.  To add set hashability using mixins,
    inherit from both :meth:`Set` and :meth:`Hashable`, then define
    ``__hash__ = Set._hash``.
 
diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst
index f5fe12a..3ec3240 100644
--- a/Doc/library/collections.rst
+++ b/Doc/library/collections.rst
@@ -978,12 +978,15 @@
 keyword arguments, but their order is lost because Python's function call
 semantics pass-in keyword arguments using a regular unordered dictionary.
 
+.. versionchanged:: 3.5
+   The items, keys, and values :term:`views <view>` of :class:`OrderedDict` now
+   support reverse iteration using :func:`reversed`.
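
A short sketch of the reverse iteration over :class:`OrderedDict` views noted above; requires Python 3.5 or later, and the sample items are arbitrary::

    from collections import OrderedDict

    od = OrderedDict([('one', 1), ('two', 2), ('three', 3)])
    print(list(reversed(od.keys())))    # ['three', 'two', 'one']
    print(list(reversed(od.items())))   # [('three', 3), ('two', 2), ('one', 1)]
    print(list(reversed(od.values())))  # [3, 2, 1]
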
 
 :class:`OrderedDict` Examples and Recipes
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Since an ordered dictionary remembers its insertion order, it can be used
-in conjuction with sorting to make a sorted dictionary::
+in conjunction with sorting to make a sorted dictionary::
 
     >>> # regular unsorted dictionary
     >>> d = {'banana': 3, 'apple':4, 'pear': 1, 'orange': 2}
diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst
index 0495737..08c926a 100644
--- a/Doc/library/concurrent.futures.rst
+++ b/Doc/library/concurrent.futures.rst
@@ -175,6 +175,8 @@
    An :class:`Executor` subclass that executes calls asynchronously using a pool
    of at most *max_workers* processes.  If *max_workers* is ``None`` or not
    given, it will default to the number of processors on the machine.
+   If *max_workers* is less than or equal to ``0``, then a :exc:`ValueError`
+   will be raised.
 
    .. versionchanged:: 3.3
       When one of the worker processes terminates abruptly, a
diff --git a/Doc/library/configparser.rst b/Doc/library/configparser.rst
index 024d27c..4d65a82 100644
--- a/Doc/library/configparser.rst
+++ b/Doc/library/configparser.rst
@@ -11,6 +11,8 @@
 .. sectionauthor:: Christopher G. Petrilli <petrilli@amber.org>
 .. sectionauthor:: Ɓukasz Langa <lukasz@langa.pl>
 
+**Source code:** :source:`Lib/configparser.py`
+
 .. index::
    pair: .ini; file
    pair: configuration; file
@@ -386,7 +388,7 @@
 * All sections include ``DEFAULTSECT`` values as well which means that
   ``.clear()`` on a section may not leave the section visibly empty.  This is
   because default values cannot be deleted from the section (because technically
-  they are not there).  If they are overriden in the section, deleting causes
+  they are not there).  If they are overridden in the section, deleting causes
   the default value to be visible again.  Trying to delete a default value
   causes a ``KeyError``.
 
@@ -667,7 +669,7 @@
 
 More advanced customization may be achieved by overriding default values of
 these parser attributes.  The defaults are defined on the classes, so they
-may be overriden by subclasses or by attribute assignment.
+may be overridden by subclasses or by attribute assignment.
 
 .. attribute:: BOOLEAN_STATES
 
diff --git a/Doc/library/copyreg.rst b/Doc/library/copyreg.rst
index 50d5879..18306c7 100644
--- a/Doc/library/copyreg.rst
+++ b/Doc/library/copyreg.rst
@@ -9,7 +9,7 @@
    module: pickle
    module: copy
 
-The :mod:`copyreg` module offers a way to define fuctions used while pickling
+The :mod:`copyreg` module offers a way to define functions used while pickling
 specific objects.  The :mod:`pickle` and :mod:`copy` modules use those functions
 when pickling/copying those objects.  The module provides configuration
 information about object constructors which are not classes.
diff --git a/Doc/library/csv.rst b/Doc/library/csv.rst
index ccc9dc6..616df55 100644
--- a/Doc/library/csv.rst
+++ b/Doc/library/csv.rst
@@ -5,6 +5,7 @@
    :synopsis: Write and read tabular data to and from delimited files.
 .. sectionauthor:: Skip Montanaro <skip@pobox.com>
 
+**Source code:** :source:`Lib/csv.py`
 
 .. index::
    single: csv
diff --git a/Doc/library/datetime.rst b/Doc/library/datetime.rst
index e4f1eb2..553046f 100644
--- a/Doc/library/datetime.rst
+++ b/Doc/library/datetime.rst
@@ -7,6 +7,8 @@
 .. sectionauthor:: Tim Peters <tim@zope.com>
 .. sectionauthor:: A.M. Kuchling <amk@amk.ca>
 
+**Source code:** :source:`Lib/datetime.py`
+
 .. XXX what order should the types be discussed in?
 
 The :mod:`datetime` module supplies classes for manipulating dates and times in
@@ -1376,10 +1378,13 @@
 
 * efficient pickling
 
-* in Boolean contexts, a :class:`.time` object is considered to be true if and
-  only if, after converting it to minutes and subtracting :meth:`utcoffset` (or
-  ``0`` if that's ``None``), the result is non-zero.
+In boolean contexts, a :class:`.time` object is always considered to be true.
 
+.. versionchanged:: 3.5
+   Before Python 3.5, a :class:`.time` object was considered to be false if it
+   represented midnight in UTC.  This behavior was considered obscure and
+   error-prone and has been removed in Python 3.5.  See :issue:`13936` for full
+   details.
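
A minimal sketch of the behaviour change described above: a midnight :class:`.time` is no longer false (Python 3.5+)::

    from datetime import time

    print(bool(time(0, 0)))    # True on 3.5+; False on earlier versions
    print(bool(time(6, 30)))   # True on all versions
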
 
 Instance methods:
 
diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst
index 059ae7c..7052985 100644
--- a/Doc/library/decimal.rst
+++ b/Doc/library/decimal.rst
@@ -12,6 +12,8 @@
 .. moduleauthor:: Stefan Krah <skrah at bytereef.org>
 .. sectionauthor:: Raymond D. Hettinger <python at rcn.com>
 
+**Source code:** :source:`Lib/decimal.py`
+
 .. import modules for testing inline doctests with the Sphinx doctest builder
 .. testsetup:: *
 
@@ -742,7 +744,7 @@
       * ``"NaN"``, indicating that the operand is a quiet NaN (Not a Number).
       * ``"sNaN"``, indicating that the operand is a signaling NaN.
 
-   .. method:: quantize(exp, rounding=None, context=None, watchexp=True)
+   .. method:: quantize(exp, rounding=None, context=None)
 
       Return a value equal to the first operand after rounding and having the
       exponent of the second operand.
@@ -765,14 +767,8 @@
       ``context`` argument; if neither argument is given the rounding mode of
       the current thread's context is used.
 
-      If *watchexp* is set (default), then an error is returned whenever the
-      resulting exponent is greater than :attr:`Emax` or less than
-      :attr:`Etiny`.
-
-      .. deprecated:: 3.3
-         *watchexp* is an implementation detail from the pure Python version
-         and is not present in the C version. It will be removed in version
-         3.4, where it defaults to ``True``.
+      An error is returned whenever the resulting exponent is greater than
+      :attr:`Emax` or less than :attr:`Etiny`.
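
A short sketch of :meth:`quantize` as now documented, without the removed *watchexp* argument; the sample values are arbitrary::

    from decimal import Decimal, ROUND_HALF_UP

    print(Decimal('7.325').quantize(Decimal('0.01'), rounding=ROUND_HALF_UP))  # 7.33
    print(Decimal('1.41421356').quantize(Decimal('1.000')))                    # 1.414
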
 
    .. method:: radix()
 
@@ -2092,4 +2088,3 @@
 
    >>> Context(prec=5, rounding=ROUND_DOWN).create_decimal('1.2345678')
    Decimal('1.2345')
-
diff --git a/Doc/library/difflib.rst b/Doc/library/difflib.rst
index 2a75d2c..707f179 100644
--- a/Doc/library/difflib.rst
+++ b/Doc/library/difflib.rst
@@ -7,6 +7,8 @@
 .. sectionauthor:: Tim Peters <tim_one@users.sourceforge.net>
 .. Markup by Fred L. Drake, Jr. <fdrake@acm.org>
 
+**Source code:** :source:`Lib/difflib.py`
+
 .. testsetup::
 
    import sys
@@ -25,7 +27,9 @@
    little fancier than, an algorithm published in the late 1980's by Ratcliff and
    Obershelp under the hyperbolic name "gestalt pattern matching."  The idea is to
    find the longest contiguous matching subsequence that contains no "junk"
-   elements (the Ratcliff and Obershelp algorithm doesn't address junk).  The same
+   elements; these "junk" elements are ones that are uninteresting in some
+   sense, such as blank lines or whitespace.  (Handling junk is an
+   extension to the Ratcliff and Obershelp algorithm.) The same
    idea is then applied recursively to the pieces of the sequences to the left and
    to the right of the matching subsequence.  This does not yield minimal edit
    sequences, but does tend to yield matches that "look right" to people.
@@ -208,7 +212,7 @@
    Compare *a* and *b* (lists of strings); return a :class:`Differ`\ -style
    delta (a :term:`generator` generating the delta lines).
 
-   Optional keyword parameters *linejunk* and *charjunk* are for filter functions
+   Optional keyword parameters *linejunk* and *charjunk* are filtering functions
    (or ``None``):
 
    *linejunk*: A function that accepts a single string argument, and returns
@@ -222,7 +226,7 @@
    *charjunk*: A function that accepts a character (a string of length 1), and
    returns if the character is junk, or false if not. The default is module-level
    function :func:`IS_CHARACTER_JUNK`, which filters out whitespace characters (a
-   blank or tab; note: bad idea to include newline in this!).
+   blank or tab; it's a bad idea to include newline in this!).
 
    :file:`Tools/scripts/ndiff.py` is a command-line front-end to this function.
 
@@ -622,6 +626,12 @@
    length 1), and returns true if the character is junk. The default is ``None``,
    meaning that no character is considered junk.
 
+   These junk-filtering functions speed up matching to find
+   differences and do not cause any differing lines or characters to
+   be ignored.  Read the description of the
+   :meth:`~SequenceMatcher.find_longest_match` method's *isjunk*
+   parameter for an explanation.
+
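
An illustrative sketch of the point above: junk filters speed up matching, yet differing text is still reported; the sample strings follow the classic :class:`SequenceMatcher` example::

    from difflib import SequenceMatcher

    a = "private Thread currentThread;"
    b = "private volatile Thread currentThread;"
    # Blanks are treated as junk for matching purposes, but the inserted
    # word is still visible in the opcodes.
    sm = SequenceMatcher(lambda ch: ch == " ", a, b)
    print(round(sm.ratio(), 3))          # 0.866
    for tag, i1, i2, j1, j2 in sm.get_opcodes():
        print(tag, repr(a[i1:i2]), '->', repr(b[j1:j2]))
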
    :class:`Differ` objects are used (deltas generated) via a single method:
 
 
diff --git a/Doc/library/dis.rst b/Doc/library/dis.rst
index d86550f..fbabe35 100644
--- a/Doc/library/dis.rst
+++ b/Doc/library/dis.rst
@@ -364,6 +364,11 @@
    Implements ``TOS = TOS1 * TOS``.
 
 
+.. opcode:: BINARY_MATRIX_MULTIPLY
+
+   Implements ``TOS = TOS1 @ TOS``.
+
+
 .. opcode:: BINARY_FLOOR_DIVIDE
 
    Implements ``TOS = TOS1 // TOS``.
@@ -436,6 +441,11 @@
    Implements in-place ``TOS = TOS1 * TOS``.
 
 
+.. opcode:: INPLACE_MATRIX_MULTIPLY
+
+   Implements in-place ``TOS = TOS1 @ TOS``.
+
+
 .. opcode:: INPLACE_FLOOR_DIVIDE
 
    Implements in-place ``TOS = TOS1 // TOS``.
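
A quick sketch showing where the new opcodes appear (Python 3.5 bytecode; the exact disassembly layout varies between versions)::

    import dis

    dis.dis(compile("a @ b", "<demo>", "eval"))    # emits BINARY_MATRIX_MULTIPLY
    dis.dis(compile("a @= b", "<demo>", "exec"))   # emits INPLACE_MATRIX_MULTIPLY
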
diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst
index 50626e9..fb63fde 100644
--- a/Doc/library/doctest.rst
+++ b/Doc/library/doctest.rst
@@ -1058,15 +1058,9 @@
 
    This function uses the same search technique as :func:`testmod`.
 
-   .. note::
-      Unlike :func:`testmod` and :class:`DocTestFinder`, this function raises
-      a :exc:`ValueError` if *module* contains no docstrings.  You can prevent
-      this error by passing a :class:`DocTestFinder` instance as the
-      *test_finder* argument with its *exclude_empty* keyword argument set
-      to ``False``::
-
-         >>> finder = doctest.DocTestFinder(exclude_empty=False)
-         >>> suite = doctest.DocTestSuite(test_finder=finder)
+   .. versionchanged:: 3.5
+      :func:`DocTestSuite` returns an empty :class:`unittest.TestSuite` if *module*
+      contains no docstrings instead of raising :exc:`ValueError`.
 
 
 Under the covers, :func:`DocTestSuite` creates a :class:`unittest.TestSuite` out
diff --git a/Doc/library/formatter.rst b/Doc/library/formatter.rst
index 1847a80..a515f74 100644
--- a/Doc/library/formatter.rst
+++ b/Doc/library/formatter.rst
@@ -5,7 +5,7 @@
    :synopsis: Generic output formatter and device interface.
    :deprecated:
 
-.. deprecated:: 3.4
+.. deprecated-removed:: 3.4 3.6
    Due to lack of usage, the formatter module has been deprecated and is slated
    for removal in Python 3.6.
 
diff --git a/Doc/library/http.server.rst b/Doc/library/http.server.rst
index 0d8e7fe..ec54643 100644
--- a/Doc/library/http.server.rst
+++ b/Doc/library/http.server.rst
@@ -220,7 +220,7 @@
 
    .. method:: send_response_only(code, message=None)
 
-      Sends the reponse header only, used for the purposes when ``100
+      Sends the response header only, used for the purposes when ``100
       Continue`` response is sent by the server to the client. The headers not
       buffered and sent directly the output stream.If the *message* is not
       specified, the HTTP message corresponding the response *code*  is sent.
diff --git a/Doc/library/imghdr.rst b/Doc/library/imghdr.rst
index 9e89523..06faa88 100644
--- a/Doc/library/imghdr.rst
+++ b/Doc/library/imghdr.rst
@@ -48,6 +48,11 @@
 +------------+-----------------------------------+
 | ``'png'``  | Portable Network Graphics         |
 +------------+-----------------------------------+
+| ``'webp'`` | WebP files                        |
++------------+-----------------------------------+
+
+.. versionchanged:: 3.5
+   The *webp* type was added.
 
 You can extend the list of file types :mod:`imghdr` can recognize by appending
 to this variable:
diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst
index 09a5d71..0adeefb 100644
--- a/Doc/library/importlib.rst
+++ b/Doc/library/importlib.rst
@@ -499,7 +499,7 @@
         .. versionchanged:: 3.4
            Raises :exc:`ImportError` instead of :exc:`NotImplementedError`.
 
-    .. method:: source_to_code(data, path='<string>')
+    .. staticmethod:: source_to_code(data, path='<string>')
 
         Create a code object from Python source.
 
@@ -508,8 +508,14 @@
         the "path" to where the source code originated from, which can be an
         abstract concept (e.g. location in a zip file).
 
+        The resulting code object can then be executed in a module by
+        running ``exec(code, module.__dict__)``.
+
         .. versionadded:: 3.4
 
+        .. versionchanged:: 3.5
+           Made the method static.
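
A small sketch of the static :meth:`source_to_code` together with the ``exec(code, module.__dict__)`` pattern mentioned above (Python 3.5+; the module name and source are arbitrary)::

    import importlib.abc
    import types

    code = importlib.abc.InspectLoader.source_to_code("ANSWER = 42", "<demo>")
    module = types.ModuleType("demo")
    exec(code, module.__dict__)
    print(module.ANSWER)    # 42
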
+
     .. method:: exec_module(module)
 
        Implementation of :meth:`Loader.exec_module`.
@@ -1201,3 +1207,38 @@
    module will be file-based.
 
    .. versionadded:: 3.4
+
+.. class:: LazyLoader(loader)
+
+   A class which postpones the execution of the loader of a module until the
+   module has an attribute accessed.
+
+   This class **only** works with loaders that define
+   :meth:`importlib.abc.Loader.exec_module` as control over what module type
+   is used for the module is required. For the same reasons, the loader
+   **cannot** define :meth:`importlib.abc.Loader.create_module`. Finally,
+   modules which substitute the object placed into :attr:`sys.modules` will
+   not work as there is no way to properly replace the module references
+   throughout the interpreter safely; :exc:`ValueError` is raised if such a
+   substitution is detected.
+
+   .. note::
+      For projects where startup time is critical, this class allows for
+      potentially minimizing the cost of loading a module if it is never used.
+      For projects where startup time is not essential, use of this class is
+      **heavily** discouraged due to error messages created during loading being
+      postponed and thus occurring out of context.
+
+   .. versionadded:: 3.5
+
+   .. classmethod:: factory(loader)
+
+      A static method which returns a callable that creates a lazy loader. This
+      is meant to be used in situations where the loader is passed by class
+      instead of by instance.
+      ::
+
+        suffixes = importlib.machinery.SOURCE_SUFFIXES
+        loader = importlib.machinery.SourceFileLoader
+        lazy_loader = importlib.util.LazyLoader.factory(loader)
+        finder = importlib.machinery.FileFinder(path, [(lazy_loader, suffixes)])
diff --git a/Doc/library/inspect.rst b/Doc/library/inspect.rst
index 0c08712..21408f4 100644
--- a/Doc/library/inspect.rst
+++ b/Doc/library/inspect.rst
@@ -462,6 +462,9 @@
    Signature objects are *immutable*.  Use :meth:`Signature.replace` to make a
    modified copy.
 
+   .. versionchanged:: 3.5
+      Signature objects are picklable and hashable.
+
    .. attribute:: Signature.empty
 
       A special class-level marker to specify absence of a return annotation.
@@ -506,12 +509,29 @@
          >>> str(new_sig)
          "(a, b) -> 'new return anno'"
 
+   .. classmethod:: Signature.from_callable(obj)
+
+       Return a :class:`Signature` (or its subclass) object for a given callable
+       ``obj``. This method simplifies subclassing of :class:`Signature`:
+
+       ::
+
+         class MySignature(Signature):
+             pass
+         sig = MySignature.from_callable(min)
+         assert isinstance(sig, MySignature)
+
+       .. versionadded:: 3.5
+
 
 .. class:: Parameter(name, kind, \*, default=Parameter.empty, annotation=Parameter.empty)
 
    Parameter objects are *immutable*.  Instead of modifying a Parameter object,
    you can use :meth:`Parameter.replace` to create a modified copy.
 
+   .. versionchanged:: 3.5
+      Parameter objects are picklable and hashable.
+
    .. attribute:: Parameter.empty
 
       A special class-level marker to specify absence of default values and
diff --git a/Doc/library/ipaddress.rst b/Doc/library/ipaddress.rst
index 9625e71..d48fac9 100644
--- a/Doc/library/ipaddress.rst
+++ b/Doc/library/ipaddress.rst
@@ -103,7 +103,7 @@
    1. A string in decimal-dot notation, consisting of four decimal integers in
       the inclusive range 0-255, separated by dots (e.g. ``192.168.0.1``). Each
       integer represents an octet (byte) in the address. Leading zeroes are
-      tolerated only for values less then 8 (as there is no ambiguity
+      tolerated only for values less than 8 (as there is no ambiguity
       between the decimal and octal interpretations of such strings).
    2. An integer that fits into 32 bits.
    3. An integer packed into a :class:`bytes` object of length 4 (most
@@ -146,6 +146,20 @@
       the appropriate length (most significant octet first). This is 4 bytes
       for IPv4 and 16 bytes for IPv6.
 
+   .. attribute:: reverse_pointer
+
+      The name of the reverse DNS PTR record for the IP address, e.g.::
+
+          >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+          '1.0.0.127.in-addr.arpa'
+          >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+          '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+      This is the name that could be used for performing a PTR lookup, not the
+      resolved hostname itself.
+
+   .. versionadded:: 3.5
+
    .. attribute:: is_multicast
 
       ``True`` if the address is reserved for multicast use.  See
@@ -226,6 +240,7 @@
    :class:`IPv4Address` class:
 
    .. attribute:: packed
+   .. attribute:: reverse_pointer
    .. attribute:: version
    .. attribute:: max_prefixlen
    .. attribute:: is_multicast
@@ -377,6 +392,12 @@
    3. An integer packed into a :class:`bytes` object of length 4, big-endian.
       The interpretation is similar to an integer *address*.
 
+   4. A two-tuple of an address description and a netmask, where the address
+      description is either a string, a 32-bit integer, a 4-byte packed
+      integer, or an existing IPv4Address object; and the netmask is either
+      an integer representing the prefix length (e.g. ``24``) or a string
+      representing the prefix mask (e.g. ``255.255.255.0``).
+
    An :exc:`AddressValueError` is raised if *address* is not a valid IPv4
    address.  A :exc:`NetmaskValueError` is raised if the mask is not valid for
    an IPv4 address.
@@ -389,6 +410,10 @@
    objects will raise :exc:`TypeError` if the argument's IP version is
    incompatible to ``self``
 
+   .. versionchanged:: 3.5
+
+      Added the two-tuple form for the *address* constructor parameter.
+
    .. attribute:: version
    .. attribute:: max_prefixlen
 
@@ -553,6 +578,11 @@
    3. An integer packed into a :class:`bytes` object of length 16, bit-endian.
       The interpretation is similar to an integer *address*.
 
+   4. A two-tuple of an address description and a netmask, where the address
+      description is either a string, a 128-bit integer, a 16-byte packed
+      integer, or an existing IPv6Address object; and the netmask is an
+      integer representing the prefix length.
+
    An :exc:`AddressValueError` is raised if *address* is not a valid IPv6
    address.  A :exc:`NetmaskValueError` is raised if the mask is not valid for
    an IPv6 address.
@@ -561,6 +591,10 @@
    then :exc:`ValueError` is raised.  Otherwise, the host bits are masked out
    to determine the appropriate network address.
 
+   .. versionchanged:: 3.5
+
+      Added the two-tuple form for the *address* constructor parameter.
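
An illustrative sketch of the two-tuple constructor form described above for both address families (Python 3.5+; the addresses are documentation examples)::

    import ipaddress

    print(ipaddress.ip_network(('192.0.2.0', 24)))                # 192.0.2.0/24
    print(ipaddress.IPv4Network(('192.0.2.0', '255.255.255.0')))  # 192.0.2.0/24
    print(ipaddress.IPv6Network(('2001:db8::', 32)))              # 2001:db8::/32
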
+
    .. attribute:: version
    .. attribute:: max_prefixlen
    .. attribute:: is_multicast
diff --git a/Doc/library/itertools.rst b/Doc/library/itertools.rst
index f489535..1f0bced 100644
--- a/Doc/library/itertools.rst
+++ b/Doc/library/itertools.rst
@@ -87,10 +87,15 @@
 
 .. function:: accumulate(iterable[, func])
 
-    Make an iterator that returns accumulated sums. Elements may be any addable
-    type including :class:`~decimal.Decimal` or :class:`~fractions.Fraction`.
-    If the optional *func* argument is supplied, it should be a function of two
-    arguments and it will be used instead of addition.
+    Make an iterator that returns accumulated sums, or accumulated
+    results of other binary functions (specified via the optional
+    *func* argument).  If *func* is supplied, it should be a function
+    of two arguments. Elements of the input *iterable* may be any type
+    that can be accepted as arguments to *func*. (For example, with
+    the default operation of addition, elements may be any addable
+    type including :class:`~decimal.Decimal` or
+    :class:`~fractions.Fraction`.) If the input iterable is empty, the
+    output iterable will also be empty.
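
A short sketch of :func:`accumulate` with and without the *func* argument, matching the description above; the input values are arbitrary::

    import operator
    from itertools import accumulate

    print(list(accumulate([1, 2, 3, 4, 5])))                # [1, 3, 6, 10, 15]
    print(list(accumulate([1, 2, 3, 4, 5], operator.mul)))  # [1, 2, 6, 24, 120]
    print(list(accumulate([])))                             # []
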
 
     Equivalent to::
 
diff --git a/Doc/library/json.rst b/Doc/library/json.rst
index 5d97ee8..d6bdd8a 100644
--- a/Doc/library/json.rst
+++ b/Doc/library/json.rst
@@ -104,6 +104,8 @@
     $ echo '{1.2:3.4}' | python -mjson.tool
     Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
 
+See :ref:`json-commandline` for detailed documentation.
+
 .. highlight:: python3
 
 .. note::
@@ -563,3 +565,54 @@
    {'x': 3}
 
 The *object_pairs_hook* parameter can be used to alter this behavior.
+
+.. highlight:: bash
+
+.. _json-commandline:
+
+Command Line Interface
+----------------------
+
+The :mod:`json.tool` module provides a simple command line interface to validate
+and pretty-print JSON objects.
+
+If the optional :option:`infile` and :option:`outfile` arguments are not
+specified, :attr:`sys.stdin` and :attr:`sys.stdout` will be used respectively::
+
+    $ echo '{"json": "obj"}' | python -m json.tool
+    {
+        "json": "obj"
+    }
+    $ echo '{1.2:3.4}' | python -m json.tool
+    Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
+
+
+Command line options
+^^^^^^^^^^^^^^^^^^^^
+
+.. cmdoption:: infile
+
+   The JSON file to be validated or pretty-printed::
+
+      $ python -m json.tool mp_films.json
+      [
+          {
+              "title": "And Now for Something Completely Different",
+              "year": 1971
+          },
+          {
+              "title": "Monty Python and the Holy Grail",
+              "year": 1975
+          }
+      ]
+
+   If *infile* is not specified, read from :attr:`sys.stdin`.
+
+.. cmdoption:: outfile
+
+   Write the output of the *infile* to the given *outfile*. Otherwise, write it
+   to :attr:`sys.stdout`.
+
+.. cmdoption:: -h, --help
+
+   Show the help message.
diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst
index 315c168..9f3b074 100644
--- a/Doc/library/logging.handlers.rst
+++ b/Doc/library/logging.handlers.rst
@@ -435,7 +435,7 @@
    .. method:: createSocket()
 
       Tries to create a socket; on failure, uses an exponential back-off
-      algorithm.  On intial failure, the handler will drop the message it was
+      algorithm.  On initial failure, the handler will drop the message it was
       trying to send.  When subsequent messages are handled by the same
       instance, it will not try connecting until some time has passed.  The
       default parameters are such that the initial delay is one second, and if
diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst
index 5fac730..409b2cb 100644
--- a/Doc/library/multiprocessing.rst
+++ b/Doc/library/multiprocessing.rst
@@ -1320,6 +1320,9 @@
    Note that accessing the ctypes object through the wrapper can be a lot slower
    than accessing the raw ctypes object.
 
+   .. versionchanged:: 3.5
+      Synchronized objects support the :term:`context manager` protocol.
+
 
 The table below compares the syntax for creating shared ctypes objects from
 shared memory with the normal ctypes syntax.  (In the table ``MyStruct`` is some
diff --git a/Doc/library/operator.rst b/Doc/library/operator.rst
index 3bcbaa4..3654d13 100644
--- a/Doc/library/operator.rst
+++ b/Doc/library/operator.rst
@@ -138,6 +138,14 @@
    Return ``a * b``, for *a* and *b* numbers.
 
 
+.. function:: matmul(a, b)
+              __matmul__(a, b)
+
+   Return ``a @ b``.
+
+   .. versionadded:: 3.5
+
+
 .. function:: neg(obj)
               __neg__(obj)
 
@@ -400,6 +408,8 @@
 +-----------------------+-------------------------+---------------------------------------+
 | Multiplication        | ``a * b``               | ``mul(a, b)``                         |
 +-----------------------+-------------------------+---------------------------------------+
+| Matrix Multiplication | ``a @ b``               | ``matmul(a, b)``                      |
++-----------------------+-------------------------+---------------------------------------+
 | Negation (Arithmetic) | ``- a``                 | ``neg(a)``                            |
 +-----------------------+-------------------------+---------------------------------------+
 | Negation (Logical)    | ``not a``               | ``not_(a)``                           |
@@ -508,6 +518,14 @@
    ``a = imul(a, b)`` is equivalent to ``a *= b``.
 
 
+.. function:: imatmul(a, b)
+              __imatmul__(a, b)
+
+   ``a = imatmul(a, b)`` is equivalent to ``a @= b``.
+
+   .. versionadded:: 3.5
+
+
 .. function:: ior(a, b)
               __ior__(a, b)
 
diff --git a/Doc/library/os.rst b/Doc/library/os.rst
index 3d492ba..54b2542 100644
--- a/Doc/library/os.rst
+++ b/Doc/library/os.rst
@@ -1087,6 +1087,9 @@
    All platforms support sockets as *out* file descriptor, and some platforms
    allow other types (e.g. regular file, pipe) as well.
 
+   Cross-platform applications should not use the *headers*, *trailers* and
+   *flags* arguments.
+
    Availability: Unix.
 
    .. versionadded:: 3.3
@@ -2730,10 +2733,27 @@
    Availability: Unix.
 
 
-.. function:: popen(...)
+.. function:: popen(command, mode='r', buffering=-1)
 
-   Run child processes, returning opened pipes for communications.  These functions
-   are described in section :ref:`os-newstreams`.
+   Open a pipe to or from *command*.  The return value is an open file object
+   connected to the pipe, which can be read or written depending on whether *mode*
+   is ``'r'`` (default) or ``'w'``. The *buffering* argument has the same meaning as
+   the corresponding argument to the built-in :func:`open` function. The
+   returned file object reads or writes text strings rather than bytes.
+
+   The ``close`` method returns :const:`None` if the subprocess exited
+   successfully, or the subprocess's return code if there was an
+   error. On POSIX systems, if the return code is positive it
+   represents the return value of the process left-shifted by one
+   byte.  If the return code is negative, the process was terminated
+   by the signal given by the negated value of the return code.  (For
+   example, the return value might be ``- signal.SIGKILL`` if the
+   subprocess was killed.)  On Windows systems, the return value
+   contains the signed integer return code from the child process.
+
+   This is implemented using :class:`subprocess.Popen`; see that class's
+   documentation for more powerful ways to manage and communicate with
+   subprocesses.
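
A brief sketch of :func:`popen` as documented above; ``echo`` and ``exit`` are assumed shell commands used only for illustration::

    import os

    with os.popen('echo hello') as pipe:   # mode defaults to 'r'; text, not bytes
        print(pipe.read().strip())         # hello

    status = os.popen('exit 3').close()    # non-zero exit status
    print(status)                          # 768 on POSIX: 3 shifted left by one byte
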
 
 
 .. function:: spawnl(mode, path, ...)
diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst
index ec1dc4f..0a2a4e3 100644
--- a/Doc/library/pathlib.rst
+++ b/Doc/library/pathlib.rst
@@ -884,6 +884,25 @@
    Remove this directory.  The directory must be empty.
 
 
+.. method:: Path.samefile(other_path)
+
+   Return whether this path points to the same file as *other_path*, which
+   can be either a Path object, or a string.  The semantics are similar
+   to :func:`os.path.samefile` and :func:`os.path.samestat`.
+
+   An :exc:`OSError` can be raised if either file cannot be accessed for some
+   reason.
+
+      >>> p = Path('spam')
+      >>> q = Path('eggs')
+      >>> p.samefile(q)
+      False
+      >>> p.samefile('spam')
+      True
+
+   .. versionadded:: 3.5
+
+
 .. method:: Path.symlink_to(target, target_is_directory=False)
 
    Make this path a symbolic link to *target*.  Under Windows,
diff --git a/Doc/library/pkgutil.rst b/Doc/library/pkgutil.rst
index 13ea7b9..5d3295d 100644
--- a/Doc/library/pkgutil.rst
+++ b/Doc/library/pkgutil.rst
@@ -58,7 +58,7 @@
 
    .. deprecated:: 3.3
       This emulation is no longer needed, as the standard import mechanism
-      is now fully PEP 302 compliant and available in :mod:`importlib`
+      is now fully PEP 302 compliant and available in :mod:`importlib`.
 
 
 .. class:: ImpLoader(fullname, file, filename, etc)
@@ -67,7 +67,7 @@
 
    .. deprecated:: 3.3
       This emulation is no longer needed, as the standard import mechanism
-      is now fully PEP 302 compliant and available in :mod:`importlib`
+      is now fully PEP 302 compliant and available in :mod:`importlib`.
 
 
 .. function:: find_loader(fullname)
diff --git a/Doc/library/plistlib.rst b/Doc/library/plistlib.rst
index 6a2d6b4..b0d5bcf 100644
--- a/Doc/library/plistlib.rst
+++ b/Doc/library/plistlib.rst
@@ -129,7 +129,7 @@
    and binary) file object. Returns the unpacked root object (which usually
    is a dictionary).
 
-   This function calls :func:`load` to do the actual work, the the documentation
+   This function calls :func:`load` to do the actual work, see the documentation
    of :func:`that function <load>` for an explanation of the keyword arguments.
 
    .. note::
diff --git a/Doc/library/random.rst b/Doc/library/random.rst
index 11dd367..f8b7727 100644
--- a/Doc/library/random.rst
+++ b/Doc/library/random.rst
@@ -46,8 +46,7 @@
 .. warning::
 
    The pseudo-random generators of this module should not be used for
-   security purposes.  Use :func:`os.urandom` or :class:`SystemRandom` if
-   you require a cryptographically secure pseudo-random number generator.
+   security purposes.
 
 
 Bookkeeping functions:
diff --git a/Doc/library/resource.rst b/Doc/library/resource.rst
index f8112cc..7c0e4ca 100644
--- a/Doc/library/resource.rst
+++ b/Doc/library/resource.rst
@@ -45,7 +45,7 @@
 
 .. data:: RLIM_INFINITY
 
-   Constant used to represent the the limit for an unlimited resource.
+   Constant used to represent the limit for an unlimited resource.
 
 
 .. function:: getrlimit(resource)
diff --git a/Doc/library/select.rst b/Doc/library/select.rst
index 973a0cc..a5e0c13 100644
--- a/Doc/library/select.rst
+++ b/Doc/library/select.rst
@@ -210,7 +210,7 @@
    .. warning::
 
       Registering a file descriptor that's already registered is not an
-      error, but the result is undefined. The appropiate action is to
+      error, but the result is undefined. The appropriate action is to
       unregister or modify it first. This is an important difference
       compared with :c:func:`poll`.
 
diff --git a/Doc/library/selectors.rst b/Doc/library/selectors.rst
index 98377c8..8bd9e1c 100644
--- a/Doc/library/selectors.rst
+++ b/Doc/library/selectors.rst
@@ -45,6 +45,7 @@
    +-- SelectSelector
    +-- PollSelector
    +-- EpollSelector
+   +-- DevpollSelector
    +-- KqueueSelector
 
 
@@ -207,6 +208,16 @@
       This returns the file descriptor used by the underlying
       :func:`select.epoll` object.
 
+.. class:: DevpollSelector()
+
+   :func:`select.devpoll`-based selector.
+
+   .. method:: fileno()
+
+      This returns the file descriptor used by the underlying
+      :func:`select.devpoll` object.
+
+   .. versionadded:: 3.5
 
 .. class:: KqueueSelector()
 
diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst
index e4f348c..7cc397d 100644
--- a/Doc/library/shutil.rst
+++ b/Doc/library/shutil.rst
@@ -341,7 +341,7 @@
 
    On Windows, the current directory is always prepended to the *path* whether
    or not you use the default or provide your own, which is the behavior the
-   command shell uses when finding executables.  Additionaly, when finding the
+   command shell uses when finding executables.  Additionally, when finding the
    *cmd* in the *path*, the ``PATHEXT`` environment variable is checked.  For
    example, if you call ``shutil.which("python")``, :func:`which` will search
    ``PATHEXT`` to know that it should look for ``python.exe`` within the *path*
@@ -421,6 +421,26 @@
    copytree(source, destination, ignore=_logpath)
 
 
+.. _shutil-rmtree-example:
+
+rmtree example
+~~~~~~~~~~~~~~
+
+This example shows how to remove a directory tree on Windows where some
+of the files have their read-only bit set. It uses the onerror callback
+to clear the readonly bit and reattempt the remove. Any subsequent failure
+will propagate. ::
+
+    import os, stat
+    import shutil
+
+    def remove_readonly(func, path, _):
+        "Clear the readonly bit and reattempt the removal"
+        os.chmod(path, stat.S_IWRITE)
+        func(path)
+
+    shutil.rmtree(directory, onerror=remove_readonly)
+
 .. _archiving-operations:
 
 Archiving operations
diff --git a/Doc/library/signal.rst b/Doc/library/signal.rst
index 84e2836..a97ce66 100644
--- a/Doc/library/signal.rst
+++ b/Doc/library/signal.rst
@@ -65,6 +65,16 @@
 Module contents
 ---------------
 
+.. versionchanged:: 3.5
+   signal (SIG*), handler (:const:`SIG_DFL`, :const:`SIG_IGN`) and sigmask
+   (:const:`SIG_BLOCK`, :const:`SIG_UNBLOCK`, :const:`SIG_SETMASK`)
+   related constants listed below were turned into
+   :class:`enums <enum.IntEnum>`.
+   :func:`getsignal`, :func:`pthread_sigmask`, :func:`sigpending` and
+   :func:`sigwait` functions return human-readable
+   :class:`enums <enum.IntEnum>`.
+
+
 The variables defined in the :mod:`signal` module are:
 
 
diff --git a/Doc/library/site.rst b/Doc/library/site.rst
index e57b8cc..ee2a68a 100644
--- a/Doc/library/site.rst
+++ b/Doc/library/site.rst
@@ -38,7 +38,7 @@
 if it refers to an existing directory, and if so, adds it to ``sys.path`` and
 also inspects the newly added path for configuration files.
 
-.. deprecated:: 3.4
+.. deprecated-removed:: 3.4 3.5
    Support for the "site-python" directory will be removed in 3.5.
 
 If a file named "pyvenv.cfg" exists one directory above sys.executable,
@@ -184,7 +184,7 @@
    unless the Python interpreter was started with the :option:`-S` flag.
 
    .. versionchanged:: 3.3
-      This function used to be called unconditionnally.
+      This function used to be called unconditionally.
 
 
 .. function:: addsitedir(sitedir, known_paths=None)
diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst
index 54c6bad..3a9d611 100644
--- a/Doc/library/socket.rst
+++ b/Doc/library/socket.rst
@@ -906,12 +906,15 @@
    On other platforms, the generic :func:`fcntl.fcntl` and :func:`fcntl.ioctl`
    functions may be used; they accept a socket object as their first argument.
 
-.. method:: socket.listen(backlog)
+.. method:: socket.listen([backlog])
 
-   Listen for connections made to the socket.  The *backlog* argument specifies the
-   maximum number of queued connections and should be at least 0; the maximum value
-   is system-dependent (usually 5), the minimum value is forced to 0.
+   Enable a server to accept connections.  If *backlog* is specified, it must
+   be at least 0 (if it is lower, it is set to 0); it specifies the number of
+   unaccepted connections that the system will allow before refusing new
+   connections. If not specified, a reasonable default value is chosen.
 
+   .. versionchanged:: 3.5
+      The *backlog* parameter is now optional.
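
A minimal sketch of the now-optional *backlog* (Python 3.5+; binding to port 0 lets the OS pick a free port)::

    import socket

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(('127.0.0.1', 0))
    server.listen()                 # backlog omitted: a reasonable default is chosen
    print(server.getsockname())
    server.close()
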
 
 .. method:: socket.makefile(mode='r', buffering=None, *, encoding=None, \
                             errors=None, newline=None)
@@ -1444,7 +1447,7 @@
 can use the :meth:`socket.send`, and the :meth:`socket.recv` operations (and
 their counterparts) on the socket object as usual.
 
-This example might require special priviledge::
+This example might require special privileges::
 
    import socket
    import struct
diff --git a/Doc/library/socketserver.rst b/Doc/library/socketserver.rst
index 1ec4438..9db36d5 100644
--- a/Doc/library/socketserver.rst
+++ b/Doc/library/socketserver.rst
@@ -113,7 +113,7 @@
 Another approach to handling multiple simultaneous requests in an environment
 that supports neither threads nor :func:`~os.fork` (or where these are too
 expensive or inappropriate for the service) is to maintain an explicit table of
-partially finished requests and to use :func:`~select.select` to decide which
+partially finished requests and to use :mod:`selectors` to decide which
 request to work on next (or whether to handle a new incoming request).  This is
 particularly important for stream services where each client can potentially be
 connected for a long time (if threads or subprocesses cannot be used).  See
@@ -136,7 +136,7 @@
 .. method:: BaseServer.fileno()
 
    Return an integer file descriptor for the socket on which the server is
-   listening.  This function is most commonly passed to :func:`select.select`, to
+   listening.  This function is most commonly passed to :mod:`selectors`, to
    allow monitoring multiple servers in the same process.
 
 
diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst
index b473c45..0b0edd8 100644
--- a/Doc/library/ssl.rst
+++ b/Doc/library/ssl.rst
@@ -372,22 +372,34 @@
       IDN A-labels such as ``www*.xn--pthon-kva.org`` are still supported,
       but ``x*.python.org`` no longer matches ``xn--tda.python.org``.
 
-.. function:: cert_time_to_seconds(timestring)
+.. function:: cert_time_to_seconds(cert_time)
 
-   Returns a floating-point value containing a normal seconds-after-the-epoch
-   time value, given the time-string representing the "notBefore" or "notAfter"
-   date from a certificate.
+   Return the time in seconds since the Epoch, given the ``cert_time``
+   string representing the "notBefore" or "notAfter" date from a
+   certificate in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C
+   locale).
 
-   Here's an example::
+   Here's an example:
 
-     >>> import ssl
-     >>> ssl.cert_time_to_seconds("May  9 00:00:00 2007 GMT")
-     1178694000.0
-     >>> import time
-     >>> time.ctime(ssl.cert_time_to_seconds("May  9 00:00:00 2007 GMT"))
-     'Wed May  9 00:00:00 2007'
+   .. doctest:: newcontext
 
-.. function:: get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None)
+      >>> import ssl
+      >>> timestamp = ssl.cert_time_to_seconds("Jan  5 09:34:43 2018 GMT")
+      >>> timestamp
+      1515144883
+      >>> from datetime import datetime
+      >>> print(datetime.utcfromtimestamp(timestamp))
+      2018-01-05 09:34:43
+
+   "notBefore" or "notAfter" dates must use GMT (:rfc:`5280`).
+
+   .. versionchanged:: 3.5
+      Interpret the input time as a time in UTC as specified by the 'GMT'
+      timezone in the input string.  Previously, the local timezone was used.
+      Return an integer (no fractions of a second in the input format).
+
+.. function:: get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None)
 
    Given the address ``addr`` of an SSL-protected server, as a (*hostname*,
    *port-number*) pair, fetches the server's certificate, and returns it as a
@@ -401,6 +413,10 @@
    .. versionchanged:: 3.3
       This function is now IPv6-compatible.
 
+   .. versionchanged:: 3.5
+      The default *ssl_version* is changed from :data:`PROTOCOL_SSLv3` to
+      :data:`PROTOCOL_SSLv23` for maximum compatibility with modern servers.
+
 .. function:: DER_cert_to_PEM_cert(DER_cert_bytes)
 
    Given a certificate as a DER-encoded blob of bytes, returns a PEM-encoded
@@ -1005,7 +1021,7 @@
    :data:`CERT_NONE`.  At least one of *cafile* or *capath* must be specified.
 
    This method can also load certification revocation lists (CRLs) in PEM or
-   or DER format. In order to make use of CRLs, :attr:`SSLContext.verify_flags`
+   DER format. In order to make use of CRLs, :attr:`SSLContext.verify_flags`
    must be configured properly.
 
    The *cafile* string, if present, is the path to a file of concatenated
@@ -1602,6 +1618,12 @@
   socket first, and attempts to *read* from the SSL socket may require
   a prior *write* to the underlying socket.
 
+  .. versionchanged:: 3.5
+
+     In earlier Python versions, the :meth:`!SSLSocket.send` method
+     returned zero instead of raising :exc:`SSLWantWriteError` or
+     :exc:`SSLWantReadError`.
+
 - Calling :func:`~select.select` tells you that the OS-level socket can be
   read from (or written to), but it does not imply that there is sufficient
   data at the upper SSL layer.  For example, only part of an SSL frame might
@@ -1671,7 +1693,7 @@
 Verifying certificates
 ''''''''''''''''''''''
 
-When calling the the :class:`SSLContext` constructor directly,
+When calling the :class:`SSLContext` constructor directly,
 :const:`CERT_NONE` is the default.  Since it does not authenticate the other
 peer, it can be insecure, especially in client mode where most of time you
 would like to ensure the authenticity of the server you're talking to.
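
A minimal sketch of the behaviour documented in the hunk above, assuming a
Python 3.5 interpreter (the commented-out host name is only a placeholder)::

   import ssl
   from datetime import datetime, timezone

   # Since 3.5 the 'GMT' suffix is interpreted as UTC (not local time) and
   # the result is an integer rather than a float.
   ts = ssl.cert_time_to_seconds("Jan  5 09:34:43 2018 GMT")
   print(ts)                                        # 1515144883
   print(datetime.fromtimestamp(ts, timezone.utc))  # 2018-01-05 09:34:43+00:00

   # get_server_certificate() now negotiates with PROTOCOL_SSLv23 by default;
   # uncomment with a real host to try it.
   # pem = ssl.get_server_certificate(("example.org", 443))
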
diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst
index 854993c..d76e29f 100644
--- a/Doc/library/subprocess.rst
+++ b/Doc/library/subprocess.rst
@@ -629,6 +629,7 @@
    must be bytes or, if *universal_newlines* was ``True``, a string.
 
    :meth:`communicate` returns a tuple ``(stdoutdata, stderrdata)``.
+   The data will be bytes or, if *universal_newlines* was ``True``, strings.
 
    Note that if you want to send data to the process's stdin, you need to create
    the Popen object with ``stdin=PIPE``.  Similarly, to get anything other than
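
A short illustration of the added sentence, assuming a standard Python 3
interpreter is available as ``sys.executable``::

   import subprocess
   import sys

   # By default communicate() returns bytes ...
   p = subprocess.Popen([sys.executable, "-c", "print('hi')"],
                        stdout=subprocess.PIPE)
   out, _ = p.communicate()
   print(type(out))          # <class 'bytes'>

   # ... and strings when universal_newlines=True is passed.
   p = subprocess.Popen([sys.executable, "-c", "print('hi')"],
                        stdout=subprocess.PIPE, universal_newlines=True)
   out, _ = p.communicate()
   print(type(out))          # <class 'str'>
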
diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst
index 4a3b3ea..03ee769 100644
--- a/Doc/library/threading.rst
+++ b/Doc/library/threading.rst
@@ -630,7 +630,7 @@
             cv.wait()
 
       Therefore, the same rules apply as with :meth:`wait`: The lock must be
-      held when called and is re-aquired on return.  The predicate is evaluated
+      held when called and is re-acquired on return.  The predicate is evaluated
       with the lock held.
 
       .. versionadded:: 3.2
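
A small sketch of the rule corrected above (the lock must be held when calling
:meth:`wait_for` and is re-acquired before it returns); the producer/consumer
pair here is purely illustrative::

   import threading

   items = []
   cv = threading.Condition()

   def consumer():
       with cv:                        # lock must be held when calling wait_for()
           cv.wait_for(lambda: items)  # predicate is evaluated with the lock held
           print("got", items.pop())

   t = threading.Thread(target=consumer)
   t.start()
   with cv:
       items.append(42)
       cv.notify()                     # wait_for() re-acquires the lock before returning
   t.join()
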
diff --git a/Doc/library/tkinter.ttk.rst b/Doc/library/tkinter.ttk.rst
index 6f8bf1c..b0eefcb 100644
--- a/Doc/library/tkinter.ttk.rst
+++ b/Doc/library/tkinter.ttk.rst
@@ -1167,7 +1167,7 @@
 Each widget in :mod:`ttk` is assigned a style, which specifies the set of
 elements making up the widget and how they are arranged, along with dynamic
 and default settings for element options. By default the style name is the
-same as the widget's class name, but it may be overriden by the widget's style
+same as the widget's class name, but it may be overridden by the widget's style
 option. If you don't know the class name of a widget, use the method
 :meth:`Misc.winfo_class` (somewidget.winfo_class()).
 
diff --git a/Doc/library/token.rst b/Doc/library/token.rst
index 4cd7098..88fb38b 100644
--- a/Doc/library/token.rst
+++ b/Doc/library/token.rst
@@ -93,6 +93,7 @@
           DOUBLESLASH
           DOUBLESLASHEQUAL
           AT
+          ATEQUAL
           RARROW
           ELLIPSIS
           OP
diff --git a/Doc/library/tracemalloc.rst b/Doc/library/tracemalloc.rst
index 3405518..f1e2602 100644
--- a/Doc/library/tracemalloc.rst
+++ b/Doc/library/tracemalloc.rst
@@ -350,7 +350,7 @@
    the *nframe* parameter of the :func:`start` function to store more frames.
 
    The :mod:`tracemalloc` module must be tracing memory allocations to take a
-   snapshot, see the the :func:`start` function.
+   snapshot, see the :func:`start` function.
 
    See also the :func:`get_object_traceback` function.
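
A minimal sketch of the requirement restated above: tracing must be started
before a snapshot can be taken (the allocation pattern below is arbitrary)::

   import tracemalloc

   tracemalloc.start(10)          # store up to 10 frames per traceback
   data = [bytes(1000) for _ in range(100)]
   snapshot = tracemalloc.take_snapshot()   # requires tracing to be active
   for stat in snapshot.statistics("lineno")[:3]:
       print(stat)
   tracemalloc.stop()
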
 
diff --git a/Doc/library/turtle.rst b/Doc/library/turtle.rst
index b015530..dbb1aa9 100644
--- a/Doc/library/turtle.rst
+++ b/Doc/library/turtle.rst
@@ -1809,7 +1809,7 @@
 
    Pop up a dialog window for input of a number. title is the title of the
    dialog window, prompt is a text mostly describing what numerical information
-   to input. default: default value, minval: minimum value for imput,
+   to input. default: default value, minval: minimum value for input,
    maxval: maximum value for input
    The number input must be in the range minval .. maxval if these are
    given. If not, a hint is issued and the dialog remains open for
@@ -1879,7 +1879,7 @@
 
       >>> cv = screen.getcanvas()
       >>> cv
-      <turtle.ScrolledCanvas object at ...>
+      <turtle.ScrolledCanvas object ...>
 
 
 .. function:: getshapes()
@@ -2397,7 +2397,7 @@
   Accordingly the latter has got an alias: :meth:`Screen.onkeyrelease`.
 
 - The method  :meth:`Screen.mainloop` has been added. So when working only
-  with Screen and Turtle objects one must not additonally import
+  with Screen and Turtle objects one must not additionally import
   :func:`mainloop` anymore.
 
 - Two input methods has been added :meth:`Screen.textinput` and
diff --git a/Doc/library/unittest.mock.rst b/Doc/library/unittest.mock.rst
index cb72a68..4f58892 100644
--- a/Doc/library/unittest.mock.rst
+++ b/Doc/library/unittest.mock.rst
@@ -198,7 +198,7 @@
 the `new_callable` argument to `patch`.
 
 
-.. class:: Mock(spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, **kwargs)
+.. class:: Mock(spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, unsafe=False, **kwargs)
 
     Create a new `Mock` object. `Mock` takes several optional arguments
     that specify the behaviour of the Mock object:
@@ -235,6 +235,12 @@
       this is a new Mock (created on first access). See the
       :attr:`return_value` attribute.
 
+    * `unsafe`: By default, accessing any attribute whose name starts with
+      *assert* or *assret* raises an `AttributeError`. Passing `unsafe=True`
+      will allow access to these attributes.
+
+      .. versionadded:: 3.5
+
     * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
       calling the Mock will pass the call through to the wrapped object
       (returning the real result). Attribute access on the mock will return a
@@ -315,6 +321,20 @@
             >>> calls = [call(4), call(2), call(3)]
             >>> mock.assert_has_calls(calls, any_order=True)
 
+    .. method:: assert_not_called(*args, **kwargs)
+
+        Assert the mock was never called.
+
+            >>> m = Mock()
+            >>> m.hello.assert_not_called()
+            >>> obj = m.hello()
+            >>> m.hello.assert_not_called()
+            Traceback (most recent call last):
+              ...
+            AssertionError: Expected 'hello' to not have been called. Called 1 times.
+
+        .. versionadded:: 3.5
+
 
     .. method:: reset_mock()
 
@@ -1031,6 +1051,12 @@
     default because it can be dangerous. With it switched on you can write
     passing tests against APIs that don't actually exist!
 
+    .. note::
+
+        .. versionchanged:: 3.5
+           If you are patching builtins in a module then you don't
+           need to pass `create=True`; it will be added by default.
+
     Patch can be used as a `TestCase` class decorator. It works by
     decorating each test method in the class. This reduces the boilerplate
     code when your test methods share a common patchings set. `patch` finds
@@ -1401,6 +1427,21 @@
 
     Stop all active patches. Only stops patches started with `start`.
 
+.. _patch-builtins:
+
+patch builtins
+~~~~~~~~~~~~~~~
+You can patch any builtins within a module. The following example patches
+the builtin `ord`:
+
+    >>> @patch('__main__.ord')
+    ... def test(mock_ord):
+    ...     mock_ord.return_value = 101
+    ...     print(ord('c'))
+    ...
+    >>> test()
+    101
+
 
 TEST_PREFIX
 ~~~~~~~~~~~
@@ -2011,7 +2052,7 @@
 enough that a helper function is useful.
 
     >>> m = mock_open()
-    >>> with patch('__main__.open', m, create=True):
+    >>> with patch('__main__.open', m):
     ...     with open('foo', 'w') as h:
     ...         h.write('some stuff')
     ...
@@ -2026,7 +2067,7 @@
 
 And for reading files:
 
-    >>> with patch('__main__.open', mock_open(read_data='bibble'), create=True) as m:
+    >>> with patch('__main__.open', mock_open(read_data='bibble')) as m:
     ...     with open('foo') as h:
     ...         result = h.read()
     ...
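
A brief sketch of the 3.5 additions touched in this file, assuming Python 3.5's
:mod:`unittest.mock` (``assert_me`` and the ``foo`` file name are made up)::

   from unittest import mock

   m = mock.Mock()
   m.hello.assert_not_called()          # passes: 'hello' was never called
   m.hello()
   try:
       m.hello.assert_not_called()      # now fails
   except AssertionError as exc:
       print(exc)

   try:
       mock.Mock().assert_me()          # assert*/assret* names rejected by default
   except AttributeError as exc:
       print(exc)
   mock.Mock(unsafe=True).assert_me()   # allowed with unsafe=True

   # Patching a builtin looked up in a module no longer needs create=True.
   with mock.patch("__main__.open", mock.mock_open(read_data="bibble")):
       with open("foo") as fh:
           print(fh.read())             # bibble
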
diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst
index 019f59c..b588dad 100644
--- a/Doc/library/urllib.request.rst
+++ b/Doc/library/urllib.request.rst
@@ -67,7 +67,7 @@
    :class:`http.client.HTTPResponse` object which has the following
    :ref:`httpresponse-objects` methods.
 
-   For ftp, file, and data urls and requests explicity handled by legacy
+   For ftp, file, and data urls and requests explicitly handled by legacy
    :class:`URLopener` and :class:`FancyURLopener` classes, this function
    returns a :class:`urllib.response.addinfourl` object which can work as
    :term:`context manager` and has methods such as
@@ -1067,7 +1067,7 @@
 the various ways in which a (X)HTML or a XML document could have specified its
 encoding information.
 
-As the python.org website uses *utf-8* encoding as specified in it's meta tag, we
+As the python.org website uses *utf-8* encoding as specified in its meta tag, we
 will use the same for decoding the bytes object. ::
 
    >>> with urllib.request.urlopen('http://www.python.org/') as f:
diff --git a/Doc/library/weakref.rst b/Doc/library/weakref.rst
index 9ca60a9..cc883b1 100644
--- a/Doc/library/weakref.rst
+++ b/Doc/library/weakref.rst
@@ -566,8 +566,8 @@
 
 .. note::
 
-   If you create a finalizer object in a daemonic thread just as the
-   the program exits then there is the possibility that the finalizer
+   If you create a finalizer object in a daemonic thread just as the program
+   exits then there is the possibility that the finalizer
    does not get called at exit.  However, in a daemonic thread
    :func:`atexit.register`, ``try: ... finally: ...`` and ``with: ...``
    do not guarantee that cleanup occurs either.
diff --git a/Doc/library/webbrowser.rst b/Doc/library/webbrowser.rst
index ef63769..aa5e4ad 100644
--- a/Doc/library/webbrowser.rst
+++ b/Doc/library/webbrowser.rst
@@ -20,7 +20,7 @@
 the user exits the browser.
 
 If the environment variable :envvar:`BROWSER` exists, it is interpreted as the
-:data:`os.pathsep`-separated list of browsers to try ahead of the the platform
+:data:`os.pathsep`-separated list of browsers to try ahead of the platform
 defaults.  When the value of a list part contains the string ``%s``, then it is
 interpreted as a literal browser command line to be used with the argument URL
 substituted for ``%s``; if the part does not contain ``%s``, it is simply
diff --git a/Doc/library/xml.dom.rst b/Doc/library/xml.dom.rst
index 19512ed..4914738 100644
--- a/Doc/library/xml.dom.rst
+++ b/Doc/library/xml.dom.rst
@@ -412,7 +412,7 @@
 .. method:: NodeList.item(i)
 
    Return the *i*'th item from the sequence, if there is one, or ``None``.  The
-   index *i* is not allowed to be less then zero or greater than or equal to the
+   index *i* is not allowed to be less than zero or greater than or equal to the
    length of the sequence.
 
 
diff --git a/Doc/library/xmlrpc.client.rst b/Doc/library/xmlrpc.client.rst
index 3cb19d1..6f14227 100644
--- a/Doc/library/xmlrpc.client.rst
+++ b/Doc/library/xmlrpc.client.rst
@@ -191,6 +191,11 @@
    no such string is available, an empty string is returned. The documentation
    string may contain HTML markup.
 
+.. versionchanged:: 3.5
+
+   Instances of :class:`ServerProxy` support the :term:`context manager` protocol
+   for closing the underlying transport.
+
 
 A working example follows. The server code::
 
@@ -208,9 +213,9 @@
 
    import xmlrpc.client
 
-   proxy = xmlrpc.client.ServerProxy("http://localhost:8000/")
-   print("3 is even: %s" % str(proxy.is_even(3)))
-   print("100 is even: %s" % str(proxy.is_even(100)))
+   with xmlrpc.client.ServerProxy("http://localhost:8000/") as proxy:
+       print("3 is even: %s" % str(proxy.is_even(3)))
+       print("100 is even: %s" % str(proxy.is_even(100)))
 
 .. _datetime-objects:
 
@@ -518,14 +523,14 @@
    from xmlrpc.client import ServerProxy, Error
 
    # server = ServerProxy("http://localhost:8000") # local server
-   server = ServerProxy("http://betty.userland.com")
+   with ServerProxy("http://betty.userland.com") as proxy:
 
-   print(server)
+       print(proxy)
 
-   try:
-       print(server.examples.getStateName(41))
-   except Error as v:
-       print("ERROR", v)
+       try:
+           print(proxy.examples.getStateName(41))
+       except Error as v:
+           print("ERROR", v)
 
 To access an XML-RPC server through a proxy, you need to define  a custom
 transport.  The following example shows how:
diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst
index 78dfd79..e46688e 100644
--- a/Doc/reference/datamodel.rst
+++ b/Doc/reference/datamodel.rst
@@ -1968,6 +1968,7 @@
 .. method:: object.__add__(self, other)
             object.__sub__(self, other)
             object.__mul__(self, other)
+            object.__matmul__(self, other)
             object.__truediv__(self, other)
             object.__floordiv__(self, other)
             object.__mod__(self, other)
@@ -1984,15 +1985,16 @@
       builtin: pow
       builtin: pow
 
-   These methods are called to implement the binary arithmetic operations (``+``,
-   ``-``, ``*``, ``/``, ``//``, ``%``, :func:`divmod`, :func:`pow`, ``**``, ``<<``,
-   ``>>``, ``&``, ``^``, ``|``).  For instance, to evaluate the expression
-   ``x + y``, where *x* is an instance of a class that has an :meth:`__add__`
-   method, ``x.__add__(y)`` is called.  The :meth:`__divmod__` method should be the
-   equivalent to using :meth:`__floordiv__` and :meth:`__mod__`; it should not be
-   related to :meth:`__truediv__`.  Note that :meth:`__pow__` should be defined
-   to accept an optional third argument if the ternary version of the built-in
-   :func:`pow` function is to be supported.
+   These methods are called to implement the binary arithmetic operations
+   (``+``, ``-``, ``*``, ``@``, ``/``, ``//``, ``%``, :func:`divmod`,
+   :func:`pow`, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``).  For instance, to
+   evaluate the expression ``x + y``, where *x* is an instance of a class that
+   has an :meth:`__add__` method, ``x.__add__(y)`` is called.  The
+   :meth:`__divmod__` method should be the equivalent to using
+   :meth:`__floordiv__` and :meth:`__mod__`; it should not be related to
+   :meth:`__truediv__`.  Note that :meth:`__pow__` should be defined to accept
+   an optional third argument if the ternary version of the built-in :func:`pow`
+   function is to be supported.
 
    If one of those methods does not support the operation with the supplied
    arguments, it should return ``NotImplemented``.
@@ -2001,6 +2003,7 @@
 .. method:: object.__radd__(self, other)
             object.__rsub__(self, other)
             object.__rmul__(self, other)
+            object.__rmatmul__(self, other)
             object.__rtruediv__(self, other)
             object.__rfloordiv__(self, other)
             object.__rmod__(self, other)
@@ -2016,14 +2019,14 @@
       builtin: divmod
       builtin: pow
 
-   These methods are called to implement the binary arithmetic operations (``+``,
-   ``-``, ``*``, ``/``, ``//``, ``%``, :func:`divmod`, :func:`pow`, ``**``,
-   ``<<``, ``>>``, ``&``, ``^``, ``|``) with reflected (swapped) operands.
-   These functions are only called if the left operand does not support the
-   corresponding operation and the operands are of different types. [#]_  For
-   instance, to evaluate the expression ``x - y``, where *y* is an instance of
-   a class that has an :meth:`__rsub__` method, ``y.__rsub__(x)`` is called if
-   ``x.__sub__(y)`` returns *NotImplemented*.
+   These methods are called to implement the binary arithmetic operations
+   (``+``, ``-``, ``*``, ``@``, ``/``, ``//``, ``%``, :func:`divmod`,
+   :func:`pow`, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with reflected
+   (swapped) operands.  These functions are only called if the left operand does
+   not support the corresponding operation and the operands are of different
+   types. [#]_ For instance, to evaluate the expression ``x - y``, where *y* is
+   an instance of a class that has an :meth:`__rsub__` method, ``y.__rsub__(x)``
+   is called if ``x.__sub__(y)`` returns *NotImplemented*.
 
    .. index:: builtin: pow
 
@@ -2041,6 +2044,7 @@
 .. method:: object.__iadd__(self, other)
             object.__isub__(self, other)
             object.__imul__(self, other)
+            object.__imatmul__(self, other)
             object.__itruediv__(self, other)
             object.__ifloordiv__(self, other)
             object.__imod__(self, other)
@@ -2052,17 +2056,17 @@
             object.__ior__(self, other)
 
    These methods are called to implement the augmented arithmetic assignments
-   (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``, ``**=``, ``<<=``, ``>>=``,
-   ``&=``, ``^=``, ``|=``).  These methods should attempt to do the operation
-   in-place (modifying *self*) and return the result (which could be, but does
-   not have to be, *self*).  If a specific method is not defined, the augmented
-   assignment falls back to the normal methods.  For instance, if *x* is an
-   instance of a class with an :meth:`__iadd__` method, ``x += y`` is equivalent
-   to ``x = x.__iadd__(y)`` . Otherwise, ``x.__add__(y)`` and ``y.__radd__(x)``
-   are considered, as with the evaluation of ``x + y``. In certain situations,
-   augmented assignment can result in unexpected errors (see
-   :ref:`faq-augmented-assignment-tuple-error`), but this behavior is in
-   fact part of the data model.
+   (``+=``, ``-=``, ``*=``, ``@=``, ``/=``, ``//=``, ``%=``, ``**=``, ``<<=``,
+   ``>>=``, ``&=``, ``^=``, ``|=``).  These methods should attempt to do the
+   operation in-place (modifying *self*) and return the result (which could be,
+   but does not have to be, *self*).  If a specific method is not defined, the
+   augmented assignment falls back to the normal methods.  For instance, if *x*
+   is an instance of a class with an :meth:`__iadd__` method, ``x += y`` is
+   equivalent to ``x = x.__iadd__(y)`` . Otherwise, ``x.__add__(y)`` and
+   ``y.__radd__(x)`` are considered, as with the evaluation of ``x + y``. In
+   certain situations, augmented assignment can result in unexpected errors (see
+   :ref:`faq-augmented-assignment-tuple-error`), but this behavior is in fact
+   part of the data model.
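
A hypothetical ``Vec`` class sketching the three new hooks (requires a Python
3.5 interpreter for the ``@`` syntax)::

   class Vec:
       """Toy vector type; '@' computes the dot product."""
       def __init__(self, *xs):
           self.xs = xs

       def __matmul__(self, other):          # x @ y
           if not isinstance(other, Vec):
               return NotImplemented
           return sum(a * b for a, b in zip(self.xs, other.xs))

       def __rmatmul__(self, other):         # reflected form, tried when the left
           return self.__matmul__(other)     # operand returns NotImplemented

       def __imatmul__(self, other):         # x @= y; here it simply delegates to '@'
           return self.__matmul__(other)

   v = Vec(1, 2, 3)
   print(v @ Vec(4, 5, 6))                   # 32
   v @= Vec(4, 5, 6)
   print(v)                                  # 32 (rebound to the result)
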
 
 
 .. method:: object.__neg__(self)
diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst
index 06baba0..5b92a48 100644
--- a/Doc/reference/expressions.rst
+++ b/Doc/reference/expressions.rst
@@ -892,8 +892,9 @@
 operators and one for additive operators:
 
 .. productionlist::
-   m_expr: `u_expr` | `m_expr` "*" `u_expr` | `m_expr` "//" `u_expr` | `m_expr` "/" `u_expr`
-         : | `m_expr` "%" `u_expr`
+   m_expr: `u_expr` | `m_expr` "*" `u_expr` | `m_expr` "@" `m_expr` |
+         : `m_expr` "//" `u_expr` | `m_expr` "/" `u_expr` |
+         : `m_expr` "%" `u_expr`
    a_expr: `m_expr` | `a_expr` "+" `m_expr` | `a_expr` "-" `m_expr`
 
 .. index:: single: multiplication
@@ -904,6 +905,13 @@
 common type and then multiplied together.  In the latter case, sequence
 repetition is performed; a negative repetition factor yields an empty sequence.
 
+.. index:: single: matrix multiplication
+
+The ``@`` (at) operator is intended to be used for matrix multiplication.  No
+builtin Python types implement this operator.
+
+.. versionadded:: 3.5
+
 .. index::
    exception: ZeroDivisionError
    single: division
@@ -1346,8 +1354,9 @@
 +-----------------------------------------------+-------------------------------------+
 | ``+``, ``-``                                  | Addition and subtraction            |
 +-----------------------------------------------+-------------------------------------+
-| ``*``, ``/``, ``//``, ``%``                   | Multiplication, division, remainder |
-|                                               | [#]_                                |
+| ``*``, ``@``, ``/``, ``//``, ``%``            | Multiplication, matrix              |
+|                                               | multiplication, division,           |
+|                                               | remainder [#]_                      |
 +-----------------------------------------------+-------------------------------------+
 | ``+x``, ``-x``, ``~x``                        | Positive, negative, bitwise NOT     |
 +-----------------------------------------------+-------------------------------------+
diff --git a/Doc/reference/simple_stmts.rst b/Doc/reference/simple_stmts.rst
index 66c234c..a42d1f6 100644
--- a/Doc/reference/simple_stmts.rst
+++ b/Doc/reference/simple_stmts.rst
@@ -280,7 +280,7 @@
 .. productionlist::
    augmented_assignment_stmt: `augtarget` `augop` (`expression_list` | `yield_expression`)
    augtarget: `identifier` | `attributeref` | `subscription` | `slicing`
-   augop: "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="
+   augop: "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**="
         : | ">>=" | "<<=" | "&=" | "^=" | "|="
 
 (See section :ref:`primaries` for the syntax definitions for the last three
diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py
index 31d8c06..e37ef89 100644
--- a/Doc/tools/sphinxext/pyspecific.py
+++ b/Doc/tools/sphinxext/pyspecific.py
@@ -10,7 +10,7 @@
 """
 
 ISSUE_URI = 'http://bugs.python.org/issue%s'
-SOURCE_URI = 'http://hg.python.org/cpython/file/3.4/%s'
+SOURCE_URI = 'http://hg.python.org/cpython/file/default/%s'
 
 from docutils import nodes, utils
 
diff --git a/Doc/tools/sphinxext/susp-ignored.csv b/Doc/tools/sphinxext/susp-ignored.csv
index 1769023..7acc79b 100644
--- a/Doc/tools/sphinxext/susp-ignored.csv
+++ b/Doc/tools/sphinxext/susp-ignored.csv
@@ -276,9 +276,5 @@
 whatsnew/3.2,,:gz,">>> with tarfile.open(name='myarchive.tar.gz', mode='w:gz') as tf:"
 whatsnew/3.2,,:location,zope9-location = ${zope9:location}
 whatsnew/3.2,,:prefix,zope-conf = ${custom:prefix}/etc/zope.conf
-whatsnew/changelog,,:platform,:platform:
 whatsnew/changelog,,:gz,": TarFile opened with external fileobj and ""w:gz"" mode didn't"
-whatsnew/changelog,,:PythonCmd,"With Tk < 8.5 _tkinter.c:PythonCmd() raised UnicodeDecodeError, caused"
-whatsnew/changelog,,::,": Fix FTP tests for IPv6, bind to ""::1"" instead of ""localhost""."
 whatsnew/changelog,,::,": Use ""127.0.0.1"" or ""::1"" instead of ""localhost"" as much as"
-whatsnew/changelog,,:password,user:password
diff --git a/Doc/tutorial/interpreter.rst b/Doc/tutorial/interpreter.rst
index 44dc6d1..5c23ad7 100644
--- a/Doc/tutorial/interpreter.rst
+++ b/Doc/tutorial/interpreter.rst
@@ -10,13 +10,13 @@
 Invoking the Interpreter
 ========================
 
-The Python interpreter is usually installed as :file:`/usr/local/bin/python3.4`
+The Python interpreter is usually installed as :file:`/usr/local/bin/python3.5`
 on those machines where it is available; putting :file:`/usr/local/bin` in your
 Unix shell's search path makes it possible to start it by typing the command:
 
 .. code-block:: text
 
-   python3.4
+   python3.5
 
 to the shell. [#]_ Since the choice of the directory where the interpreter lives
 is an installation option, other places are possible; check with your local
@@ -24,11 +24,11 @@
 popular alternative location.)
 
 On Windows machines, the Python installation is usually placed in
-:file:`C:\\Python34`, though you can change this when you're running the
+:file:`C:\\Python35`, though you can change this when you're running the
 installer.  To add this directory to your path,  you can type the following
 command into the command prompt in a DOS box::
 
-   set path=%path%;C:\python34
+   set path=%path%;C:\python35
 
 Typing an end-of-file character (:kbd:`Control-D` on Unix, :kbd:`Control-Z` on
 Windows) at the primary prompt causes the interpreter to exit with a zero exit
@@ -94,8 +94,8 @@
 prints a welcome message stating its version number and a copyright notice
 before printing the first prompt::
 
-   $ python3.4
-   Python 3.4 (default, Mar 16 2014, 09:25:04)
+   $ python3.5
+   Python 3.5 (default, Sep 16 2015, 09:25:04)
    [GCC 4.8.2] on linux
    Type "help", "copyright", "credits" or "license" for more information.
    >>>
@@ -148,7 +148,7 @@
 On BSD'ish Unix systems, Python scripts can be made directly executable, like
 shell scripts, by putting the line ::
 
-   #! /usr/bin/env python3.4
+   #! /usr/bin/env python3.5
 
 (assuming that the interpreter is on the user's :envvar:`PATH`) at the beginning
 of the script and giving the file an executable mode.  The ``#!`` must be the
diff --git a/Doc/tutorial/stdlib.rst b/Doc/tutorial/stdlib.rst
index cd73bc2..954ef44 100644
--- a/Doc/tutorial/stdlib.rst
+++ b/Doc/tutorial/stdlib.rst
@@ -15,7 +15,7 @@
 
    >>> import os
    >>> os.getcwd()      # Return the current working directory
-   'C:\\Python34'
+   'C:\\Python35'
    >>> os.chdir('/server/accesslogs')   # Change current working directory
    >>> os.system('mkdir today')   # Run the command mkdir in the system shell
    0
diff --git a/Doc/tutorial/stdlib2.rst b/Doc/tutorial/stdlib2.rst
index c0197ea..497c584 100644
--- a/Doc/tutorial/stdlib2.rst
+++ b/Doc/tutorial/stdlib2.rst
@@ -277,7 +277,7 @@
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
        d['primary']                # entry was automatically removed
-     File "C:/python34/lib/weakref.py", line 46, in __getitem__
+     File "C:/python35/lib/weakref.py", line 46, in __getitem__
        o = self.data[key]()
    KeyError: 'primary'
 
diff --git a/Doc/whatsnew/2.1.rst b/Doc/whatsnew/2.1.rst
index b1ab48e..144ea1c 100644
--- a/Doc/whatsnew/2.1.rst
+++ b/Doc/whatsnew/2.1.rst
@@ -219,7 +219,7 @@
 
 .. seealso::
 
-   :pep:`207` - Rich Comparisions
+   :pep:`207` - Rich Comparisons
       Written by Guido van Rossum, heavily based on earlier work by David Ascher, and
       implemented by Guido van Rossum.
 
diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst
index cda63e4..7631e75 100644
--- a/Doc/whatsnew/3.3.rst
+++ b/Doc/whatsnew/3.3.rst
@@ -1579,7 +1579,7 @@
   avoid race conditions in multi-threaded programs.
 
 * The :mod:`os` module has a new :func:`~os.sendfile` function which provides
-  an efficent "zero-copy" way for copying data from one file (or socket)
+  an efficient "zero-copy" way for copying data from one file (or socket)
   descriptor to another. The phrase "zero-copy" refers to the fact that all of
   the copying of data between the two descriptors is done entirely by the
   kernel, with no copying of data into userspace buffers. :func:`~os.sendfile`
@@ -1908,7 +1908,7 @@
 :meth:`~socketserver.BaseServer.service_actions` that is called by the
 :meth:`~socketserver.BaseServer.serve_forever` method in the service loop.
 :class:`~socketserver.ForkingMixIn` now uses this to clean up zombie
-child proceses.  (Contributed by Justin Warkentin in :issue:`11109`.)
+child processes.  (Contributed by Justin Warkentin in :issue:`11109`.)
 
 
 sqlite3
@@ -2360,7 +2360,7 @@
   bytecode file, make sure to call :func:`importlib.invalidate_caches` to clear
   out the cache for the finders to notice the new file.
 
-* :exc:`ImportError` now uses the full name of the module that was attemped to
+* :exc:`ImportError` now uses the full name of the module that was attempted to
   be imported. Doctests that check ImportErrors' message will need to be
   updated to use the full name of the module instead of just the tail of the
   name.
diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst
new file mode 100644
index 0000000..91cec97
--- /dev/null
+++ b/Doc/whatsnew/3.5.rst
@@ -0,0 +1,270 @@
+****************************
+  What's New In Python 3.5
+****************************
+
+:Release: |release|
+:Date: |today|
+
+.. Rules for maintenance:
+
+   * Anyone can add text to this document.  Do not spend very much time
+   on the wording of your changes, because your text will probably
+   get rewritten to some degree.
+
+   * The maintainer will go through Misc/NEWS periodically and add
+   changes; it's therefore more important to add your changes to
+   Misc/NEWS than to this file.
+
+   * This is not a complete list of every single change; completeness
+   is the purpose of Misc/NEWS.  Some changes I consider too small
+   or esoteric to include.  If such a change is added to the text,
+   I'll just remove it.  (This is another reason you shouldn't spend
+   too much time on writing your addition.)
+
+   * If you want to draw your new text to the attention of the
+   maintainer, add 'XXX' to the beginning of the paragraph or
+   section.
+
+   * It's OK to just add a fragmentary note about a change.  For
+   example: "XXX Describe the transmogrify() function added to the
+   socket module."  The maintainer will research the change and
+   write the necessary text.
+
+   * You can comment out your additions if you like, but it's not
+   necessary (especially when a final release is some months away).
+
+   * Credit the author of a patch or bugfix.   Just the name is
+   sufficient; the e-mail address isn't necessary.
+
+   * It's helpful to add the bug/patch number as a comment:
+
+   XXX Describe the transmogrify() function added to the socket
+   module.
+   (Contributed by P.Y. Developer in :issue:`12345`.)
+
+   This saves the maintainer the effort of going through the Mercurial log
+   when researching a change.
+
+This article explains the new features in Python 3.5, compared to 3.4.
+
+For full details, see the :source:`Misc/NEWS` file.
+
+.. note:: Prerelease users should be aware that this document is currently in
+   draft form. It will be updated substantially as Python 3.5 moves towards
+   release, so it's worth checking back even after reading earlier versions.
+
+
+.. seealso::
+
+    .. :pep:`4XX` - Python 3.5 Release Schedule
+
+
+Summary -- Release highlights
+=============================
+
+.. This section singles out the most important changes in Python 3.5.
+   Brevity is key.
+
+New syntax features:
+
+* None yet.
+
+New library modules:
+
+* None yet.
+
+New built-in features:
+
+* None yet.
+
+Implementation improvements:
+
+* When the ``LC_CTYPE`` locale is the POSIX locale (``C`` locale),
+  :py:data:`sys.stdin` and :py:data:`sys.stdout` now use the
+  ``surrogateescape`` error handler, instead of the ``strict`` error handler
+  (:issue:`19977`).
+
+Significantly Improved Library Modules:
+
+* None yet.
+
+Security improvements:
+
+* None yet.
+
+Please read on for a comprehensive list of user-facing changes.
+
+
+.. PEP-sized items next.
+
+.. _pep-4XX:
+
+.. PEP 4XX: Virtual Environments
+.. =============================
+
+
+.. (Implemented by Foo Bar.)
+
+.. .. seealso::
+
+    :pep:`4XX` - Python Virtual Environments
+       PEP written by Carl Meyer
+
+
+
+
+Other Language Changes
+======================
+
+Some smaller changes made to the core Python language are:
+
+* None yet.
+
+
+
+New Modules
+===========
+
+.. module name
+.. -----------
+
+* None yet.
+
+
+Improved Modules
+================
+
+doctest
+-------
+
+* :func:`doctest.DocTestSuite` returns an empty :class:`unittest.TestSuite` if
+  *module* contains no docstrings instead of raising :exc:`ValueError`
+  (contributed by Glenn Jones in :issue:`15916`).
+
+importlib
+---------
+
+* :class:`importlib.util.LazyLoader` allows for the lazy loading of modules in
+  applications where startup time is paramount (contributed by Brett Cannon in
+  :issue:`17621`).
+
+* :func:`importlib.abc.InspectLoader.source_to_code` is now a
+  static method to make it easier to work with source code in a string.
+  With a module object that you want to initialize you can then use
+  ``exec(code, module.__dict__)`` to execute the code in the module.
+
+inspect
+-------
+
+* :class:`inspect.Signature` and :class:`inspect.Parameter` are now
+  picklable and hashable (contributed by Yury Selivanov in :issue:`20726`
+  and :issue:`20334`).
+
+* New class method :meth:`inspect.Signature.from_callable`, which makes
+  subclassing of :class:`~inspect.Signature` easier (contributed
+  by Yury Selivanov and Eric Snow in :issue:`17373`).
+
+ipaddress
+---------
+
+* :class:`ipaddress.IPv4Network` and :class:`ipaddress.IPv6Network` now
+  accept an ``(address, netmask)`` tuple argument, so as to easily construct
+  network objects from existing addresses (contributed by Peter Moody
+  and Antoine Pitrou in :issue:`16531`).
+
+signal
+------
+
+* Different constants of the :mod:`signal` module are now enumeration values
+  using the :mod:`enum` module. This allows meaningful names to be printed
+  during debugging, instead of integer “magic numbers”. (Contributed by
+  Giampaolo Rodola' in :issue:`21076`.)
+
+xmlrpc
+------
+
+* :class:`xmlrpc.client.ServerProxy` is now a :term:`context manager`
+  (contributed by Claudiu Popa in :issue:`20627`).
+
+
+Optimizations
+=============
+
+The following performance enhancements have been added:
+
+* Construction of ``bytes(int)`` and ``bytearray(int)`` (filled with zero bytes)
+  is faster and uses less memory (until the bytearray buffer is filled with
+  data) for large objects. ``calloc()`` is used instead of ``malloc()`` to
+  allocate memory for these objects.
+
+* Some operations on :class:`~ipaddress.IPv4Network` and
+  :class:`~ipaddress.IPv6Network` have been massively sped up, such as
+  :meth:`~ipaddress.IPv4Network.subnets`, :meth:`~ipaddress.IPv4Network.supernet`,
+  :func:`~ipaddress.summarize_address_range`, :func:`~ipaddress.collapse_addresses`.
+  The speed up can range from 3x to 15x.
+  (:issue:`21486`, :issue:`21487`, :issue:`20826`)
+
+
+Build and C API Changes
+=======================
+
+Changes to Python's build process and to the C API include:
+
+* New ``calloc`` functions:
+
+  * :c:func:`PyMem_RawCalloc`
+  * :c:func:`PyMem_Calloc`
+  * :c:func:`PyObject_Calloc`
+  * :c:func:`_PyObject_GC_Calloc`
+
+
+Deprecated
+==========
+
+Unsupported Operating Systems
+-----------------------------
+
+* None yet.
+
+
+Deprecated Python modules, functions and methods
+------------------------------------------------
+
+* The :mod:`formatter` module has now graduated to full deprecation and is still
+  slated for removal in Python 3.6.
+
+
+Deprecated functions and types of the C API
+-------------------------------------------
+
+* None yet.
+
+
+Deprecated features
+-------------------
+
+* None yet.
+
+
+Porting to Python 3.5
+=====================
+
+This section lists previously described changes and other bugfixes
+that may require changes to your code.
+
+Changes in the Python API
+-------------------------
+
+* Before Python 3.5, a :class:`datetime.time` object was considered to be false
+  if it represented midnight in UTC.  This behavior was considered obscure and
+  error-prone and has been removed in Python 3.5.  See :issue:`13936` for full
+  details.
+
+* :meth:`ssl.SSLSocket.send()` now raises either :exc:`ssl.SSLWantReadError`
+  or :exc:`ssl.SSLWantWriteError` on a non-blocking socket if the operation
+  would block. Previously, it would return 0.  See :issue:`20951`.
+
+Changes in the C API
+--------------------
+
+* The :c:type:`PyMemAllocator` structure has a new ``calloc`` field.
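
A couple of quick sketches of items listed above, assuming Python 3.5
(the addresses are documentation examples only)::

   import ipaddress
   import signal

   # ipaddress: the new (address, netmask) tuple form
   print(ipaddress.IPv4Network(("192.0.2.0", "255.255.255.0")))   # 192.0.2.0/24
   print(ipaddress.IPv4Network(("192.0.2.0", 24)))                # 192.0.2.0/24

   # signal: constants are now enum members, so they repr with their names
   print(repr(signal.SIGINT))   # e.g. <Signals.SIGINT: 2>
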
diff --git a/Doc/whatsnew/index.rst b/Doc/whatsnew/index.rst
index 29902e4..edb5502 100644
--- a/Doc/whatsnew/index.rst
+++ b/Doc/whatsnew/index.rst
@@ -11,6 +11,7 @@
 .. toctree::
    :maxdepth: 2
 
+   3.5.rst
    3.4.rst
    3.3.rst
    3.2.rst
diff --git a/Grammar/Grammar b/Grammar/Grammar
index d7aaffd..354fe60 100644
--- a/Grammar/Grammar
+++ b/Grammar/Grammar
@@ -40,7 +40,7 @@
 expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
                      ('=' (yield_expr|testlist_star_expr))*)
 testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
-augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
             '<<=' | '>>=' | '**=' | '//=')
 # For normal assignments, additional restrictions enforced by the interpreter
 del_stmt: 'del' exprlist
@@ -97,7 +97,7 @@
 and_expr: shift_expr ('&' shift_expr)*
 shift_expr: arith_expr (('<<'|'>>') arith_expr)*
 arith_expr: term (('+'|'-') term)*
-term: factor (('*'|'/'|'%'|'//') factor)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
 factor: ('+'|'-'|'~') factor | power
 power: atom trailer* ['**' factor]
 atom: ('(' [yield_expr|testlist_comp] ')' |
diff --git a/Include/Python-ast.h b/Include/Python-ast.h
index 67d677b..37e9a60 100644
--- a/Include/Python-ast.h
+++ b/Include/Python-ast.h
@@ -15,9 +15,9 @@
 
 typedef enum _boolop { And=1, Or=2 } boolop_ty;
 
-typedef enum _operator { Add=1, Sub=2, Mult=3, Div=4, Mod=5, Pow=6, LShift=7,
-                         RShift=8, BitOr=9, BitXor=10, BitAnd=11, FloorDiv=12 }
-                         operator_ty;
+typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7,
+                         LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12,
+                         FloorDiv=13 } operator_ty;
 
 typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty;
 
diff --git a/Include/abstract.h b/Include/abstract.h
index 6e850b8..db70f21 100644
--- a/Include/abstract.h
+++ b/Include/abstract.h
@@ -658,6 +658,12 @@
      o1*o2.
        */
 
+     PyAPI_FUNC(PyObject *) PyNumber_MatrixMultiply(PyObject *o1, PyObject *o2);
+
+       /*
+     This is the equivalent of the Python expression: o1 @ o2.
+       */
+
      PyAPI_FUNC(PyObject *) PyNumber_FloorDivide(PyObject *o1, PyObject *o2);
 
        /*
@@ -832,6 +838,12 @@
      o1 *= o2.
        */
 
+     PyAPI_FUNC(PyObject *) PyNumber_InPlaceMatrixMultiply(PyObject *o1, PyObject *o2);
+
+       /*
+     This is the equivalent of the Python expression: o1 @= o2.
+       */
+
      PyAPI_FUNC(PyObject *) PyNumber_InPlaceFloorDivide(PyObject *o1,
                                                         PyObject *o2);
 
diff --git a/Include/code.h b/Include/code.h
index 7c7e5bf..ff2b97e 100644
--- a/Include/code.h
+++ b/Include/code.h
@@ -21,7 +21,12 @@
     PyObject *co_varnames;	/* tuple of strings (local variable names) */
     PyObject *co_freevars;	/* tuple of strings (free variable names) */
     PyObject *co_cellvars;      /* tuple of strings (cell variable names) */
-    /* The rest doesn't count for hash or comparisons */
+    /* The rest aren't used in either hash or comparisons, except for
+       co_name (used in both) and co_firstlineno (used only in
+       comparisons).  This is done to preserve the name and line number
+       for tracebacks and debuggers; otherwise, constant de-duplication
+       would collapse identical functions/lambdas defined on different lines.
+    */
     unsigned char *co_cell2arg; /* Maps cell vars which are arguments. */
     PyObject *co_filename;	/* unicode (where it was loaded from) */
     PyObject *co_name;		/* unicode (name, for reference) */
diff --git a/Include/dictobject.h b/Include/dictobject.h
index ef122bd..09dff59 100644
--- a/Include/dictobject.h
+++ b/Include/dictobject.h
@@ -50,6 +50,10 @@
 
 PyAPI_FUNC(PyObject *) PyDict_New(void);
 PyAPI_FUNC(PyObject *) PyDict_GetItem(PyObject *mp, PyObject *key);
+#ifndef Py_LIMITED_API
+PyAPI_FUNC(PyObject *) _PyDict_GetItem_KnownHash(PyObject *mp, PyObject *key,
+                                       Py_hash_t hash);
+#endif
 PyAPI_FUNC(PyObject *) PyDict_GetItemWithError(PyObject *mp, PyObject *key);
 PyAPI_FUNC(PyObject *) _PyDict_GetItemIdWithError(PyObject *dp,
                                                   struct _Py_Identifier *key);
@@ -58,6 +62,10 @@
     PyObject *mp, PyObject *key, PyObject *defaultobj);
 #endif
 PyAPI_FUNC(int) PyDict_SetItem(PyObject *mp, PyObject *key, PyObject *item);
+#ifndef Py_LIMITED_API
+PyAPI_FUNC(int) _PyDict_SetItem_KnownHash(PyObject *mp, PyObject *key,
+                                          PyObject *item, Py_hash_t hash);
+#endif
 PyAPI_FUNC(int) PyDict_DelItem(PyObject *mp, PyObject *key);
 PyAPI_FUNC(void) PyDict_Clear(PyObject *mp);
 PyAPI_FUNC(int) PyDict_Next(
diff --git a/Include/object.h b/Include/object.h
index 7584d4c..f3c87eb 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -275,6 +275,9 @@
     binaryfunc nb_inplace_true_divide;
 
     unaryfunc nb_index;
+
+    binaryfunc nb_matrix_multiply;
+    binaryfunc nb_inplace_matrix_multiply;
 } PyNumberMethods;
 
 typedef struct {
diff --git a/Include/objimpl.h b/Include/objimpl.h
index 3f21b70..65b6d91 100644
--- a/Include/objimpl.h
+++ b/Include/objimpl.h
@@ -95,6 +95,7 @@
    the raw memory.
 */
 PyAPI_FUNC(void *) PyObject_Malloc(size_t size);
+PyAPI_FUNC(void *) PyObject_Calloc(size_t nelem, size_t elsize);
 PyAPI_FUNC(void *) PyObject_Realloc(void *ptr, size_t new_size);
 PyAPI_FUNC(void) PyObject_Free(void *ptr);
 
@@ -321,7 +322,8 @@
         (!PyTuple_CheckExact(obj) || _PyObject_GC_IS_TRACKED(obj)))
 #endif /* Py_LIMITED_API */
 
-PyAPI_FUNC(PyObject *) _PyObject_GC_Malloc(size_t);
+PyAPI_FUNC(PyObject *) _PyObject_GC_Malloc(size_t size);
+PyAPI_FUNC(PyObject *) _PyObject_GC_Calloc(size_t size);
 PyAPI_FUNC(PyObject *) _PyObject_GC_New(PyTypeObject *);
 PyAPI_FUNC(PyVarObject *) _PyObject_GC_NewVar(PyTypeObject *, Py_ssize_t);
 PyAPI_FUNC(void) PyObject_GC_Track(void *);
diff --git a/Include/opcode.h b/Include/opcode.h
index 0936f2d..0638b54 100644
--- a/Include/opcode.h
+++ b/Include/opcode.h
@@ -1,3 +1,4 @@
+/* Auto-generated by Tools/scripts/generate_opcode_h.py */
 #ifndef Py_OPCODE_H
 #define Py_OPCODE_H
 #ifdef __cplusplus
@@ -5,141 +6,111 @@
 #endif
 
 
-/* Instruction opcodes for compiled code */
-
-#define POP_TOP         1
-#define ROT_TWO         2
-#define ROT_THREE       3
-#define DUP_TOP         4
-#define DUP_TOP_TWO     5
-#define NOP             9
-
-#define UNARY_POSITIVE  10
-#define UNARY_NEGATIVE  11
-#define UNARY_NOT       12
-
-#define UNARY_INVERT    15
-
-#define BINARY_POWER    19
-
-#define BINARY_MULTIPLY 20
-
-#define BINARY_MODULO   22
-#define BINARY_ADD      23
-#define BINARY_SUBTRACT 24
-#define BINARY_SUBSCR   25
-#define BINARY_FLOOR_DIVIDE 26
-#define BINARY_TRUE_DIVIDE 27
-#define INPLACE_FLOOR_DIVIDE 28
-#define INPLACE_TRUE_DIVIDE 29
-
-#define STORE_MAP       54
-#define INPLACE_ADD     55
-#define INPLACE_SUBTRACT        56
-#define INPLACE_MULTIPLY        57
-
-#define INPLACE_MODULO  59
-#define STORE_SUBSCR    60
-#define DELETE_SUBSCR   61
-
-#define BINARY_LSHIFT   62
-#define BINARY_RSHIFT   63
-#define BINARY_AND      64
-#define BINARY_XOR      65
-#define BINARY_OR       66
-#define INPLACE_POWER   67
-#define GET_ITER        68
-#define PRINT_EXPR      70
-#define LOAD_BUILD_CLASS 71
-#define YIELD_FROM      72
-
-#define INPLACE_LSHIFT  75
-#define INPLACE_RSHIFT  76
-#define INPLACE_AND     77
-#define INPLACE_XOR     78
-#define INPLACE_OR      79
-#define BREAK_LOOP      80
-#define WITH_CLEANUP    81
-
-#define RETURN_VALUE    83
-#define IMPORT_STAR     84
-
-#define YIELD_VALUE     86
-#define POP_BLOCK       87
-#define END_FINALLY     88
-#define POP_EXCEPT      89
-
-#define HAVE_ARGUMENT   90      /* Opcodes from here have an argument: */
-
-#define STORE_NAME      90      /* Index in name list */
-#define DELETE_NAME     91      /* "" */
-#define UNPACK_SEQUENCE 92      /* Number of sequence items */
-#define FOR_ITER        93
-#define UNPACK_EX       94      /* Num items before variable part +
-                                   (Num items after variable part << 8) */
-
-#define STORE_ATTR      95      /* Index in name list */
-#define DELETE_ATTR     96      /* "" */
-#define STORE_GLOBAL    97      /* "" */
-#define DELETE_GLOBAL   98      /* "" */
-
-#define LOAD_CONST      100     /* Index in const list */
-#define LOAD_NAME       101     /* Index in name list */
-#define BUILD_TUPLE     102     /* Number of tuple items */
-#define BUILD_LIST      103     /* Number of list items */
-#define BUILD_SET       104     /* Number of set items */
-#define BUILD_MAP       105     /* Always zero for now */
-#define LOAD_ATTR       106     /* Index in name list */
-#define COMPARE_OP      107     /* Comparison operator */
-#define IMPORT_NAME     108     /* Index in name list */
-#define IMPORT_FROM     109     /* Index in name list */
-
-#define JUMP_FORWARD    110     /* Number of bytes to skip */
-#define JUMP_IF_FALSE_OR_POP 111        /* Target byte offset from beginning of code */
-#define JUMP_IF_TRUE_OR_POP 112 /* "" */
-#define JUMP_ABSOLUTE   113     /* "" */
-#define POP_JUMP_IF_FALSE 114   /* "" */
-#define POP_JUMP_IF_TRUE 115    /* "" */
-
-#define LOAD_GLOBAL     116     /* Index in name list */
-
-#define CONTINUE_LOOP   119     /* Start of loop (absolute) */
-#define SETUP_LOOP      120     /* Target address (relative) */
-#define SETUP_EXCEPT    121     /* "" */
-#define SETUP_FINALLY   122     /* "" */
-
-#define LOAD_FAST       124     /* Local variable number */
-#define STORE_FAST      125     /* Local variable number */
-#define DELETE_FAST     126     /* Local variable number */
-
-#define RAISE_VARARGS   130     /* Number of raise arguments (1, 2 or 3) */
-/* CALL_FUNCTION_XXX opcodes defined below depend on this definition */
-#define CALL_FUNCTION   131     /* #args + (#kwargs<<8) */
-#define MAKE_FUNCTION   132     /* #defaults + #kwdefaults<<8 + #annotations<<16 */
-#define BUILD_SLICE     133     /* Number of items */
-
-#define MAKE_CLOSURE    134     /* same as MAKE_FUNCTION */
-#define LOAD_CLOSURE    135     /* Load free variable from closure */
-#define LOAD_DEREF      136     /* Load and dereference from closure cell */ 
-#define STORE_DEREF     137     /* Store into cell */ 
-#define DELETE_DEREF    138     /* Delete closure cell */ 
-
-/* The next 3 opcodes must be contiguous and satisfy
-   (CALL_FUNCTION_VAR - CALL_FUNCTION) & 3 == 1  */
-#define CALL_FUNCTION_VAR          140  /* #args + (#kwargs<<8) */
-#define CALL_FUNCTION_KW           141  /* #args + (#kwargs<<8) */
-#define CALL_FUNCTION_VAR_KW       142  /* #args + (#kwargs<<8) */
-
-#define SETUP_WITH 143
-
-/* Support for opargs more than 16 bits long */
-#define EXTENDED_ARG  144
-
-#define LIST_APPEND     145
-#define SET_ADD         146
-#define MAP_ADD         147
-
-#define LOAD_CLASSDEREF   148
+    /* Instruction opcodes for compiled code */
+#define POP_TOP             	1  
+#define ROT_TWO             	2  
+#define ROT_THREE           	3  
+#define DUP_TOP             	4  
+#define DUP_TOP_TWO         	5  
+#define NOP                 	9  
+#define UNARY_POSITIVE      	10 
+#define UNARY_NEGATIVE      	11 
+#define UNARY_NOT           	12 
+#define UNARY_INVERT        	15 
+#define BINARY_MATRIX_MULTIPLY	16 
+#define INPLACE_MATRIX_MULTIPLY	17 
+#define BINARY_POWER        	19 
+#define BINARY_MULTIPLY     	20 
+#define BINARY_MODULO       	22 
+#define BINARY_ADD          	23 
+#define BINARY_SUBTRACT     	24 
+#define BINARY_SUBSCR       	25 
+#define BINARY_FLOOR_DIVIDE 	26 
+#define BINARY_TRUE_DIVIDE  	27 
+#define INPLACE_FLOOR_DIVIDE	28 
+#define INPLACE_TRUE_DIVIDE 	29 
+#define STORE_MAP           	54 
+#define INPLACE_ADD         	55 
+#define INPLACE_SUBTRACT    	56 
+#define INPLACE_MULTIPLY    	57 
+#define INPLACE_MODULO      	59 
+#define STORE_SUBSCR        	60 
+#define DELETE_SUBSCR       	61 
+#define BINARY_LSHIFT       	62 
+#define BINARY_RSHIFT       	63 
+#define BINARY_AND          	64 
+#define BINARY_XOR          	65 
+#define BINARY_OR           	66 
+#define INPLACE_POWER       	67 
+#define GET_ITER            	68 
+#define PRINT_EXPR          	70 
+#define LOAD_BUILD_CLASS    	71 
+#define YIELD_FROM          	72 
+#define INPLACE_LSHIFT      	75 
+#define INPLACE_RSHIFT      	76 
+#define INPLACE_AND         	77 
+#define INPLACE_XOR         	78 
+#define INPLACE_OR          	79 
+#define BREAK_LOOP          	80 
+#define WITH_CLEANUP        	81 
+#define RETURN_VALUE        	83 
+#define IMPORT_STAR         	84 
+#define YIELD_VALUE         	86 
+#define POP_BLOCK           	87 
+#define END_FINALLY         	88 
+#define POP_EXCEPT          	89 
+#define HAVE_ARGUMENT       	90 
+#define STORE_NAME          	90 
+#define DELETE_NAME         	91 
+#define UNPACK_SEQUENCE     	92 
+#define FOR_ITER            	93 
+#define UNPACK_EX           	94 
+#define STORE_ATTR          	95 
+#define DELETE_ATTR         	96 
+#define STORE_GLOBAL        	97 
+#define DELETE_GLOBAL       	98 
+#define LOAD_CONST          	100
+#define LOAD_NAME           	101
+#define BUILD_TUPLE         	102
+#define BUILD_LIST          	103
+#define BUILD_SET           	104
+#define BUILD_MAP           	105
+#define LOAD_ATTR           	106
+#define COMPARE_OP          	107
+#define IMPORT_NAME         	108
+#define IMPORT_FROM         	109
+#define JUMP_FORWARD        	110
+#define JUMP_IF_FALSE_OR_POP	111
+#define JUMP_IF_TRUE_OR_POP 	112
+#define JUMP_ABSOLUTE       	113
+#define POP_JUMP_IF_FALSE   	114
+#define POP_JUMP_IF_TRUE    	115
+#define LOAD_GLOBAL         	116
+#define CONTINUE_LOOP       	119
+#define SETUP_LOOP          	120
+#define SETUP_EXCEPT        	121
+#define SETUP_FINALLY       	122
+#define LOAD_FAST           	124
+#define STORE_FAST          	125
+#define DELETE_FAST         	126
+#define RAISE_VARARGS       	130
+#define CALL_FUNCTION       	131
+#define MAKE_FUNCTION       	132
+#define BUILD_SLICE         	133
+#define MAKE_CLOSURE        	134
+#define LOAD_CLOSURE        	135
+#define LOAD_DEREF          	136
+#define STORE_DEREF         	137
+#define DELETE_DEREF        	138
+#define CALL_FUNCTION_VAR   	140
+#define CALL_FUNCTION_KW    	141
+#define CALL_FUNCTION_VAR_KW	142
+#define SETUP_WITH          	143
+#define EXTENDED_ARG        	144
+#define LIST_APPEND         	145
+#define SET_ADD             	146
+#define MAP_ADD             	147
+#define LOAD_CLASSDEREF     	148
 
 /* EXCEPT_HANDLER is a special, implicit block type which is created when
    entering an except handler. It is not an opcode but we define it here
@@ -148,8 +119,9 @@
 #define EXCEPT_HANDLER 257
 
 
-enum cmp_op {PyCmp_LT=Py_LT, PyCmp_LE=Py_LE, PyCmp_EQ=Py_EQ, PyCmp_NE=Py_NE, PyCmp_GT=Py_GT, PyCmp_GE=Py_GE,
-             PyCmp_IN, PyCmp_NOT_IN, PyCmp_IS, PyCmp_IS_NOT, PyCmp_EXC_MATCH, PyCmp_BAD};
+enum cmp_op {PyCmp_LT=Py_LT, PyCmp_LE=Py_LE, PyCmp_EQ=Py_EQ, PyCmp_NE=Py_NE,
+                PyCmp_GT=Py_GT, PyCmp_GE=Py_GE, PyCmp_IN, PyCmp_NOT_IN,
+                PyCmp_IS, PyCmp_IS_NOT, PyCmp_EXC_MATCH, PyCmp_BAD};
 
 #define HAS_ARG(op) ((op) >= HAVE_ARGUMENT)
 
diff --git a/Include/patchlevel.h b/Include/patchlevel.h
index 960d7d0..16124f5 100644
--- a/Include/patchlevel.h
+++ b/Include/patchlevel.h
@@ -17,13 +17,13 @@
 /* Version parsed out into numeric values */
 /*--start constants--*/
 #define PY_MAJOR_VERSION	3
-#define PY_MINOR_VERSION	4
-#define PY_MICRO_VERSION	1
-#define PY_RELEASE_LEVEL	PY_RELEASE_LEVEL_FINAL
+#define PY_MINOR_VERSION	5
+#define PY_MICRO_VERSION	0
+#define PY_RELEASE_LEVEL	PY_RELEASE_LEVEL_ALPHA
 #define PY_RELEASE_SERIAL	0
 
 /* Version as a string */
-#define PY_VERSION      	"3.4.1"
+#define PY_VERSION      	"3.5.0a0"
 /*--end constants--*/
 
 /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2.
diff --git a/Include/pymacro.h b/Include/pymacro.h
index 7997c55..3f6f5dc 100644
--- a/Include/pymacro.h
+++ b/Include/pymacro.h
@@ -1,13 +1,26 @@
 #ifndef Py_PYMACRO_H
 #define Py_PYMACRO_H
 
+/* Minimum value between x and y */
 #define Py_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+/* Maximum value between x and y */
 #define Py_MAX(x, y) (((x) > (y)) ? (x) : (y))
 
+/* Absolute value of the number x */
+#define Py_ABS(x) ((x) < 0 ? -(x) : (x))
+
+#define _Py_XSTRINGIFY(x) #x
+
+/* Convert the argument to a string. For example, Py_STRINGIFY(123) is replaced
+   with "123" by the preprocessor. Defines are also replaced by their value.
+   For example Py_STRINGIFY(__LINE__) is replaced by the line number, not
+   by "__LINE__". */
+#define Py_STRINGIFY(x) _Py_XSTRINGIFY(x)
+
 /* Argument must be a char or an int in [-128, 127] or [0, 255]. */
 #define Py_CHARMASK(c) ((unsigned char)((c) & 0xff))
 
-
 /* Assert a build-time dependency, as an expression.
 
    Your compile will fail if the condition isn't true, or can't be evaluated
diff --git a/Include/pymem.h b/Include/pymem.h
index 2372b86..7a8dd43 100644
--- a/Include/pymem.h
+++ b/Include/pymem.h
@@ -13,6 +13,7 @@
 
 #ifndef Py_LIMITED_API
 PyAPI_FUNC(void *) PyMem_RawMalloc(size_t size);
+PyAPI_FUNC(void *) PyMem_RawCalloc(size_t nelem, size_t elsize);
 PyAPI_FUNC(void *) PyMem_RawRealloc(void *ptr, size_t new_size);
 PyAPI_FUNC(void) PyMem_RawFree(void *ptr);
 #endif
@@ -57,6 +58,7 @@
 */
 
 PyAPI_FUNC(void *) PyMem_Malloc(size_t size);
+PyAPI_FUNC(void *) PyMem_Calloc(size_t nelem, size_t elsize);
 PyAPI_FUNC(void *) PyMem_Realloc(void *ptr, size_t new_size);
 PyAPI_FUNC(void) PyMem_Free(void *ptr);
 
@@ -132,6 +134,9 @@
     /* allocate a memory block */
     void* (*malloc) (void *ctx, size_t size);
 
+    /* allocate a memory block initialized by zeros */
+    void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
+
     /* allocate or resize a memory block */
     void* (*realloc) (void *ctx, void *ptr, size_t new_size);
 
diff --git a/Include/pyport.h b/Include/pyport.h
index c706213..69deb9f 100644
--- a/Include/pyport.h
+++ b/Include/pyport.h
@@ -588,6 +588,25 @@
     } while (0)
 #endif
 
+#ifdef HAVE_GCC_ASM_FOR_MC68881
+#define HAVE_PY_SET_53BIT_PRECISION 1
+#define _Py_SET_53BIT_PRECISION_HEADER \
+  unsigned int old_fpcr, new_fpcr
+#define _Py_SET_53BIT_PRECISION_START					\
+  do {									\
+    __asm__ ("fmove.l %%fpcr,%0" : "=g" (old_fpcr));			\
+    /* Set double precision / round to nearest.  */			\
+    new_fpcr = (old_fpcr & ~0xf0) | 0x80;				\
+    if (new_fpcr != old_fpcr)						\
+      __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (new_fpcr));	\
+  } while (0)
+#define _Py_SET_53BIT_PRECISION_END					\
+  do {									\
+    if (new_fpcr != old_fpcr)						\
+      __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (old_fpcr));	\
+  } while (0)
+#endif
+
 /* default definitions are empty */
 #ifndef HAVE_PY_SET_53BIT_PRECISION
 #define _Py_SET_53BIT_PRECISION_HEADER
diff --git a/Include/token.h b/Include/token.h
index 905022b..2b213ee 100644
--- a/Include/token.h
+++ b/Include/token.h
@@ -58,13 +58,14 @@
 #define DOUBLESTAREQUAL	46
 #define DOUBLESLASH	47
 #define DOUBLESLASHEQUAL 48
-#define AT              49	
-#define RARROW          50
-#define ELLIPSIS        51
+#define AT              49
+#define ATEQUAL		50
+#define RARROW          51
+#define ELLIPSIS        52
 /* Don't forget to update the table _PyParser_TokenNames in tokenizer.c! */
-#define OP		52
-#define ERRORTOKEN	53
-#define N_TOKENS	54
+#define OP		53
+#define ERRORTOKEN	54
+#define N_TOKENS	55
 
 /* Special definitions for cooperation with parser */
 
diff --git a/Include/typeslots.h b/Include/typeslots.h
index ad3cdfb..da2e87c 100644
--- a/Include/typeslots.h
+++ b/Include/typeslots.h
@@ -74,3 +74,5 @@
 #define Py_tp_members 72
 #define Py_tp_getset 73
 #define Py_tp_free 74
+#define Py_nb_matrix_multiply 75
+#define Py_nb_inplace_matrix_multiply 76
diff --git a/Lib/_collections_abc.py b/Lib/_collections_abc.py
index faa1ff2..6281723 100644
--- a/Lib/_collections_abc.py
+++ b/Lib/_collections_abc.py
@@ -440,6 +440,8 @@
 
 class MappingView(Sized):
 
+    __slots__ = '_mapping',
+
     def __init__(self, mapping):
         self._mapping = mapping
 
@@ -452,6 +454,8 @@
 
 class KeysView(MappingView, Set):
 
+    __slots__ = ()
+
     @classmethod
     def _from_iterable(self, it):
         return set(it)
@@ -467,6 +471,8 @@
 
 class ItemsView(MappingView, Set):
 
+    __slots__ = ()
+
     @classmethod
     def _from_iterable(self, it):
         return set(it)
@@ -489,6 +495,8 @@
 
 class ValuesView(MappingView):
 
+    __slots__ = ()
+
     def __contains__(self, value):
         for key in self._mapping:
             if value == self._mapping[key]:
diff --git a/Lib/asyncore.py b/Lib/asyncore.py
index 75481dd..37efa9b 100644
--- a/Lib/asyncore.py
+++ b/Lib/asyncore.py
@@ -404,20 +404,6 @@
                 if why.args[0] not in (ENOTCONN, EBADF):
                     raise
 
-    # cheap inheritance, used to pass all other attribute
-    # references to the underlying socket object.
-    def __getattr__(self, attr):
-        try:
-            retattr = getattr(self.socket, attr)
-        except AttributeError:
-            raise AttributeError("%s instance has no attribute '%s'"
-                                 %(self.__class__.__name__, attr))
-        else:
-            msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s " \
-                  "instead" % {'me' : self.__class__.__name__, 'attr' : attr}
-            warnings.warn(msg, DeprecationWarning, stacklevel=2)
-            return retattr
-
     # log and log_info may be overridden to provide more sophisticated
     # logging and warning methods. In general, log is for 'hit' logging
     # and 'log_info' is for informational, warning and error logging.
diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py
index d6deb6a..a55fee1 100644
--- a/Lib/collections/__init__.py
+++ b/Lib/collections/__init__.py
@@ -20,6 +20,23 @@
 ### OrderedDict
 ################################################################################
 
+class _OrderedDictKeysView(KeysView):
+
+    def __reversed__(self):
+        yield from reversed(self._mapping)
+
+class _OrderedDictItemsView(ItemsView):
+
+    def __reversed__(self):
+        for key in reversed(self._mapping):
+            yield (key, self._mapping[key])
+
+class _OrderedDictValuesView(ValuesView):
+
+    def __reversed__(self):
+        for key in reversed(self._mapping):
+            yield self._mapping[key]
+
 class _Link(object):
     __slots__ = 'prev', 'next', 'key', '__weakref__'
 
@@ -79,6 +96,8 @@
         link_next = link.next
         link_prev.next = link_next
         link_next.prev = link_prev
+        link.prev = None
+        link.next = None
 
     def __iter__(self):
         'od.__iter__() <==> iter(od)'
@@ -162,9 +181,19 @@
         return size
 
     update = __update = MutableMapping.update
-    keys = MutableMapping.keys
-    values = MutableMapping.values
-    items = MutableMapping.items
+
+    def keys(self):
+        "D.keys() -> a set-like object providing a view on D's keys"
+        return _OrderedDictKeysView(self)
+
+    def items(self):
+        "D.items() -> a set-like object providing a view on D's items"
+        return _OrderedDictItemsView(self)
+
+    def values(self):
+        "D.values() -> an object providing a view on D's values"
+        return _OrderedDictValuesView(self)
+
     __ne__ = MutableMapping.__ne__
 
     __marker = object()
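
The dedicated view classes make reversed() work on the views themselves, not
just on the OrderedDict. A usage sketch of the behaviour this hunk adds:

    from collections import OrderedDict

    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert list(reversed(od.keys())) == ['c', 'b', 'a']
    assert list(reversed(od.items())) == [('c', 3), ('b', 2), ('a', 1)]
    assert list(reversed(od.values())) == [3, 2, 1]
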
diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py
index 07b5225..1299390 100644
--- a/Lib/concurrent/futures/process.py
+++ b/Lib/concurrent/futures/process.py
@@ -334,6 +334,9 @@
         if max_workers is None:
             self._max_workers = os.cpu_count() or 1
         else:
+            if max_workers <= 0:
+                raise ValueError("max_workers must be greater than 0")
+
             self._max_workers = max_workers
 
         # Make the call queue slightly larger than the number of processes to
diff --git a/Lib/concurrent/futures/thread.py b/Lib/concurrent/futures/thread.py
index f9beb0f..8d6081c 100644
--- a/Lib/concurrent/futures/thread.py
+++ b/Lib/concurrent/futures/thread.py
@@ -87,6 +87,9 @@
             max_workers: The maximum number of threads that can be used to
                 execute the given calls.
         """
+        if max_workers <= 0:
+            raise ValueError("max_workers must be greater than 0")
+
         self._max_workers = max_workers
         self._work_queue = queue.Queue()
         self._threads = set()
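
Both ProcessPoolExecutor and ThreadPoolExecutor now reject a non-positive
max_workers up front instead of accepting an executor that can never run any
work. A sketch of the new behaviour:

    from concurrent.futures import ThreadPoolExecutor

    try:
        ThreadPoolExecutor(max_workers=0)
    except ValueError as exc:
        assert 'max_workers must be greater than 0' in str(exc)

    # A positive worker count behaves as before.
    with ThreadPoolExecutor(max_workers=2) as pool:
        assert pool.submit(pow, 2, 10).result() == 1024
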
diff --git a/Lib/copy.py b/Lib/copy.py
index bb8840e..383609b 100644
--- a/Lib/copy.py
+++ b/Lib/copy.py
@@ -221,17 +221,15 @@
 d[list] = _deepcopy_list
 
 def _deepcopy_tuple(x, memo):
-    y = []
-    for a in x:
-        y.append(deepcopy(a, memo))
+    y = [deepcopy(a, memo) for a in x]
     # We're not going to put the tuple in the memo, but it's still important we
     # check for it, in case the tuple contains recursive mutable structures.
     try:
         return memo[id(x)]
     except KeyError:
         pass
-    for i in range(len(x)):
-        if x[i] is not y[i]:
+    for k, j in zip(x, y):
+        if k is not j:
             y = tuple(y)
             break
     else:
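
The list-comprehension rewrite keeps _deepcopy_tuple's semantics: when every
element's deep copy is identical to the original element, the original tuple is
returned unchanged; otherwise a new tuple is built. Sketch:

    import copy

    atomic = (1, 'two', 3.0)
    assert copy.deepcopy(atomic) is atomic          # nothing mutable: reuse it

    nested = (1, [2, 3])
    duplicate = copy.deepcopy(nested)
    assert duplicate is not nested                  # list element forces a copy
    assert duplicate[1] is not nested[1]
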
diff --git a/Lib/datetime.py b/Lib/datetime.py
index 1789714..3c534d0 100644
--- a/Lib/datetime.py
+++ b/Lib/datetime.py
@@ -1249,12 +1249,6 @@
         _check_tzinfo_arg(tzinfo)
         return time(hour, minute, second, microsecond, tzinfo)
 
-    def __bool__(self):
-        if self.second or self.microsecond:
-            return True
-        offset = self.utcoffset() or timedelta(0)
-        return timedelta(hours=self.hour, minutes=self.minute) != offset
-
     # Pickle support.
 
     def _getstate(self):
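
Removing time.__bool__ means every time instance is now truthy; previously a
naive midnight (and any aware time whose wall-clock value equalled its UTC
offset) evaluated to False. Sketch, assuming the patched pure-Python module:

    from datetime import time

    assert bool(time(0, 0)) is True      # used to be False
    assert bool(time(12, 30)) is True
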
diff --git a/Lib/decimal.py b/Lib/decimal.py
index 5b98473..6d0b34c 100644
--- a/Lib/decimal.py
+++ b/Lib/decimal.py
@@ -2523,7 +2523,7 @@
             end -= 1
         return _dec_from_triple(dup._sign, dup._int[:end], exp)
 
-    def quantize(self, exp, rounding=None, context=None, watchexp=True):
+    def quantize(self, exp, rounding=None, context=None):
         """Quantize self so its exponent is the same as that of exp.
 
         Similar to self._rescale(exp._exp) but with error checking.
@@ -2546,16 +2546,6 @@
                 return context._raise_error(InvalidOperation,
                                         'quantize with one INF')
 
-        # if we're not watching exponents, do a simple rescale
-        if not watchexp:
-            ans = self._rescale(exp._exp, rounding)
-            # raise Inexact and Rounded where appropriate
-            if ans._exp > self._exp:
-                context._raise_error(Rounded)
-                if ans != self:
-                    context._raise_error(Inexact)
-            return ans
-
         # exp._exp should be between Etiny and Emax
         if not (context.Etiny() <= exp._exp <= context.Emax):
             return context._raise_error(InvalidOperation,
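
With the legacy watchexp argument gone, quantize() keeps its usual role of
rounding to the exponent of the supplied Decimal, subject to the context's
exponent limits. Sketch:

    from decimal import Decimal, ROUND_HALF_UP

    price = Decimal('1.23456')
    cents = price.quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
    assert cents == Decimal('1.23')
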
diff --git a/Lib/difflib.py b/Lib/difflib.py
index 38dfef4..48d7e57 100644
--- a/Lib/difflib.py
+++ b/Lib/difflib.py
@@ -852,10 +852,9 @@
           and return true iff the string is junk. The module-level function
           `IS_LINE_JUNK` may be used to filter out lines without visible
           characters, except for at most one splat ('#').  It is recommended
-          to leave linejunk None; as of Python 2.3, the underlying
-          SequenceMatcher class has grown an adaptive notion of "noise" lines
-          that's better than any static definition the author has ever been
-          able to craft.
+          to leave linejunk None; the underlying SequenceMatcher class has
+          an adaptive notion of "noise" lines that's better than any static
+          definition the author has ever been able to craft.
 
         - `charjunk`: A function that should accept a string of length 1. The
           module-level function `IS_CHARACTER_JUNK` may be used to filter out
@@ -1298,17 +1297,18 @@
     Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
 
     Optional keyword parameters `linejunk` and `charjunk` are for filter
-    functions (or None):
+    functions, or can be None:
 
-    - linejunk: A function that should accept a single string argument, and
+    - linejunk: A function that should accept a single string argument and
       return true iff the string is junk.  The default is None, and is
-      recommended; as of Python 2.3, an adaptive notion of "noise" lines is
-      used that does a good job on its own.
+      recommended; the underlying SequenceMatcher class has an adaptive
+      notion of "noise" lines.
 
-    - charjunk: A function that should accept a string of length 1. The
-      default is module-level function IS_CHARACTER_JUNK, which filters out
-      whitespace characters (a blank or tab; note: bad idea to include newline
-      in this!).
+    - charjunk: A function that accepts a character (string of length
+      1), and returns true iff the character is junk. The default is
+      the module-level function IS_CHARACTER_JUNK, which filters out
+      whitespace characters (a blank or tab; note: it's a bad idea to
+      include newline in this!).
 
     Tools/scripts/ndiff.py is a command-line front-end to this function.
 
@@ -1679,7 +1679,7 @@
         tabsize -- tab stop spacing, defaults to 8.
         wrapcolumn -- column number where lines are broken and wrapped,
             defaults to None where lines are not wrapped.
-        linejunk,charjunk -- keyword arguments passed into ndiff() (used to by
+        linejunk,charjunk -- keyword arguments passed into ndiff() (used by
             HtmlDiff() to generate the side by side HTML differences).  See
             ndiff() documentation for argument default values and descriptions.
         """
diff --git a/Lib/distutils/__init__.py b/Lib/distutils/__init__.py
index 9463a35..328bea6 100644
--- a/Lib/distutils/__init__.py
+++ b/Lib/distutils/__init__.py
@@ -13,5 +13,5 @@
 # Updated automatically by the Python release process.
 #
 #--start constants--
-__version__ = "3.4.1"
+__version__ = "3.5.0a0"
 #--end constants--
diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py
index d6762e4..b906435 100644
--- a/Lib/distutils/command/upload.py
+++ b/Lib/distutils/command/upload.py
@@ -1,24 +1,21 @@
-"""distutils.command.upload
+"""
+distutils.command.upload
 
-Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
+Implements the Distutils 'upload' subcommand (upload package to a package
+index).
+"""
 
-from distutils.errors import *
-from distutils.core import PyPIRCCommand
-from distutils.spawn import spawn
-from distutils import log
-import sys
-import os, io
-import socket
+import os
+import io
 import platform
+import hashlib
 from base64 import standard_b64encode
 from urllib.request import urlopen, Request, HTTPError
 from urllib.parse import urlparse
-
-# this keeps compatibility for 2.3 and 2.4
-if sys.version < "2.5":
-    from md5 import md5
-else:
-    from hashlib import md5
+from distutils.errors import DistutilsOptionError
+from distutils.core import PyPIRCCommand
+from distutils.spawn import spawn
+from distutils import log
 
 class upload(PyPIRCCommand):
 
@@ -60,7 +57,8 @@
 
     def run(self):
         if not self.distribution.dist_files:
-            raise DistutilsOptionError("No dist file created in earlier command")
+            msg = "No dist file created in earlier command"
+            raise DistutilsOptionError(msg)
         for command, pyversion, filename in self.distribution.dist_files:
             self.upload_file(command, pyversion, filename)
 
@@ -103,10 +101,10 @@
             'content': (os.path.basename(filename),content),
             'filetype': command,
             'pyversion': pyversion,
-            'md5_digest': md5(content).hexdigest(),
+            'md5_digest': hashlib.md5(content).hexdigest(),
 
             # additional meta-data
-            'metadata_version' : '1.0',
+            'metadata_version': '1.0',
             'summary': meta.get_description(),
             'home_page': meta.get_url(),
             'author': meta.get_contact(),
@@ -149,7 +147,7 @@
         for key, value in data.items():
             title = '\nContent-Disposition: form-data; name="%s"' % key
             # handle multiple entries for the same name
-            if type(value) != type([]):
+            if not isinstance(value, list):
                 value = [value]
             for value in value:
                 if type(value) is tuple:
@@ -167,13 +165,15 @@
         body.write(b"\n")
         body = body.getvalue()
 
-        self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
+        msg = "Submitting %s to %s" % (filename, self.repository)
+        self.announce(msg, log.INFO)
 
         # build the Request
-        headers = {'Content-type':
-                        'multipart/form-data; boundary=%s' % boundary,
-                   'Content-length': str(len(body)),
-                   'Authorization': auth}
+        headers = {
+            'Content-type': 'multipart/form-data; boundary=%s' % boundary,
+            'Content-length': str(len(body)),
+            'Authorization': auth,
+        }
 
         request = Request(self.repository, data=body,
                           headers=headers)
diff --git a/Lib/distutils/extension.py b/Lib/distutils/extension.py
index a93655a..cc04a18 100644
--- a/Lib/distutils/extension.py
+++ b/Lib/distutils/extension.py
@@ -131,6 +131,14 @@
             msg = "Unknown Extension options: %s" % options
             warnings.warn(msg)
 
+    def __repr__(self):
+        return '<%s.%s(%r) at %#x>' % (
+            self.__class__.__module__,
+            self.__class__.__name__,
+            self.name,
+            id(self))
+
+
 def read_setup_file(filename):
     """Reads a Setup file and returns Extension instances."""
     from distutils.sysconfig import (parse_makefile, expand_makefile_vars,
diff --git a/Lib/doctest.py b/Lib/doctest.py
index d212ad6..be824f4 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -2376,15 +2376,6 @@
         suite = _DocTestSuite()
         suite.addTest(SkipDocTestCase(module))
         return suite
-    elif not tests:
-        # Why do we want to do this? Because it reveals a bug that might
-        # otherwise be hidden.
-        # It is probably a bug that this exception is not also raised if the
-        # number of doctest examples in tests is zero (i.e. if no doctest
-        # examples were found).  However, we should probably not be raising
-        # an exception at all here, though it is too late to make this change
-        # for a maintenance release.  See also issue #14649.
-        raise ValueError(module, "has no docstrings")
 
     tests.sort()
     suite = _DocTestSuite()
diff --git a/Lib/encodings/cp65001.py b/Lib/encodings/cp65001.py
index 287eb87..95cb2ae 100644
--- a/Lib/encodings/cp65001.py
+++ b/Lib/encodings/cp65001.py
@@ -11,20 +11,23 @@
 ### Codec APIs
 
 encode = functools.partial(codecs.code_page_encode, 65001)
-decode = functools.partial(codecs.code_page_decode, 65001)
+_decode = functools.partial(codecs.code_page_decode, 65001)
+
+def decode(input, errors='strict'):
+    return codecs.code_page_decode(65001, input, errors, True)
 
 class IncrementalEncoder(codecs.IncrementalEncoder):
     def encode(self, input, final=False):
         return encode(input, self.errors)[0]
 
 class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
-    _buffer_decode = decode
+    _buffer_decode = _decode
 
 class StreamWriter(codecs.StreamWriter):
     encode = encode
 
 class StreamReader(codecs.StreamReader):
-    decode = decode
+    decode = _decode
 
 ### encodings module API
 
diff --git a/Lib/ensurepip/__init__.py b/Lib/ensurepip/__init__.py
index 84c2125..4af1b1d 100644
--- a/Lib/ensurepip/__init__.py
+++ b/Lib/ensurepip/__init__.py
@@ -8,7 +8,7 @@
 __all__ = ["version", "bootstrap"]
 
 
-_SETUPTOOLS_VERSION = "2.1"
+_SETUPTOOLS_VERSION = "3.6"
 
 _PIP_VERSION = "1.5.6"
 
diff --git a/Lib/ensurepip/_bundled/setuptools-2.1-py2.py3-none-any.whl b/Lib/ensurepip/_bundled/setuptools-3.6-py2.py3-none-any.whl
similarity index 66%
rename from Lib/ensurepip/_bundled/setuptools-2.1-py2.py3-none-any.whl
rename to Lib/ensurepip/_bundled/setuptools-3.6-py2.py3-none-any.whl
index ed77b59..f0ffcfc 100644
--- a/Lib/ensurepip/_bundled/setuptools-2.1-py2.py3-none-any.whl
+++ b/Lib/ensurepip/_bundled/setuptools-3.6-py2.py3-none-any.whl
Binary files differ
diff --git a/Lib/formatter.py b/Lib/formatter.py
index d8cca52..769bd6a 100644
--- a/Lib/formatter.py
+++ b/Lib/formatter.py
@@ -21,7 +21,7 @@
 import sys
 import warnings
 warnings.warn('the formatter module is deprecated and will be removed in '
-              'Python 3.6', PendingDeprecationWarning)
+              'Python 3.6', DeprecationWarning)
 
 
 AS_IS = None
diff --git a/Lib/fractions.py b/Lib/fractions.py
index 79e83ff..43f146f 100644
--- a/Lib/fractions.py
+++ b/Lib/fractions.py
@@ -70,7 +70,7 @@
     __slots__ = ('_numerator', '_denominator')
 
     # We're immutable, so use __new__ not __init__
-    def __new__(cls, numerator=0, denominator=None):
+    def __new__(cls, numerator=0, denominator=None, _normalize=True):
         """Constructs a Rational.
 
         Takes a string like '3/2' or '1.5', another Rational instance, a
@@ -165,9 +165,12 @@
 
         if denominator == 0:
             raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
-        g = gcd(numerator, denominator)
-        self._numerator = numerator // g
-        self._denominator = denominator // g
+        if _normalize:
+            g = gcd(numerator, denominator)
+            numerator //= g
+            denominator //= g
+        self._numerator = numerator
+        self._denominator = denominator
         return self
 
     @classmethod
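
The private _normalize flag lets internal callers that already hold a fraction
in lowest terms (the unary operators and integer powers changed in the hunks
below) skip the gcd() pass; the public constructor still normalizes. A sketch
of the unchanged public behaviour:

    from fractions import Fraction

    assert Fraction(2, 4) == Fraction(1, 2)    # constructor still reduces

    half = Fraction(1, 2)
    assert (-half).denominator == 2            # unary ops reuse reduced terms
    assert abs(-half) == half
    assert half ** 3 == Fraction(1, 8)
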
@@ -453,10 +456,12 @@
                 power = b.numerator
                 if power >= 0:
                     return Fraction(a._numerator ** power,
-                                    a._denominator ** power)
+                                    a._denominator ** power,
+                                    _normalize=False)
                 else:
                     return Fraction(a._denominator ** -power,
-                                    a._numerator ** -power)
+                                    a._numerator ** -power,
+                                    _normalize=False)
             else:
                 # A fractional power will generally produce an
                 # irrational number.
@@ -480,15 +485,15 @@
 
     def __pos__(a):
         """+a: Coerces a subclass instance to Fraction"""
-        return Fraction(a._numerator, a._denominator)
+        return Fraction(a._numerator, a._denominator, _normalize=False)
 
     def __neg__(a):
         """-a"""
-        return Fraction(-a._numerator, a._denominator)
+        return Fraction(-a._numerator, a._denominator, _normalize=False)
 
     def __abs__(a):
         """abs(a)"""
-        return Fraction(abs(a._numerator), a._denominator)
+        return Fraction(abs(a._numerator), a._denominator, _normalize=False)
 
     def __trunc__(a):
         """trunc(a)"""
diff --git a/Lib/heapq.py b/Lib/heapq.py
index d615239..41626c5 100644
--- a/Lib/heapq.py
+++ b/Lib/heapq.py
@@ -127,7 +127,7 @@
 __all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
            'nlargest', 'nsmallest', 'heappushpop']
 
-from itertools import islice, count, tee, chain
+from itertools import islice, count
 
 def heappush(heap, item):
     """Push item onto heap, maintaining the heap invariant."""
@@ -141,9 +141,8 @@
         returnitem = heap[0]
         heap[0] = lastelt
         _siftup(heap, 0)
-    else:
-        returnitem = lastelt
-    return returnitem
+        return returnitem
+    return lastelt
 
 def heapreplace(heap, item):
     """Pop and return the current smallest value, and add the new item.
@@ -179,12 +178,12 @@
     for i in reversed(range(n//2)):
         _siftup(x, i)
 
-def _heappushpop_max(heap, item):
-    """Maxheap version of a heappush followed by a heappop."""
-    if heap and item < heap[0]:
-        item, heap[0] = heap[0], item
-        _siftup_max(heap, 0)
-    return item
+def _heapreplace_max(heap, item):
+    """Maxheap version of a heappop followed by a heappush."""
+    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
+    heap[0] = item
+    _siftup_max(heap, 0)
+    return returnitem
 
 def _heapify_max(x):
     """Transform list into a maxheap, in-place, in O(len(x)) time."""
@@ -192,42 +191,6 @@
     for i in reversed(range(n//2)):
         _siftup_max(x, i)
 
-def nlargest(n, iterable):
-    """Find the n largest elements in a dataset.
-
-    Equivalent to:  sorted(iterable, reverse=True)[:n]
-    """
-    if n < 0:
-        return []
-    it = iter(iterable)
-    result = list(islice(it, n))
-    if not result:
-        return result
-    heapify(result)
-    _heappushpop = heappushpop
-    for elem in it:
-        _heappushpop(result, elem)
-    result.sort(reverse=True)
-    return result
-
-def nsmallest(n, iterable):
-    """Find the n smallest elements in a dataset.
-
-    Equivalent to:  sorted(iterable)[:n]
-    """
-    if n < 0:
-        return []
-    it = iter(iterable)
-    result = list(islice(it, n))
-    if not result:
-        return result
-    _heapify_max(result)
-    _heappushpop = _heappushpop_max
-    for elem in it:
-        _heappushpop(result, elem)
-    result.sort()
-    return result
-
 # 'heap' is a heap at all indices >= startpos, except possibly for pos.  pos
 # is the index of a leaf with a possibly out-of-order value.  Restore the
 # heap invariant.
@@ -345,6 +308,10 @@
     from _heapq import *
 except ImportError:
     pass
+try:
+    from _heapq import _heapreplace_max
+except ImportError:
+    pass
 
 def merge(*iterables):
     '''Merge multiple sorted inputs into a single sorted output.
@@ -385,22 +352,86 @@
         yield v
         yield from next.__self__
 
-# Extend the implementations of nsmallest and nlargest to use a key= argument
-_nsmallest = nsmallest
+
+# Algorithm notes for nlargest() and nsmallest()
+# ==============================================
+#
+# Make a single pass over the data while keeping the k most extreme values
+# in a heap.  Memory consumption is limited to keeping k values in a list.
+#
+# Measured performance for random inputs:
+#
+#                                   number of comparisons
+#    n inputs     k-extreme values  (average of 5 trials)   % more than min()
+# -------------   ----------------  ---------------------   -----------------
+#      1,000           100                  3,317               133.2%
+#     10,000           100                 14,046                40.5%
+#    100,000           100                105,749                 5.7%
+#  1,000,000           100              1,007,751                 0.8%
+# 10,000,000           100             10,009,401                 0.1%
+#
+# Theoretical number of comparisons for k smallest of n random inputs:
+#
+# Step   Comparisons                  Action
+# ----   --------------------------   ---------------------------
+#  1     1.66 * k                     heapify the first k-inputs
+#  2     n - k                        compare remaining elements to top of heap
+#  3     k * (1 + lg2(k)) * ln(n/k)   replace the topmost value on the heap
+#  4     k * lg2(k) - (k/2)           final sort of the k most extreme values
+# Combining and simplifying for a rough estimate gives:
+#        comparisons = n + k * (1 + log(n/k)) * (1 + log(k, 2))
+#
+# Computing the number of comparisons for step 3:
+# -----------------------------------------------
+# * For the i-th new value from the iterable, the probability of being in the
+#   k most extreme values is k/i.  For example, the probability of the 101st
+#   value seen being in the 100 most extreme values is 100/101.
+# * If the value is a new extreme value, the cost of inserting it into the
+#   heap is 1 + log(k, 2).
+# * The probability times the cost gives:
+#            (k/i) * (1 + log(k, 2))
+# * Summing across the remaining n-k elements gives:
+#            sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
+# * This reduces to:
+#            (H(n) - H(k)) * k * (1 + log(k, 2))
+# * Where H(n) is the n-th harmonic number estimated by:
+#            gamma = 0.5772156649
+#            H(n) = log(n, e) + gamma + 1.0 / (2.0 * n)
+#   http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
+# * Substituting the H(n) formula:
+#            comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
+#
+# Worst-case for step 3:
+# ----------------------
+# In the worst case, the input data is reverse-sorted, so that every new element
+# must be inserted in the heap:
+#
+#             comparisons = 1.66 * k + log(k, 2) * (n - k)
+#
+# Alternative Algorithms
+# ----------------------
+# Other algorithms were not used because they:
+# 1) Took much more auxiliary memory,
+# 2) Made multiple passes over the data, and
+# 3) Made more comparisons in common cases (small k, large n, semi-random input).
+# See the more detailed comparison of approach at:
+# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
+
 def nsmallest(n, iterable, key=None):
     """Find the n smallest elements in a dataset.
 
     Equivalent to:  sorted(iterable, key=key)[:n]
     """
+
     # Short-cut for n==1 is to use min() when len(iterable)>0
     if n == 1:
         it = iter(iterable)
-        head = list(islice(it, 1))
-        if not head:
-            return []
+        sentinel = object()
         if key is None:
-            return [min(chain(head, it))]
-        return [min(chain(head, it), key=key)]
+            result = min(it, default=sentinel)
+        else:
+            result = min(it, default=sentinel, key=key)
+        return [] if result is sentinel else [result]
 
     # When n>=size, it's faster to use sorted()
     try:
@@ -413,17 +444,40 @@
 
     # When key is none, use simpler decoration
     if key is None:
-        it = zip(iterable, count())                         # decorate
-        result = _nsmallest(n, it)
-        return [r[0] for r in result]                       # undecorate
+        it = iter(iterable)
+        result = list(islice(zip(it, count()), n))
+        if not result:
+            return result
+        _heapify_max(result)
+        order = n
+        top = result[0][0]
+        _heapreplace = _heapreplace_max
+        for elem in it:
+            if elem < top:
+                _heapreplace(result, (elem, order))
+                top = result[0][0]
+                order += 1
+        result.sort()
+        return [r[0] for r in result]
 
     # General case, slowest method
-    in1, in2 = tee(iterable)
-    it = zip(map(key, in1), count(), in2)                   # decorate
-    result = _nsmallest(n, it)
-    return [r[2] for r in result]                           # undecorate
+    it = iter(iterable)
+    result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
+    if not result:
+        return result
+    _heapify_max(result)
+    order = n
+    top = result[0][0]
+    _heapreplace = _heapreplace_max
+    for elem in it:
+        k = key(elem)
+        if k < top:
+            _heapreplace(result, (k, order, elem))
+            top = result[0][0]
+            order += 1
+    result.sort()
+    return [r[2] for r in result]
 
-_nlargest = nlargest
 def nlargest(n, iterable, key=None):
     """Find the n largest elements in a dataset.
 
@@ -433,12 +487,12 @@
     # Short-cut for n==1 is to use max() when len(iterable)>0
     if n == 1:
         it = iter(iterable)
-        head = list(islice(it, 1))
-        if not head:
-            return []
+        sentinel = object()
         if key is None:
-            return [max(chain(head, it))]
-        return [max(chain(head, it), key=key)]
+            result = max(it, default=sentinel)
+        else:
+            result = max(it, default=sentinel, key=key)
+        return [] if result is sentinel else [result]
 
     # When n>=size, it's faster to use sorted()
     try:
@@ -451,26 +505,42 @@
 
     # When key is none, use simpler decoration
     if key is None:
-        it = zip(iterable, count(0,-1))                     # decorate
-        result = _nlargest(n, it)
-        return [r[0] for r in result]                       # undecorate
+        it = iter(iterable)
+        result = list(islice(zip(it, count(0, -1)), n))
+        if not result:
+            return result
+        heapify(result)
+        order = -n
+        top = result[0][0]
+        _heapreplace = heapreplace
+        for elem in it:
+            if top < elem:
+                _heapreplace(result, (elem, order))
+                top = result[0][0]
+                order -= 1
+        result.sort(reverse=True)
+        return [r[0] for r in result]
 
     # General case, slowest method
-    in1, in2 = tee(iterable)
-    it = zip(map(key, in1), count(0,-1), in2)               # decorate
-    result = _nlargest(n, it)
-    return [r[2] for r in result]                           # undecorate
+    it = iter(iterable)
+    result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
+    if not result:
+        return result
+    heapify(result)
+    order = -n
+    top = result[0][0]
+    _heapreplace = heapreplace
+    for elem in it:
+        k = key(elem)
+        if top < k:
+            _heapreplace(result, (k, order, elem))
+            top = result[0][0]
+            order -= 1
+    result.sort(reverse=True)
+    return [r[2] for r in result]
+
 
 if __name__ == "__main__":
-    # Simple sanity test
-    heap = []
-    data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
-    for item in data:
-        heappush(heap, item)
-    sort = []
-    while heap:
-        sort.append(heappop(heap))
-    print(sort)
 
     import doctest
     doctest.testmod()
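
The rewritten nsmallest()/nlargest() implement the single-pass, bounded-memory
scheme described in the algorithm notes above and accept the key function
directly instead of decorating the input for the old key-less helpers. Usage
sketch:

    import heapq

    prices = [('apple', 3.5), ('pear', 1.2), ('plum', 2.0), ('fig', 4.1)]

    cheapest = heapq.nsmallest(2, prices, key=lambda item: item[1])
    priciest = heapq.nlargest(1, prices, key=lambda item: item[1])

    assert cheapest == [('pear', 1.2), ('plum', 2.0)]
    assert priciest == [('fig', 4.1)]
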
diff --git a/Lib/http/client.py b/Lib/http/client.py
index d2013f2..ad5590c 100644
--- a/Lib/http/client.py
+++ b/Lib/http/client.py
@@ -270,7 +270,7 @@
     return email.parser.Parser(_class=_class).parsestr(hstring)
 
 
-class HTTPResponse(io.RawIOBase):
+class HTTPResponse(io.BufferedIOBase):
 
     # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
 
@@ -495,9 +495,10 @@
             return b""
 
         if amt is not None:
-            # Amount is given, so call base class version
-            # (which is implemented in terms of self.readinto)
-            return super(HTTPResponse, self).read(amt)
+            # Amount is given, implement using readinto
+            b = bytearray(amt)
+            n = self.readinto(b)
+            return memoryview(b)[:n].tobytes()
         else:
             # Amount is not given (unbounded read) so we must check self.length
             # and self.chunked
@@ -577,71 +578,67 @@
             if line in (b'\r\n', b'\n', b''):
                 break
 
+    def _get_chunk_left(self):
+        # return self.chunk_left, reading a new chunk if necessary.
+        # chunk_left == 0: at the end of the current chunk, need to close it
+        # chunk_left == None: No current chunk, should read next.
+        # This function returns a non-zero value, or None if the last chunk
+        # has been read.
+        chunk_left = self.chunk_left
+        if not chunk_left: # Can be 0 or None
+            if chunk_left is not None:
+                # We are at the end of a chunk; discard the chunk end.
+                self._safe_read(2)  # toss the CRLF at the end of the chunk
+            try:
+                chunk_left = self._read_next_chunk_size()
+            except ValueError:
+                raise IncompleteRead(b'')
+            if chunk_left == 0:
+                # last chunk: 1*("0") [ chunk-extension ] CRLF
+                self._read_and_discard_trailer()
+                # we read everything; close the "file"
+                self._close_conn()
+                chunk_left = None
+            self.chunk_left = chunk_left
+        return chunk_left
+
     def _readall_chunked(self):
         assert self.chunked != _UNKNOWN
-        chunk_left = self.chunk_left
         value = []
-        while True:
-            if chunk_left is None:
-                try:
-                    chunk_left = self._read_next_chunk_size()
-                    if chunk_left == 0:
-                        break
-                except ValueError:
-                    raise IncompleteRead(b''.join(value))
-            value.append(self._safe_read(chunk_left))
-
-            # we read the whole chunk, get another
-            self._safe_read(2)      # toss the CRLF at the end of the chunk
-            chunk_left = None
-
-        self._read_and_discard_trailer()
-
-        # we read everything; close the "file"
-        self._close_conn()
-
-        return b''.join(value)
+        try:
+            while True:
+                chunk_left = self._get_chunk_left()
+                if chunk_left is None:
+                    break
+                value.append(self._safe_read(chunk_left))
+                self.chunk_left = 0
+            return b''.join(value)
+        except IncompleteRead:
+            raise IncompleteRead(b''.join(value))
 
     def _readinto_chunked(self, b):
         assert self.chunked != _UNKNOWN
-        chunk_left = self.chunk_left
-
         total_bytes = 0
         mvb = memoryview(b)
-        while True:
-            if chunk_left is None:
-                try:
-                    chunk_left = self._read_next_chunk_size()
-                    if chunk_left == 0:
-                        break
-                except ValueError:
-                    raise IncompleteRead(bytes(b[0:total_bytes]))
+        try:
+            while True:
+                chunk_left = self._get_chunk_left()
+                if chunk_left is None:
+                    return total_bytes
 
-            if len(mvb) < chunk_left:
-                n = self._safe_readinto(mvb)
-                self.chunk_left = chunk_left - n
-                return total_bytes + n
-            elif len(mvb) == chunk_left:
-                n = self._safe_readinto(mvb)
-                self._safe_read(2)  # toss the CRLF at the end of the chunk
-                self.chunk_left = None
-                return total_bytes + n
-            else:
-                temp_mvb = mvb[0:chunk_left]
+                if len(mvb) <= chunk_left:
+                    n = self._safe_readinto(mvb)
+                    self.chunk_left = chunk_left - n
+                    return total_bytes + n
+
+                temp_mvb = mvb[:chunk_left]
                 n = self._safe_readinto(temp_mvb)
                 mvb = mvb[n:]
                 total_bytes += n
+                self.chunk_left = 0
 
-            # we read the whole chunk, get another
-            self._safe_read(2)      # toss the CRLF at the end of the chunk
-            chunk_left = None
-
-        self._read_and_discard_trailer()
-
-        # we read everything; close the "file"
-        self._close_conn()
-
-        return total_bytes
+        except IncompleteRead:
+            raise IncompleteRead(bytes(b[0:total_bytes]))
 
     def _safe_read(self, amt):
         """Read the number of bytes requested, compensating for partial reads.
@@ -682,6 +679,73 @@
             total_bytes += n
         return total_bytes
 
+    def read1(self, n=-1):
+        """Read with at most one underlying system call.  If at least one
+        byte is buffered, return that instead.
+        """
+        if self.fp is None or self._method == "HEAD":
+            return b""
+        if self.chunked:
+            return self._read1_chunked(n)
+        try:
+            result = self.fp.read1(n)
+        except ValueError:
+            if n >= 0:
+                raise
+            # some implementations, like BufferedReader, don't support -1
+            # Read an arbitrarily selected largeish chunk.
+            result = self.fp.read1(16*1024)
+        if not result and n:
+            self._close_conn()
+        return result
+
+    def peek(self, n=-1):
+        # Having this enables IOBase.readline() to read more than one
+        # byte at a time
+        if self.fp is None or self._method == "HEAD":
+            return b""
+        if self.chunked:
+            return self._peek_chunked(n)
+        return self.fp.peek(n)
+
+    def readline(self, limit=-1):
+        if self.fp is None or self._method == "HEAD":
+            return b""
+        if self.chunked:
+            # Fallback to IOBase readline which uses peek() and read()
+            return super().readline(limit)
+        result = self.fp.readline(limit)
+        if not result and limit:
+            self._close_conn()
+        return result
+
+    def _read1_chunked(self, n):
+        # Strictly speaking, _get_chunk_left() may cause more than one read,
+        # but that is ok, since that is to satisfy the chunked protocol.
+        chunk_left = self._get_chunk_left()
+        if chunk_left is None or n == 0:
+            return b''
+        if not (0 <= n <= chunk_left):
+            n = chunk_left # if n is negative or larger than chunk_left
+        read = self.fp.read1(n)
+        self.chunk_left -= len(read)
+        if not read:
+            raise IncompleteRead(b"")
+        return read
+
+    def _peek_chunked(self, n):
+        # Strictly speaking, _get_chunk_left() may cause more than one read,
+        # but that is ok, since that is to satisfy the chunked protocol.
+        try:
+            chunk_left = self._get_chunk_left()
+        except IncompleteRead:
+            return b'' # peek doesn't worry about protocol
+        if chunk_left is None:
+            return b'' # eof
+        # peek is allowed to return more than requested.  Just request the
+        # entire chunk, and truncate what we get.
+        return self.fp.peek(chunk_left)[:chunk_left]
+
     def fileno(self):
         return self.fp.fileno()
 
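
Rebasing HTTPResponse on io.BufferedIOBase gives it buffered-reader behaviour:
peek() and read1() are available, and readline() can delegate to the buffered
socket file instead of reading a byte at a time. A sketch of the intended use;
it requires network access, and example.com stands in for any HTTP URL:

    from urllib.request import urlopen

    with urlopen('http://example.com/') as response:
        preview = response.peek(32)     # look ahead without consuming
        chunk = response.read1(64)      # at most one underlying read
        remainder = response.read()

    assert isinstance(preview, bytes) and isinstance(chunk, bytes)
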
diff --git a/Lib/idlelib/idlever.py b/Lib/idlelib/idlever.py
index 22acb41..d4178b8 100644
--- a/Lib/idlelib/idlever.py
+++ b/Lib/idlelib/idlever.py
@@ -1 +1 @@
-IDLE_VERSION = "3.4.1"
+IDLE_VERSION = "3.5.0a0"
diff --git a/Lib/imghdr.py b/Lib/imghdr.py
index add2ea8..fe77e49 100644
--- a/Lib/imghdr.py
+++ b/Lib/imghdr.py
@@ -110,6 +110,12 @@
 
 tests.append(test_bmp)
 
+def test_webp(h, f):
+    if h.startswith(b'RIFF') and h[8:12] == b'WEBP':
+        return 'webp'
+
+tests.append(test_webp)
+
 #--------------------#
 # Small test program #
 #--------------------#
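
The new test recognizes the RIFF container with a WEBP form type in the first
twelve bytes of the header. Sketch with a synthetic header (the trailing bytes
are dummy padding, not a decodable image):

    import imghdr

    fake_webp = b'RIFF' + (1024).to_bytes(4, 'little') + b'WEBP' + b'\x00' * 20
    assert imghdr.what(None, h=fake_webp) == 'webp'
    assert imghdr.what(None, h=b'not an image') is None
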
diff --git a/Lib/importlib/_bootstrap.py b/Lib/importlib/_bootstrap.py
index b8836c1..705c393 100644
--- a/Lib/importlib/_bootstrap.py
+++ b/Lib/importlib/_bootstrap.py
@@ -419,12 +419,13 @@
 #     Python 3.4a4  3290 (changes to __qualname__ computation)
 #     Python 3.4a4  3300 (more changes to __qualname__ computation)
 #     Python 3.4rc2 3310 (alter __qualname__ computation)
+#     Python 3.5a0  3320 (matrix multiplication operator)
 #
 # MAGIC must change whenever the bytecode emitted by the compiler may no
 # longer be understood by older implementations of the eval loop (usually
 # due to the addition of new opcodes).
 
-MAGIC_NUMBER = (3310).to_bytes(2, 'little') + b'\r\n'
+MAGIC_NUMBER = (3320).to_bytes(2, 'little') + b'\r\n'
 _RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little')  # For import.c
 
 _PYCACHE = '__pycache__'
diff --git a/Lib/importlib/abc.py b/Lib/importlib/abc.py
index 558abd3..7f6f235 100644
--- a/Lib/importlib/abc.py
+++ b/Lib/importlib/abc.py
@@ -217,7 +217,8 @@
         """
         raise ImportError
 
-    def source_to_code(self, data, path='<string>'):
+    @staticmethod
+    def source_to_code(data, path='<string>'):
         """Compile 'data' into a code object.
 
         The 'data' argument can be anything that compile() can handle. The'path'
diff --git a/Lib/importlib/util.py b/Lib/importlib/util.py
index 6d73b1d..e50ef6d 100644
--- a/Lib/importlib/util.py
+++ b/Lib/importlib/util.py
@@ -1,5 +1,5 @@
 """Utility code for constructing importers, etc."""
-
+from . import abc
 from ._bootstrap import MAGIC_NUMBER
 from ._bootstrap import cache_from_source
 from ._bootstrap import decode_source
@@ -12,6 +12,7 @@
 from contextlib import contextmanager
 import functools
 import sys
+import types
 import warnings
 
 
@@ -200,3 +201,94 @@
             return fxn(self, module, *args, **kwargs)
 
     return module_for_loader_wrapper
+
+
+class _Module(types.ModuleType):
+
+    """A subclass of the module type to allow __class__ manipulation."""
+
+
+class _LazyModule(types.ModuleType):
+
+    """A subclass of the module type which triggers loading upon attribute access."""
+
+    def __getattribute__(self, attr):
+        """Trigger the load of the module and return the attribute."""
+        # All module metadata must be garnered from __spec__ in order to avoid
+        # using mutated values.
+        # Stop triggering this method.
+        self.__class__ = _Module
+        # Get the original name to make sure no object substitution occurred
+        # in sys.modules.
+        original_name = self.__spec__.name
+        # Figure out exactly what attributes were mutated between the creation
+        # of the module and now.
+        attrs_then = self.__spec__.loader_state
+        attrs_now = self.__dict__
+        attrs_updated = {}
+        for key, value in attrs_now.items():
+            # Code that set the attribute may have kept a reference to the
+            # assigned object, making identity more important than equality.
+            if key not in attrs_then:
+                attrs_updated[key] = value
+            elif id(attrs_now[key]) != id(attrs_then[key]):
+                attrs_updated[key] = value
+        self.__spec__.loader.exec_module(self)
+        # If exec_module() was used directly there is no guarantee the module
+        # object was put into sys.modules.
+        if original_name in sys.modules:
+            if id(self) != id(sys.modules[original_name]):
+                msg = ('module object for {!r} substituted in sys.modules '
+                       'during a lazy load')
+                raise ValueError(msg.format(original_name))
+        # Update after loading since that's what would happen in an eager
+        # loading situation.
+        self.__dict__.update(attrs_updated)
+        return getattr(self, attr)
+
+    def __delattr__(self, attr):
+        """Trigger the load and then perform the deletion."""
+        # To trigger the load and raise an exception if the attribute
+        # doesn't exist.
+        self.__getattribute__(attr)
+        delattr(self, attr)
+
+
+class LazyLoader(abc.Loader):
+
+    """A loader that creates a module which defers loading until attribute access."""
+
+    @staticmethod
+    def __check_eager_loader(loader):
+        if not hasattr(loader, 'exec_module'):
+            raise TypeError('loader must define exec_module()')
+        elif hasattr(loader.__class__, 'create_module'):
+            if abc.Loader.create_module != loader.__class__.create_module:
+                # Only care if create_module() is overridden in a subclass of
+                # importlib.abc.Loader.
+                raise TypeError('loader cannot define create_module()')
+
+    @classmethod
+    def factory(cls, loader):
+        """Construct a callable which returns the eager loader made lazy."""
+        cls.__check_eager_loader(loader)
+        return lambda *args, **kwargs: cls(loader(*args, **kwargs))
+
+    def __init__(self, loader):
+        self.__check_eager_loader(loader)
+        self.loader = loader
+
+    def create_module(self, spec):
+        """Create a module which can have its __class__ manipulated."""
+        return _Module(spec.name)
+
+    def exec_module(self, module):
+        """Make the module load lazily."""
+        module.__spec__.loader = self.loader
+        module.__loader__ = self.loader
+        # Don't need to worry about deep-copying as trying to set an attribute
+        # on an object would have triggered the load,
+        # e.g. ``module.__spec__.loader = None`` would trigger a load from
+        # trying to access module.__spec__.
+        module.__spec__.loader_state = module.__dict__.copy()
+        module.__class__ = _LazyModule
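
LazyLoader wraps an eager loader so that the wrapped module's code only runs on
the first attribute access. A sketch of the intended wiring; it assumes the
find_spec()/module_from_spec() helpers available in importlib.util on
Python 3.5, and 'json' is just an example module name:

    import importlib.util
    import sys

    def lazy_import(name):
        spec = importlib.util.find_spec(name)
        # Defer exec_module() until the module is first touched.
        spec.loader = importlib.util.LazyLoader(spec.loader)
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        spec.loader.exec_module(module)
        return module

    json = lazy_import('json')          # the module body has not run yet
    assert json.dumps({'lazy': True}) == '{"lazy": true}'   # triggers the load
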
diff --git a/Lib/inspect.py b/Lib/inspect.py
index 4c3e33d..4ac76b1 100644
--- a/Lib/inspect.py
+++ b/Lib/inspect.py
@@ -17,7 +17,7 @@
     getclasstree() - arrange classes so as to represent their hierarchy
 
     getargspec(), getargvalues(), getcallargs() - get info about function arguments
-    getfullargspec() - same, with support for Python-3000 features
+    getfullargspec() - same, with support for Python 3 features
     formatargspec(), formatargvalues() - format an argument spec
     getouterframes(), getinnerframes() - get info about frames
     currentframe() - get the current stack frame
@@ -32,6 +32,7 @@
               'Yury Selivanov <yselivanov@sprymix.com>')
 
 import ast
+import enum
 import importlib.machinery
 import itertools
 import linecache
@@ -919,7 +920,7 @@
     'varargs' and 'varkw' are the names of the * and ** arguments or None.
     'defaults' is an n-tuple of the default values of the last n arguments.
 
-    Use the getfullargspec() API for Python-3000 code, as annotations
+    Use the getfullargspec() API for Python 3 code, as annotations
     and keyword arguments are supported. getargspec() will raise ValueError
     if the func has either annotations or keyword arguments.
     """
@@ -966,9 +967,10 @@
         # getfullargspec() historically ignored __wrapped__ attributes,
         # so we ensure that remains the case in 3.3+
 
-        sig = _signature_internal(func,
-                                  follow_wrapper_chains=False,
-                                  skip_bound_arg=False)
+        sig = _signature_from_callable(func,
+                                       follow_wrapper_chains=False,
+                                       skip_bound_arg=False,
+                                       sigcls=Signature)
     except Exception as ex:
         # Most of the times 'signature' will raise ValueError.
         # But, it can also raise AttributeError, and, maybe something
@@ -1495,6 +1497,10 @@
 
 
 def _signature_get_user_defined_method(cls, method_name):
+    """Private helper. Checks if ``cls`` has an attribute
+    named ``method_name`` and returns it only if it is a
+    pure python function.
+    """
     try:
         meth = getattr(cls, method_name)
     except AttributeError:
@@ -1507,9 +1513,10 @@
 
 
 def _signature_get_partial(wrapped_sig, partial, extra_args=()):
-    # Internal helper to calculate how 'wrapped_sig' signature will
-    # look like after applying a 'functools.partial' object (or alike)
-    # on it.
+    """Private helper to calculate how 'wrapped_sig' signature will
+    look like after applying a 'functools.partial' object (or alike)
+    on it.
+    """
 
     old_params = wrapped_sig.parameters
     new_params = OrderedDict(old_params.items())
@@ -1582,8 +1589,9 @@
 
 
 def _signature_bound_method(sig):
-    # Internal helper to transform signatures for unbound
-    # functions to bound methods
+    """Private helper to transform signatures for unbound
+    functions to bound methods.
+    """
 
     params = tuple(sig.parameters.values())
 
@@ -1607,8 +1615,9 @@
 
 
 def _signature_is_builtin(obj):
-    # Internal helper to test if `obj` is a callable that might
-    # support Argument Clinic's __text_signature__ protocol.
+    """Private helper to test if `obj` is a callable that might
+    support Argument Clinic's __text_signature__ protocol.
+    """
     return (isbuiltin(obj) or
             ismethoddescriptor(obj) or
             isinstance(obj, _NonUserDefinedCallables) or
@@ -1618,10 +1627,11 @@
 
 
 def _signature_is_functionlike(obj):
-    # Internal helper to test if `obj` is a duck type of FunctionType.
-    # A good example of such objects are functions compiled with
-    # Cython, which have all attributes that a pure Python function
-    # would have, but have their code statically compiled.
+    """Private helper to test if `obj` is a duck type of FunctionType.
+    A good example of such objects are functions compiled with
+    Cython, which have all attributes that a pure Python function
+    would have, but have their code statically compiled.
+    """
 
     if not callable(obj) or isclass(obj):
         # All function-like objects are obviously callables,
@@ -1642,11 +1652,12 @@
 
 
 def _signature_get_bound_param(spec):
-    # Internal helper to get first parameter name from a
-    # __text_signature__ of a builtin method, which should
-    # be in the following format: '($param1, ...)'.
-    # Assumptions are that the first argument won't have
-    # a default value or an annotation.
+    """ Private helper to get first parameter name from a
+    __text_signature__ of a builtin method, which should
+    be in the following format: '($param1, ...)'.
+    Assumptions are that the first argument won't have
+    a default value or an annotation.
+    """
 
     assert spec.startswith('($')
 
@@ -1665,7 +1676,9 @@
 
 def _signature_strip_non_python_syntax(signature):
     """
-    Takes a signature in Argument Clinic's extended signature format.
+    Private helper function. Takes a signature in Argument Clinic's
+    extended signature format.
+
     Returns a tuple of three things:
       * that signature re-rendered in standard Python syntax,
       * the index of the "self" parameter (generally 0), or None if
@@ -1734,8 +1747,10 @@
 
 
 def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
-    # Internal helper to parse content of '__text_signature__'
-    # and return a Signature based on it
+    """Private helper to parse content of '__text_signature__'
+    and return a Signature based on it.
+    """
+
     Parameter = cls._parameter_cls
 
     clean_signature, self_parameter, last_positional_only = \
@@ -1873,8 +1888,10 @@
 
 
 def _signature_from_builtin(cls, func, skip_bound_arg=True):
-    # Internal helper function to get signature for
-    # builtin callables
+    """Private helper function to get signature for
+    builtin callables.
+    """
+
     if not _signature_is_builtin(func):
         raise TypeError("{!r} is not a Python builtin "
                         "function".format(func))
@@ -1886,7 +1903,14 @@
     return _signature_fromstr(cls, func, s, skip_bound_arg)
 
 
-def _signature_internal(obj, follow_wrapper_chains=True, skip_bound_arg=True):
+def _signature_from_callable(obj, *,
+                             follow_wrapper_chains=True,
+                             skip_bound_arg=True,
+                             sigcls):
+
+    """Private helper function to get signature for arbitrary
+    callable objects.
+    """
 
     if not callable(obj):
         raise TypeError('{!r} is not a callable object'.format(obj))
@@ -1894,9 +1918,12 @@
     if isinstance(obj, types.MethodType):
         # In this case we skip the first parameter of the underlying
         # function (usually `self` or `cls`).
-        sig = _signature_internal(obj.__func__,
-                                  follow_wrapper_chains,
-                                  skip_bound_arg)
+        sig = _signature_from_callable(
+            obj.__func__,
+            follow_wrapper_chains=follow_wrapper_chains,
+            skip_bound_arg=skip_bound_arg,
+            sigcls=sigcls)
+
         if skip_bound_arg:
             return _signature_bound_method(sig)
         else:
@@ -1927,9 +1954,12 @@
             # (usually `self`, or `cls`) will not be passed
             # automatically (as for boundmethods)
 
-            wrapped_sig = _signature_internal(partialmethod.func,
-                                              follow_wrapper_chains,
-                                              skip_bound_arg)
+            wrapped_sig = _signature_from_callable(
+                partialmethod.func,
+                follow_wrapper_chains=follow_wrapper_chains,
+                skip_bound_arg=skip_bound_arg,
+                sigcls=sigcls)
+
             sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
 
             first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
@@ -1940,16 +1970,18 @@
     if isfunction(obj) or _signature_is_functionlike(obj):
         # If it's a pure Python function, or an object that is duck type
         # of a Python function (Cython functions, for instance), then:
-        return Signature.from_function(obj)
+        return sigcls.from_function(obj)
 
     if _signature_is_builtin(obj):
-        return _signature_from_builtin(Signature, obj,
+        return _signature_from_builtin(sigcls, obj,
                                        skip_bound_arg=skip_bound_arg)
 
     if isinstance(obj, functools.partial):
-        wrapped_sig = _signature_internal(obj.func,
-                                          follow_wrapper_chains,
-                                          skip_bound_arg)
+        wrapped_sig = _signature_from_callable(
+            obj.func,
+            follow_wrapper_chains=follow_wrapper_chains,
+            skip_bound_arg=skip_bound_arg,
+            sigcls=sigcls)
         return _signature_get_partial(wrapped_sig, obj)
 
     sig = None
@@ -1960,23 +1992,29 @@
         # in its metaclass
         call = _signature_get_user_defined_method(type(obj), '__call__')
         if call is not None:
-            sig = _signature_internal(call,
-                                      follow_wrapper_chains,
-                                      skip_bound_arg)
+            sig = _signature_from_callable(
+                call,
+                follow_wrapper_chains=follow_wrapper_chains,
+                skip_bound_arg=skip_bound_arg,
+                sigcls=sigcls)
         else:
             # Now we check if the 'obj' class has a '__new__' method
             new = _signature_get_user_defined_method(obj, '__new__')
             if new is not None:
-                sig = _signature_internal(new,
-                                          follow_wrapper_chains,
-                                          skip_bound_arg)
+                sig = _signature_from_callable(
+                    new,
+                    follow_wrapper_chains=follow_wrapper_chains,
+                    skip_bound_arg=skip_bound_arg,
+                    sigcls=sigcls)
             else:
                 # Finally, we should have at least __init__ implemented
                 init = _signature_get_user_defined_method(obj, '__init__')
                 if init is not None:
-                    sig = _signature_internal(init,
-                                              follow_wrapper_chains,
-                                              skip_bound_arg)
+                    sig = _signature_from_callable(
+                        init,
+                        follow_wrapper_chains=follow_wrapper_chains,
+                        skip_bound_arg=skip_bound_arg,
+                        sigcls=sigcls)
 
         if sig is None:
             # At this point we know, that `obj` is a class, with no user-
@@ -1998,7 +2036,7 @@
                     if text_sig:
                         # If 'obj' class has a __text_signature__ attribute:
                         # return a signature based on it
-                        return _signature_fromstr(Signature, obj, text_sig)
+                        return _signature_fromstr(sigcls, obj, text_sig)
 
             # No '__text_signature__' was found for the 'obj' class.
             # Last option is to check if its '__init__' is
@@ -2018,9 +2056,11 @@
         call = _signature_get_user_defined_method(type(obj), '__call__')
         if call is not None:
             try:
-                sig = _signature_internal(call,
-                                          follow_wrapper_chains,
-                                          skip_bound_arg)
+                sig = _signature_from_callable(
+                    call,
+                    follow_wrapper_chains=follow_wrapper_chains,
+                    skip_bound_arg=skip_bound_arg,
+                    sigcls=sigcls)
             except ValueError as ex:
                 msg = 'no signature found for {!r}'.format(obj)
                 raise ValueError(msg) from ex
@@ -2040,41 +2080,35 @@
 
     raise ValueError('callable {!r} is not supported by signature'.format(obj))
 
-def signature(obj):
-    '''Get a signature object for the passed callable.'''
-    return _signature_internal(obj)
-
 
 class _void:
-    '''A private marker - used in Parameter & Signature'''
+    """A private marker - used in Parameter & Signature."""
 
 
 class _empty:
-    pass
+    """Marker object for Signature.empty and Parameter.empty."""
 
 
-class _ParameterKind(int):
-    def __new__(self, *args, name):
-        obj = int.__new__(self, *args)
-        obj._name = name
-        return obj
+class _ParameterKind(enum.IntEnum):
+    POSITIONAL_ONLY = 0
+    POSITIONAL_OR_KEYWORD = 1
+    VAR_POSITIONAL = 2
+    KEYWORD_ONLY = 3
+    VAR_KEYWORD = 4
 
     def __str__(self):
-        return self._name
-
-    def __repr__(self):
-        return '<_ParameterKind: {!r}>'.format(self._name)
+        return self._name_
 
 
-_POSITIONAL_ONLY        = _ParameterKind(0, name='POSITIONAL_ONLY')
-_POSITIONAL_OR_KEYWORD  = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
-_VAR_POSITIONAL         = _ParameterKind(2, name='VAR_POSITIONAL')
-_KEYWORD_ONLY           = _ParameterKind(3, name='KEYWORD_ONLY')
-_VAR_KEYWORD            = _ParameterKind(4, name='VAR_KEYWORD')
+_POSITIONAL_ONLY         = _ParameterKind.POSITIONAL_ONLY
+_POSITIONAL_OR_KEYWORD   = _ParameterKind.POSITIONAL_OR_KEYWORD
+_VAR_POSITIONAL          = _ParameterKind.VAR_POSITIONAL
+_KEYWORD_ONLY            = _ParameterKind.KEYWORD_ONLY
+_VAR_KEYWORD             = _ParameterKind.VAR_KEYWORD
 
 
 class Parameter:
-    '''Represents a parameter in a function signature.
+    """Represents a parameter in a function signature.
 
     Has the following public attributes:
 
@@ -2093,7 +2127,7 @@
         Possible values: `Parameter.POSITIONAL_ONLY`,
         `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
         `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
-    '''
+    """
 
     __slots__ = ('_name', '_kind', '_default', '_annotation')
 
@@ -2130,6 +2164,16 @@
 
         self._name = name
 
+    def __reduce__(self):
+        return (type(self),
+                (self._name, self._kind),
+                {'_default': self._default,
+                 '_annotation': self._annotation})
+
+    def __setstate__(self, state):
+        self._default = state['_default']
+        self._annotation = state['_annotation']
+
     @property
     def name(self):
         return self._name
@@ -2148,7 +2192,7 @@
 
     def replace(self, *, name=_void, kind=_void,
                 annotation=_void, default=_void):
-        '''Creates a customized copy of the Parameter.'''
+        """Creates a customized copy of the Parameter."""
 
         if name is _void:
             name = self._name
@@ -2184,8 +2228,18 @@
         return formatted
 
     def __repr__(self):
-        return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
-                                           id(self), self.name)
+        return '<{} at {:#x} "{}">'.format(self.__class__.__name__,
+                                           id(self), self)
+
+    def __hash__(self):
+        hash_tuple = (self.name, int(self.kind))
+
+        if self._annotation is not _empty:
+            hash_tuple += (self._annotation,)
+        if self._default is not _empty:
+            hash_tuple += (self._default,)
+
+        return hash(hash_tuple)
 
     def __eq__(self, other):
         return (issubclass(other.__class__, Parameter) and
@@ -2199,7 +2253,7 @@
 
 
 class BoundArguments:
-    '''Result of `Signature.bind` call.  Holds the mapping of arguments
+    """Result of `Signature.bind` call.  Holds the mapping of arguments
     to the function's parameters.
 
     Has the following public attributes:
@@ -2213,7 +2267,7 @@
         Tuple of positional arguments values.
     * kwargs : dict
         Dict of keyword arguments values.
-    '''
+    """
 
     def __init__(self, signature, arguments):
         self.arguments = arguments
@@ -2286,7 +2340,7 @@
 
 
 class Signature:
-    '''A Signature object represents the overall signature of a function.
+    """A Signature object represents the overall signature of a function.
     It stores a Parameter object for each parameter accepted by the
     function, as well as information specific to the function itself.
 
@@ -2306,7 +2360,7 @@
     * bind_partial(*args, **kwargs) -> BoundArguments
         Creates a partial mapping from positional and keyword arguments
         to parameters (simulating 'functools.partial' behavior.)
-    '''
+    """
 
     __slots__ = ('_return_annotation', '_parameters')
 
@@ -2317,9 +2371,9 @@
 
     def __init__(self, parameters=None, *, return_annotation=_empty,
                  __validate_parameters__=True):
-        '''Constructs Signature from the given list of Parameter
+        """Constructs Signature from the given list of Parameter
         objects and 'return_annotation'.  All arguments are optional.
-        '''
+        """
 
         if parameters is None:
             params = OrderedDict()
@@ -2368,7 +2422,7 @@
 
     @classmethod
     def from_function(cls, func):
-        '''Constructs Signature for the given python function'''
+        """Constructs Signature for the given python function."""
 
         is_duck_function = False
         if not isfunction(func):
@@ -2449,8 +2503,14 @@
 
     @classmethod
     def from_builtin(cls, func):
+        """Constructs Signature for the given builtin function."""
         return _signature_from_builtin(cls, func)
 
+    @classmethod
+    def from_callable(cls, obj):
+        """Constructs Signature for the given callable object."""
+        return _signature_from_callable(obj, sigcls=cls)
+
     @property
     def parameters(self):
         return self._parameters
@@ -2460,10 +2520,10 @@
         return self._return_annotation
 
     def replace(self, *, parameters=_void, return_annotation=_void):
-        '''Creates a customized copy of the Signature.
+        """Creates a customized copy of the Signature.
         Pass 'parameters' and/or 'return_annotation' arguments
         to override them in the new copy.
-        '''
+        """
 
         if parameters is _void:
             parameters = self.parameters.values()
@@ -2474,6 +2534,12 @@
         return type(self)(parameters,
                           return_annotation=return_annotation)
 
+    def __hash__(self):
+        hash_tuple = tuple(self.parameters.values())
+        if self._return_annotation is not _empty:
+            hash_tuple += (self._return_annotation,)
+        return hash(hash_tuple)
+
     def __eq__(self, other):
         if (not issubclass(type(other), Signature) or
                     self.return_annotation != other.return_annotation or
@@ -2508,7 +2574,7 @@
         return not self.__eq__(other)
 
     def _bind(self, args, kwargs, *, partial=False):
-        '''Private method.  Don't use directly.'''
+        """Private method. Don't use directly."""
 
         arguments = OrderedDict()
 
@@ -2635,19 +2701,31 @@
         return self._bound_arguments_cls(self, arguments)
 
     def bind(*args, **kwargs):
-        '''Get a BoundArguments object, that maps the passed `args`
+        """Get a BoundArguments object, that maps the passed `args`
         and `kwargs` to the function's signature.  Raises `TypeError`
         if the passed arguments can not be bound.
-        '''
+        """
         return args[0]._bind(args[1:], kwargs)
 
     def bind_partial(*args, **kwargs):
-        '''Get a BoundArguments object, that partially maps the
+        """Get a BoundArguments object, that partially maps the
         passed `args` and `kwargs` to the function's signature.
         Raises `TypeError` if the passed arguments can not be bound.
-        '''
+        """
         return args[0]._bind(args[1:], kwargs, partial=True)
 
+    def __reduce__(self):
+        return (type(self),
+                (tuple(self._parameters.values()),),
+                {'_return_annotation': self._return_annotation})
+
+    def __setstate__(self, state):
+        self._return_annotation = state['_return_annotation']
+
+    def __repr__(self):
+        return '<{} at {:#x} "{}">'.format(self.__class__.__name__,
+                                           id(self), self)
+
     def __str__(self):
         result = []
         render_pos_only_separator = False
@@ -2693,6 +2771,12 @@
 
         return rendered
 
+
+def signature(obj):
+    """Get a signature object for the passed callable."""
+    return Signature.from_callable(obj)
+
+
 def _main():
     """ Logic for inspecting an object given at command line """
     import argparse
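
Note (not part of this patch): the inspect.py hunks above add a
Signature.from_callable() classmethod, route signature() through it, turn
_ParameterKind into an enum.IntEnum, and give Parameter and Signature
__hash__ plus __reduce__/__setstate__ pickle support.  A minimal sketch of
the resulting behaviour, assuming an interpreter with this change applied:

    import inspect
    import pickle

    def greet(name, *, excited=False):
        return name

    sig = inspect.Signature.from_callable(greet)    # new classmethod
    assert sig == inspect.signature(greet)          # signature() delegates to it
    assert pickle.loads(pickle.dumps(sig)) == sig   # signatures pickle round-trip
    assert hash(sig) == hash(sig.replace())         # equal signatures hash equal
    kind = sig.parameters['excited'].kind
    assert kind is inspect.Parameter.KEYWORD_ONLY   # an IntEnum member
    assert str(kind) == 'KEYWORD_ONLY'
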
diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py
index 54df39a..bf2de2d 100644
--- a/Lib/ipaddress.py
+++ b/Lib/ipaddress.py
@@ -195,11 +195,7 @@
     """
     if number == 0:
         return bits
-    for i in range(bits):
-        if (number >> i) & 1:
-            return i
-    # All bits of interest were zero, even if there are more in the number
-    return bits
+    return min(bits, (~number & (number-1)).bit_length())
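
Note (not part of this patch): the replacement one-liner uses a standard bit
trick.  For a nonzero number with k trailing zero bits, number - 1 sets
exactly those k bits and clears the lowest one bit, so ~number & (number - 1)
is a mask of the k low bits and its bit_length() equals k; the min() cap
preserves the old behaviour when the value has more trailing zeros than the
bits of interest.  For example:

    number = 0b101000                 # three trailing zero bits
    mask = ~number & (number - 1)     # 0b000111
    assert mask.bit_length() == 3
    assert min(32, mask.bit_length()) == 3
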
 
 
 def summarize_address_range(first, last):
@@ -250,15 +246,14 @@
     while first_int <= last_int:
         nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
                     (last_int - first_int + 1).bit_length() - 1)
-        net = ip('%s/%d' % (first, ip_bits - nbits))
+        net = ip((first_int, ip_bits - nbits))
         yield net
         first_int += 1 << nbits
         if first_int - 1 == ip._ALL_ONES:
             break
-        first = first.__class__(first_int)
 
 
-def _collapse_addresses_recursive(addresses):
+def _collapse_addresses_internal(addresses):
     """Loops through the addresses, collapsing concurrent netblocks.
 
     Example:
@@ -268,7 +263,7 @@
         ip3 = IPv4Network('192.0.2.128/26')
         ip4 = IPv4Network('192.0.2.192/26')
 
-        _collapse_addresses_recursive([ip1, ip2, ip3, ip4]) ->
+        _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
           [IPv4Network('192.0.2.0/24')]
 
         This shouldn't be called directly; it is called via
@@ -282,28 +277,29 @@
         passed.
 
     """
-    while True:
-        last_addr = None
-        ret_array = []
-        optimized = False
-
-        for cur_addr in addresses:
-            if not ret_array:
-                last_addr = cur_addr
-                ret_array.append(cur_addr)
-            elif (cur_addr.network_address >= last_addr.network_address and
-                cur_addr.broadcast_address <= last_addr.broadcast_address):
-                optimized = True
-            elif cur_addr == list(last_addr.supernet().subnets())[1]:
-                ret_array[-1] = last_addr = last_addr.supernet()
-                optimized = True
-            else:
-                last_addr = cur_addr
-                ret_array.append(cur_addr)
-
-        addresses = ret_array
-        if not optimized:
-            return addresses
+    # First merge
+    to_merge = list(addresses)
+    subnets = {}
+    while to_merge:
+        net = to_merge.pop()
+        supernet = net.supernet()
+        existing = subnets.get(supernet)
+        if existing is None:
+            subnets[supernet] = net
+        elif existing != net:
+            # Merge consecutive subnets
+            del subnets[supernet]
+            to_merge.append(supernet)
+    # Then iterate over resulting networks, skipping subsumed subnets
+    last = None
+    for net in sorted(subnets.values()):
+        if last is not None:
+            # Since they are sorted, last.network_address <= net.network_address
+            # is a given.
+            if last.broadcast_address >= net.broadcast_address:
+                continue
+        yield net
+        last = net
 
 
 def collapse_addresses(addresses):
@@ -352,15 +348,13 @@
 
     # sort and dedup
     ips = sorted(set(ips))
-    nets = sorted(set(nets))
 
     while i < len(ips):
         (first, last) = _find_address_range(ips[i:])
         i = ips.index(last) + 1
         addrs.extend(summarize_address_range(first, last))
 
-    return iter(_collapse_addresses_recursive(sorted(
-        addrs + nets, key=_BaseNetwork._get_networks_key)))
+    return _collapse_addresses_internal(addrs + nets)
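
Note (not part of this patch): the rewritten helper first merges sibling
networks through a dict keyed by their immediate supernet, then skips any
network already covered by an earlier, larger one, and collapse_addresses()
now yields its result lazily.  The observable behaviour is unchanged:

    import ipaddress

    nets = [ipaddress.ip_network('192.0.2.0/26'),
            ipaddress.ip_network('192.0.2.64/26'),
            ipaddress.ip_network('192.0.2.128/26'),
            ipaddress.ip_network('192.0.2.192/26')]
    print(list(ipaddress.collapse_addresses(nets)))
    # [IPv4Network('192.0.2.0/24')]
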
 
 
 def get_mixed_type_key(obj):
@@ -436,6 +430,17 @@
         return str(self)
 
     @property
+    def reverse_pointer(self):
+        """The name of the reverse DNS pointer for the IP address, e.g.:
+            >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+            '1.0.0.127.in-addr.arpa'
+            >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+            '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+        """
+        return self._reverse_pointer()
+
+    @property
     def version(self):
         msg = '%200s has no version specified' % (type(self),)
         raise NotImplementedError(msg)
@@ -456,7 +461,8 @@
             raise AddressValueError(msg % (address, address_len,
                                            expected_len, self._version))
 
-    def _ip_int_from_prefix(self, prefixlen):
+    @classmethod
+    def _ip_int_from_prefix(cls, prefixlen):
         """Turn the prefix length into a bitwise netmask
 
         Args:
@@ -466,9 +472,10 @@
             An integer.
 
         """
-        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
+        return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
 
-    def _prefix_from_ip_int(self, ip_int):
+    @classmethod
+    def _prefix_from_ip_int(cls, ip_int):
         """Return prefix length from the bitwise netmask.
 
         Args:
@@ -481,22 +488,24 @@
             ValueError: If the input intermingles zeroes & ones
         """
         trailing_zeroes = _count_righthand_zero_bits(ip_int,
-                                                     self._max_prefixlen)
-        prefixlen = self._max_prefixlen - trailing_zeroes
+                                                     cls._max_prefixlen)
+        prefixlen = cls._max_prefixlen - trailing_zeroes
         leading_ones = ip_int >> trailing_zeroes
         all_ones = (1 << prefixlen) - 1
         if leading_ones != all_ones:
-            byteslen = self._max_prefixlen // 8
+            byteslen = cls._max_prefixlen // 8
             details = ip_int.to_bytes(byteslen, 'big')
             msg = 'Netmask pattern %r mixes zeroes & ones'
             raise ValueError(msg % details)
         return prefixlen
 
-    def _report_invalid_netmask(self, netmask_str):
+    @classmethod
+    def _report_invalid_netmask(cls, netmask_str):
         msg = '%r is not a valid netmask' % netmask_str
         raise NetmaskValueError(msg) from None
 
-    def _prefix_from_prefix_string(self, prefixlen_str):
+    @classmethod
+    def _prefix_from_prefix_string(cls, prefixlen_str):
         """Return prefix length from a numeric string
 
         Args:
@@ -511,16 +520,17 @@
         # int allows a leading +/- as well as surrounding whitespace,
         # so we ensure that isn't the case
         if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
-            self._report_invalid_netmask(prefixlen_str)
+            cls._report_invalid_netmask(prefixlen_str)
         try:
             prefixlen = int(prefixlen_str)
         except ValueError:
-            self._report_invalid_netmask(prefixlen_str)
-        if not (0 <= prefixlen <= self._max_prefixlen):
-            self._report_invalid_netmask(prefixlen_str)
+            cls._report_invalid_netmask(prefixlen_str)
+        if not (0 <= prefixlen <= cls._max_prefixlen):
+            cls._report_invalid_netmask(prefixlen_str)
         return prefixlen
 
-    def _prefix_from_ip_string(self, ip_str):
+    @classmethod
+    def _prefix_from_ip_string(cls, ip_str):
         """Turn a netmask/hostmask string into a prefix length
 
         Args:
@@ -534,24 +544,24 @@
         """
         # Parse the netmask/hostmask like an IP address.
         try:
-            ip_int = self._ip_int_from_string(ip_str)
+            ip_int = cls._ip_int_from_string(ip_str)
         except AddressValueError:
-            self._report_invalid_netmask(ip_str)
+            cls._report_invalid_netmask(ip_str)
 
         # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
         # Note that the two ambiguous cases (all-ones and all-zeroes) are
         # treated as netmasks.
         try:
-            return self._prefix_from_ip_int(ip_int)
+            return cls._prefix_from_ip_int(ip_int)
         except ValueError:
             pass
 
         # Invert the bits, and try matching a /0+1+/ hostmask instead.
-        ip_int ^= self._ALL_ONES
+        ip_int ^= cls._ALL_ONES
         try:
-            return self._prefix_from_ip_int(ip_int)
+            return cls._prefix_from_ip_int(ip_int)
         except ValueError:
-            self._report_invalid_netmask(ip_str)
+            cls._report_invalid_netmask(ip_str)
 
 
 class _BaseAddress(_IPAddressBase):
@@ -933,20 +943,11 @@
                 'prefix length diff %d is invalid for netblock %s' % (
                     new_prefixlen, self))
 
-        first = self.__class__('%s/%s' %
-                                 (self.network_address,
-                                  self._prefixlen + prefixlen_diff))
-
-        yield first
-        current = first
-        while True:
-            broadcast = current.broadcast_address
-            if broadcast == self.broadcast_address:
-                return
-            new_addr = self._address_class(int(broadcast) + 1)
-            current = self.__class__('%s/%s' % (new_addr,
-                                                new_prefixlen))
-
+        start = int(self.network_address)
+        end = int(self.broadcast_address)
+        step = (int(self.hostmask) + 1) >> prefixlen_diff
+        for new_addr in range(start, end, step):
+            current = self.__class__((new_addr, new_prefixlen))
             yield current
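
Note (not part of this patch): subnets() now walks the network arithmetically
(integer start, end and step) and builds each child via the new
(address, prefixlen) tuple constructor instead of re-parsing strings.  The
yielded networks are the same as before:

    import ipaddress

    net = ipaddress.ip_network('192.0.2.0/24')
    print(list(net.subnets(prefixlen_diff=2)))
    # [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
    #  IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]
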
 
     def supernet(self, prefixlen_diff=1, new_prefix=None):
@@ -980,15 +981,15 @@
                 raise ValueError('cannot set prefixlen_diff and new_prefix')
             prefixlen_diff = self._prefixlen - new_prefix
 
-        if self.prefixlen - prefixlen_diff < 0:
+        new_prefixlen = self.prefixlen - prefixlen_diff
+        if new_prefixlen < 0:
             raise ValueError(
                 'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                 (self.prefixlen, prefixlen_diff))
-        # TODO (pmoody): optimize this.
-        t = self.__class__('%s/%d' % (self.network_address,
-                                      self.prefixlen - prefixlen_diff),
-                                     strict=False)
-        return t.__class__('%s/%d' % (t.network_address, t.prefixlen))
+        return self.__class__((
+            int(self.network_address) & (int(self.netmask) << prefixlen_diff),
+            new_prefixlen
+            ))
 
     @property
     def is_multicast(self):
@@ -1089,14 +1090,43 @@
     # the valid octets for host and netmasks. only useful for IPv4.
     _valid_mask_octets = frozenset((255, 254, 252, 248, 240, 224, 192, 128, 0))
 
+    _max_prefixlen = IPV4LENGTH
+    # There are only a handful of valid v4 netmasks, so we cache them all
+    # when constructed (see _make_netmask()).
+    _netmask_cache = {}
+
     def __init__(self, address):
         self._version = 4
-        self._max_prefixlen = IPV4LENGTH
 
     def _explode_shorthand_ip_string(self):
         return str(self)
 
-    def _ip_int_from_string(self, ip_str):
+    @classmethod
+    def _make_netmask(cls, arg):
+        """Make a (netmask, prefix_len) tuple from the given argument.
+
+        Argument can be:
+        - an integer (the prefix length)
+        - a string representing the prefix length (e.g. "24")
+        - a string representing the prefix netmask (e.g. "255.255.255.0")
+        """
+        if arg not in cls._netmask_cache:
+            if isinstance(arg, int):
+                prefixlen = arg
+            else:
+                try:
+                    # Check for a netmask in prefix length form
+                    prefixlen = cls._prefix_from_prefix_string(arg)
+                except NetmaskValueError:
+                    # Check for a netmask or hostmask in dotted-quad form.
+                    # This may raise NetmaskValueError.
+                    prefixlen = cls._prefix_from_ip_string(arg)
+            netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+            cls._netmask_cache[arg] = netmask, prefixlen
+        return cls._netmask_cache[arg]
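
Aside, not part of this patch: since only a small number of netmasks are
valid, the (netmask, prefixlen) pair for each distinct argument is computed
once and cached on the class, so repeated constructions reuse the cached
tuple.  A quick sketch (the private helper is called here purely for
illustration):

    import ipaddress

    a = ipaddress.ip_network('192.0.2.0/24')
    b = ipaddress.ip_network('192.0.2.0/255.255.255.0')   # dotted netmask form
    assert a == b
    # The same argument always returns the same cached (netmask, prefixlen) pair.
    assert (ipaddress.IPv4Network._make_netmask(24)
            is ipaddress.IPv4Network._make_netmask(24))
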
+
+    @classmethod
+    def _ip_int_from_string(cls, ip_str):
         """Turn the given IP string into an integer for comparison.
 
         Args:
@@ -1117,11 +1147,12 @@
             raise AddressValueError("Expected 4 octets in %r" % ip_str)
 
         try:
-            return int.from_bytes(map(self._parse_octet, octets), 'big')
+            return int.from_bytes(map(cls._parse_octet, octets), 'big')
         except ValueError as exc:
             raise AddressValueError("%s in %r" % (exc, ip_str)) from None
 
-    def _parse_octet(self, octet_str):
+    @classmethod
+    def _parse_octet(cls, octet_str):
         """Convert a decimal octet into an integer.
 
         Args:
@@ -1137,7 +1168,7 @@
         if not octet_str:
             raise ValueError("Empty octet not permitted")
         # Whitelist the characters, since int() allows a lot of bizarre stuff.
-        if not self._DECIMAL_DIGITS.issuperset(octet_str):
+        if not cls._DECIMAL_DIGITS.issuperset(octet_str):
             msg = "Only decimal digits permitted in %r"
             raise ValueError(msg % octet_str)
         # We do the length check second, since the invalid character error
@@ -1157,7 +1188,8 @@
             raise ValueError("Octet %d (> 255) not permitted" % octet_int)
         return octet_int
 
-    def _string_from_ip_int(self, ip_int):
+    @classmethod
+    def _string_from_ip_int(cls, ip_int):
         """Turns a 32-bit integer into dotted decimal notation.
 
         Args:
@@ -1221,6 +1253,15 @@
             return True
         return False
 
+    def _reverse_pointer(self):
+        """Return the reverse DNS pointer name for the IPv4 address.
+
+        This implements the method described in RFC1035 3.5.
+
+        """
+        reverse_octets = str(self).split('.')[::-1]
+        return '.'.join(reverse_octets) + '.in-addr.arpa'
+
     @property
     def max_prefixlen(self):
         return self._max_prefixlen
@@ -1284,8 +1325,7 @@
              reserved IPv4 Network range.
 
         """
-        reserved_network = IPv4Network('240.0.0.0/4')
-        return self in reserved_network
+        return self in self._constants._reserved_network
 
     @property
     @functools.lru_cache()
@@ -1297,21 +1337,7 @@
             iana-ipv4-special-registry.
 
         """
-        return (self in IPv4Network('0.0.0.0/8') or
-                self in IPv4Network('10.0.0.0/8') or
-                self in IPv4Network('127.0.0.0/8') or
-                self in IPv4Network('169.254.0.0/16') or
-                self in IPv4Network('172.16.0.0/12') or
-                self in IPv4Network('192.0.0.0/29') or
-                self in IPv4Network('192.0.0.170/31') or
-                self in IPv4Network('192.0.2.0/24') or
-                self in IPv4Network('192.168.0.0/16') or
-                self in IPv4Network('198.18.0.0/15') or
-                self in IPv4Network('198.51.100.0/24') or
-                self in IPv4Network('203.0.113.0/24') or
-                self in IPv4Network('240.0.0.0/4') or
-                self in IPv4Network('255.255.255.255/32'))
-
+        return any(self in net for net in self._constants._private_networks)
 
     @property
     def is_multicast(self):
@@ -1322,8 +1348,7 @@
             See RFC 3171 for details.
 
         """
-        multicast_network = IPv4Network('224.0.0.0/4')
-        return self in multicast_network
+        return self in self._constants._multicast_network
 
     @property
     def is_unspecified(self):
@@ -1334,8 +1359,7 @@
             RFC 5735 3.
 
         """
-        unspecified_address = IPv4Address('0.0.0.0')
-        return self == unspecified_address
+        return self == self._constants._unspecified_address
 
     @property
     def is_loopback(self):
@@ -1345,8 +1369,7 @@
             A boolean, True if the address is a loopback per RFC 3330.
 
         """
-        loopback_network = IPv4Network('127.0.0.0/8')
-        return self in loopback_network
+        return self in self._constants._loopback_network
 
     @property
     def is_link_local(self):
@@ -1356,8 +1379,7 @@
             A boolean, True if the address is link-local per RFC 3927.
 
         """
-        linklocal_network = IPv4Network('169.254.0.0/16')
-        return self in linklocal_network
+        return self in self._constants._linklocal_network
 
 
 class IPv4Interface(IPv4Address):
@@ -1369,6 +1391,18 @@
             self._prefixlen = self._max_prefixlen
             return
 
+        if isinstance(address, tuple):
+            IPv4Address.__init__(self, address[0])
+            if len(address) > 1:
+                self._prefixlen = int(address[1])
+            else:
+                self._prefixlen = self._max_prefixlen
+
+            self.network = IPv4Network(address, strict=False)
+            self.netmask = self.network.netmask
+            self.hostmask = self.network.hostmask
+            return
+
         addr = _split_optional_netmask(address)
         IPv4Address.__init__(self, addr[0])
 
@@ -1484,20 +1518,28 @@
         _BaseV4.__init__(self, address)
         _BaseNetwork.__init__(self, address)
 
-        # Constructing from a packed address
-        if isinstance(address, bytes):
+        # Constructing from a packed address or integer
+        if isinstance(address, (int, bytes)):
             self.network_address = IPv4Address(address)
-            self._prefixlen = self._max_prefixlen
-            self.netmask = IPv4Address(self._ALL_ONES)
-            #fixme: address/network test here
+            self.netmask, self._prefixlen = self._make_netmask(self._max_prefixlen)
+            #fixme: address/network test here.
             return
 
-        # Efficient constructor from integer.
-        if isinstance(address, int):
-            self.network_address = IPv4Address(address)
-            self._prefixlen = self._max_prefixlen
-            self.netmask = IPv4Address(self._ALL_ONES)
-            #fixme: address/network test here.
+        if isinstance(address, tuple):
+            if len(address) > 1:
+                arg = address[1]
+            else:
+                # We weren't given an address[1]
+                arg = self._max_prefixlen
+            self.network_address = IPv4Address(address[0])
+            self.netmask, self._prefixlen = self._make_netmask(arg)
+            packed = int(self.network_address)
+            if packed & int(self.netmask) != packed:
+                if strict:
+                    raise ValueError('%s has host bits set' % self)
+                else:
+                    self.network_address = IPv4Address(packed &
+                                                       int(self.netmask))
             return
 
         # Assume input argument to be string or any object representation
@@ -1506,16 +1548,10 @@
         self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
 
         if len(addr) == 2:
-            try:
-                # Check for a netmask in prefix length form
-                self._prefixlen = self._prefix_from_prefix_string(addr[1])
-            except NetmaskValueError:
-                # Check for a netmask or hostmask in dotted-quad form.
-                # This may raise NetmaskValueError.
-                self._prefixlen = self._prefix_from_ip_string(addr[1])
+            arg = addr[1]
         else:
-            self._prefixlen = self._max_prefixlen
-        self.netmask = IPv4Address(self._ip_int_from_prefix(self._prefixlen))
+            arg = self._max_prefixlen
+        self.netmask, self._prefixlen = self._make_netmask(arg)
 
         if strict:
             if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
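
Note (not part of this patch): IPv4Network and IPv4Interface (and their IPv6
counterparts below) now also accept an (address, prefix) 2-tuple, which is
what subnets(), supernet() and summarize_address_range() use internally to
avoid round-tripping through strings.  All spellings build the same network:

    import ipaddress

    n1 = ipaddress.IPv4Network('192.0.2.0/24')
    n2 = ipaddress.IPv4Network(('192.0.2.0', 24))            # new tuple form
    n3 = ipaddress.IPv4Network((int(n1.network_address), 24))
    assert n1 == n2 == n3
    assert ipaddress.IPv4Interface(('192.0.2.5', 24)).network == n1
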
@@ -1542,6 +1578,37 @@
                 not self.is_private)
 
 
+class _IPv4Constants:
+    _linklocal_network = IPv4Network('169.254.0.0/16')
+
+    _loopback_network = IPv4Network('127.0.0.0/8')
+
+    _multicast_network = IPv4Network('224.0.0.0/4')
+
+    _private_networks = [
+        IPv4Network('0.0.0.0/8'),
+        IPv4Network('10.0.0.0/8'),
+        IPv4Network('127.0.0.0/8'),
+        IPv4Network('169.254.0.0/16'),
+        IPv4Network('172.16.0.0/12'),
+        IPv4Network('192.0.0.0/29'),
+        IPv4Network('192.0.0.170/31'),
+        IPv4Network('192.0.2.0/24'),
+        IPv4Network('192.168.0.0/16'),
+        IPv4Network('198.18.0.0/15'),
+        IPv4Network('198.51.100.0/24'),
+        IPv4Network('203.0.113.0/24'),
+        IPv4Network('240.0.0.0/4'),
+        IPv4Network('255.255.255.255/32'),
+        ]
+
+    _reserved_network = IPv4Network('240.0.0.0/4')
+
+    _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+
 
 class _BaseV6:
 
@@ -1555,12 +1622,35 @@
     _ALL_ONES = (2**IPV6LENGTH) - 1
     _HEXTET_COUNT = 8
     _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+    _max_prefixlen = IPV6LENGTH
+
+    # There are only a handful of valid v6 netmasks, so we cache them all
+    # when constructed (see _make_netmask()).
+    _netmask_cache = {}
 
     def __init__(self, address):
         self._version = 6
-        self._max_prefixlen = IPV6LENGTH
 
-    def _ip_int_from_string(self, ip_str):
+    @classmethod
+    def _make_netmask(cls, arg):
+        """Make a (netmask, prefix_len) tuple from the given argument.
+
+        Argument can be:
+        - an integer (the prefix length)
+        - a string representing the prefix length (e.g. "24")
+        - a string representing the prefix netmask (e.g. "255.255.255.0")
+        """
+        if arg not in cls._netmask_cache:
+            if isinstance(arg, int):
+                prefixlen = arg
+            else:
+                prefixlen = cls._prefix_from_prefix_string(arg)
+            netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+            cls._netmask_cache[arg] = netmask, prefixlen
+        return cls._netmask_cache[arg]
+
+    @classmethod
+    def _ip_int_from_string(cls, ip_str):
         """Turn an IPv6 ip_str into an integer.
 
         Args:
@@ -1596,7 +1686,7 @@
         # An IPv6 address can't have more than 8 colons (9 parts).
         # The extra colon comes from using the "::" notation for a single
         # leading or trailing zero part.
-        _max_parts = self._HEXTET_COUNT + 1
+        _max_parts = cls._HEXTET_COUNT + 1
         if len(parts) > _max_parts:
             msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str)
             raise AddressValueError(msg)
@@ -1628,17 +1718,17 @@
                 if parts_lo:
                     msg = "Trailing ':' only permitted as part of '::' in %r"
                     raise AddressValueError(msg % ip_str)  # :$ requires ::$
-            parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
+            parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
             if parts_skipped < 1:
                 msg = "Expected at most %d other parts with '::' in %r"
-                raise AddressValueError(msg % (self._HEXTET_COUNT-1, ip_str))
+                raise AddressValueError(msg % (cls._HEXTET_COUNT-1, ip_str))
         else:
             # Otherwise, allocate the entire address to parts_hi.  The
             # endpoints could still be empty, but _parse_hextet() will check
             # for that.
-            if len(parts) != self._HEXTET_COUNT:
+            if len(parts) != cls._HEXTET_COUNT:
                 msg = "Exactly %d parts expected without '::' in %r"
-                raise AddressValueError(msg % (self._HEXTET_COUNT, ip_str))
+                raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
             if not parts[0]:
                 msg = "Leading ':' only permitted as part of '::' in %r"
                 raise AddressValueError(msg % ip_str)  # ^: requires ^::
@@ -1654,16 +1744,17 @@
             ip_int = 0
             for i in range(parts_hi):
                 ip_int <<= 16
-                ip_int |= self._parse_hextet(parts[i])
+                ip_int |= cls._parse_hextet(parts[i])
             ip_int <<= 16 * parts_skipped
             for i in range(-parts_lo, 0):
                 ip_int <<= 16
-                ip_int |= self._parse_hextet(parts[i])
+                ip_int |= cls._parse_hextet(parts[i])
             return ip_int
         except ValueError as exc:
             raise AddressValueError("%s in %r" % (exc, ip_str)) from None
 
-    def _parse_hextet(self, hextet_str):
+    @classmethod
+    def _parse_hextet(cls, hextet_str):
         """Convert an IPv6 hextet string into an integer.
 
         Args:
@@ -1678,7 +1769,7 @@
 
         """
         # Whitelist the characters, since int() allows a lot of bizarre stuff.
-        if not self._HEX_DIGITS.issuperset(hextet_str):
+        if not cls._HEX_DIGITS.issuperset(hextet_str):
             raise ValueError("Only hex digits permitted in %r" % hextet_str)
         # We do the length check second, since the invalid character error
         # is likely to be more informative for the user
@@ -1688,7 +1779,8 @@
         # Length check means we can skip checking the integer value
         return int(hextet_str, 16)
 
-    def _compress_hextets(self, hextets):
+    @classmethod
+    def _compress_hextets(cls, hextets):
         """Compresses a list of hextets.
 
         Compresses a list of strings, replacing the longest continuous
@@ -1735,7 +1827,8 @@
 
         return hextets
 
-    def _string_from_ip_int(self, ip_int=None):
+    @classmethod
+    def _string_from_ip_int(cls, ip_int=None):
         """Turns a 128-bit integer into hexadecimal notation.
 
         Args:
@@ -1749,15 +1842,15 @@
 
         """
         if ip_int is None:
-            ip_int = int(self._ip)
+            ip_int = int(cls._ip)
 
-        if ip_int > self._ALL_ONES:
+        if ip_int > cls._ALL_ONES:
             raise ValueError('IPv6 address is too large')
 
         hex_str = '%032x' % ip_int
         hextets = ['%x' % int(hex_str[x:x+4], 16) for x in range(0, 32, 4)]
 
-        hextets = self._compress_hextets(hextets)
+        hextets = cls._compress_hextets(hextets)
         return ':'.join(hextets)
 
     def _explode_shorthand_ip_string(self):
@@ -1784,6 +1877,15 @@
             return '%s/%d' % (':'.join(parts), self._prefixlen)
         return ':'.join(parts)
 
+    def _reverse_pointer(self):
+        """Return the reverse DNS pointer name for the IPv6 address.
+
+        This implements the method described in RFC3596 2.5.
+
+        """
+        reverse_chars = self.exploded[::-1].replace(':', '')
+        return '.'.join(reverse_chars) + '.ip6.arpa'
+
     @property
     def max_prefixlen(self):
         return self._max_prefixlen
@@ -1848,8 +1950,7 @@
             See RFC 2373 2.7 for details.
 
         """
-        multicast_network = IPv6Network('ff00::/8')
-        return self in multicast_network
+        return self in self._constants._multicast_network
 
     @property
     def is_reserved(self):
@@ -1860,16 +1961,7 @@
             reserved IPv6 Network ranges.
 
         """
-        reserved_networks = [IPv6Network('::/8'), IPv6Network('100::/8'),
-                             IPv6Network('200::/7'), IPv6Network('400::/6'),
-                             IPv6Network('800::/5'), IPv6Network('1000::/4'),
-                             IPv6Network('4000::/3'), IPv6Network('6000::/3'),
-                             IPv6Network('8000::/3'), IPv6Network('A000::/3'),
-                             IPv6Network('C000::/3'), IPv6Network('E000::/4'),
-                             IPv6Network('F000::/5'), IPv6Network('F800::/6'),
-                             IPv6Network('FE00::/9')]
-
-        return any(self in x for x in reserved_networks)
+        return any(self in x for x in self._constants._reserved_networks)
 
     @property
     def is_link_local(self):
@@ -1879,8 +1971,7 @@
             A boolean, True if the address is reserved per RFC 4291.
 
         """
-        linklocal_network = IPv6Network('fe80::/10')
-        return self in linklocal_network
+        return self in self._constants._linklocal_network
 
     @property
     def is_site_local(self):
@@ -1894,8 +1985,7 @@
             A boolean, True if the address is reserved per RFC 3513 2.5.6.
 
         """
-        sitelocal_network = IPv6Network('fec0::/10')
-        return self in sitelocal_network
+        return self in self._constants._sitelocal_network
 
     @property
     @functools.lru_cache()
@@ -1907,16 +1997,7 @@
             iana-ipv6-special-registry.
 
         """
-        return (self in IPv6Network('::1/128') or
-                self in IPv6Network('::/128') or
-                self in IPv6Network('::ffff:0:0/96') or
-                self in IPv6Network('100::/64') or
-                self in IPv6Network('2001::/23') or
-                self in IPv6Network('2001:2::/48') or
-                self in IPv6Network('2001:db8::/32') or
-                self in IPv6Network('2001:10::/28') or
-                self in IPv6Network('fc00::/7') or
-                self in IPv6Network('fe80::/10'))
+        return any(self in net for net in self._constants._private_networks)
 
     @property
     def is_global(self):
@@ -2001,6 +2082,16 @@
             self.network = IPv6Network(self._ip)
             self._prefixlen = self._max_prefixlen
             return
+        if isinstance(address, tuple):
+            IPv6Address.__init__(self, address[0])
+            if len(address) > 1:
+                self._prefixlen = int(address[1])
+            else:
+                self._prefixlen = self._max_prefixlen
+            self.network = IPv6Network(address, strict=False)
+            self.netmask = self.network.netmask
+            self.hostmask = self.network.hostmask
+            return
 
         addr = _split_optional_netmask(address)
         IPv6Address.__init__(self, addr[0])
@@ -2118,18 +2209,26 @@
         _BaseV6.__init__(self, address)
         _BaseNetwork.__init__(self, address)
 
-        # Efficient constructor from integer.
-        if isinstance(address, int):
+        # Efficient constructor from integer or packed address
+        if isinstance(address, (bytes, int)):
             self.network_address = IPv6Address(address)
-            self._prefixlen = self._max_prefixlen
-            self.netmask = IPv6Address(self._ALL_ONES)
+            self.netmask, self._prefixlen = self._make_netmask(self._max_prefixlen)
             return
 
-        # Constructing from a packed address
-        if isinstance(address, bytes):
-            self.network_address = IPv6Address(address)
-            self._prefixlen = self._max_prefixlen
-            self.netmask = IPv6Address(self._ALL_ONES)
+        if isinstance(address, tuple):
+            if len(address) > 1:
+                arg = address[1]
+            else:
+                arg = self._max_prefixlen
+            self.netmask, self._prefixlen = self._make_netmask(arg)
+            self.network_address = IPv6Address(address[0])
+            packed = int(self.network_address)
+            if packed & int(self.netmask) != packed:
+                if strict:
+                    raise ValueError('%s has host bits set' % self)
+                else:
+                    self.network_address = IPv6Address(packed &
+                                                       int(self.netmask))
             return
 
         # Assume input argument to be string or any object representation
@@ -2139,12 +2238,11 @@
         self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
 
         if len(addr) == 2:
-            # This may raise NetmaskValueError
-            self._prefixlen = self._prefix_from_prefix_string(addr[1])
+            arg = addr[1]
         else:
-            self._prefixlen = self._max_prefixlen
+            arg = self._max_prefixlen
+        self.netmask, self._prefixlen = self._make_netmask(arg)
 
-        self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
         if strict:
             if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
                 self.network_address):
@@ -2181,3 +2279,39 @@
         """
         return (self.network_address.is_site_local and
                 self.broadcast_address.is_site_local)
+
+
+class _IPv6Constants:
+
+    _linklocal_network = IPv6Network('fe80::/10')
+
+    _multicast_network = IPv6Network('ff00::/8')
+
+    _private_networks = [
+        IPv6Network('::1/128'),
+        IPv6Network('::/128'),
+        IPv6Network('::ffff:0:0/96'),
+        IPv6Network('100::/64'),
+        IPv6Network('2001::/23'),
+        IPv6Network('2001:2::/48'),
+        IPv6Network('2001:db8::/32'),
+        IPv6Network('2001:10::/28'),
+        IPv6Network('fc00::/7'),
+        IPv6Network('fe80::/10'),
+        ]
+
+    _reserved_networks = [
+        IPv6Network('::/8'), IPv6Network('100::/8'),
+        IPv6Network('200::/7'), IPv6Network('400::/6'),
+        IPv6Network('800::/5'), IPv6Network('1000::/4'),
+        IPv6Network('4000::/3'), IPv6Network('6000::/3'),
+        IPv6Network('8000::/3'), IPv6Network('A000::/3'),
+        IPv6Network('C000::/3'), IPv6Network('E000::/4'),
+        IPv6Network('F000::/5'), IPv6Network('F800::/6'),
+        IPv6Network('FE00::/9'),
+    ]
+
+    _sitelocal_network = IPv6Network('fec0::/10')
+
+
+IPv6Address._constants = _IPv6Constants
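
Note (not part of this patch): the two _IPv*Constants classes precompute the
special-purpose networks once at import time, so properties such as
is_private, is_multicast and is_reserved no longer re-parse network literals
on every call.  Their results are unchanged:

    import ipaddress

    assert ipaddress.ip_address('10.1.2.3').is_private
    assert ipaddress.ip_address('2001:db8::1').is_private
    assert not ipaddress.ip_address('8.8.8.8').is_private
    assert ipaddress.ip_address('224.0.0.1').is_multicast
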
diff --git a/Lib/json/tool.py b/Lib/json/tool.py
index 7db4528..cd57e4f 100644
--- a/Lib/json/tool.py
+++ b/Lib/json/tool.py
@@ -10,21 +10,24 @@
     Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
 
 """
-import sys
+import argparse
 import json
+import sys
+
 
 def main():
-    if len(sys.argv) == 1:
-        infile = sys.stdin
-        outfile = sys.stdout
-    elif len(sys.argv) == 2:
-        infile = open(sys.argv[1], 'r')
-        outfile = sys.stdout
-    elif len(sys.argv) == 3:
-        infile = open(sys.argv[1], 'r')
-        outfile = open(sys.argv[2], 'w')
-    else:
-        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
+    prog = 'python -m json.tool'
+    description = ('A simple command line interface for the json module '
+                   'to validate and pretty-print JSON objects.')
+    parser = argparse.ArgumentParser(prog=prog, description=description)
+    parser.add_argument('infile', nargs='?', type=argparse.FileType(),
+                        help='a JSON file to be validated or pretty-printed')
+    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
+                        help='write the output of infile to outfile')
+    options = parser.parse_args()
+
+    infile = options.infile or sys.stdin
+    outfile = options.outfile or sys.stdout
     with infile:
         try:
             obj = json.load(infile)
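
Note (not part of this patch): with argparse both positional arguments stay
optional, so the old len(sys.argv) dispatch is preserved, and
"python -m json.tool -h" now prints real usage.  A quick check of the pipe
form, sketched with subprocess:

    import subprocess
    import sys

    completed = subprocess.run(
        [sys.executable, '-m', 'json.tool'],
        input='{"json": "obj"}',
        stdout=subprocess.PIPE, universal_newlines=True)
    print(completed.stdout)
    # {
    #     "json": "obj"
    # }
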
diff --git a/Lib/logging/config.py b/Lib/logging/config.py
index 895fb26..8a99923 100644
--- a/Lib/logging/config.py
+++ b/Lib/logging/config.py
@@ -116,11 +116,12 @@
         sectname = "formatter_%s" % form
         fs = cp.get(sectname, "format", raw=True, fallback=None)
         dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
+        stl = cp.get(sectname, "style", raw=True, fallback='%')
         c = logging.Formatter
         class_name = cp[sectname].get("class")
         if class_name:
             c = _resolve(class_name)
-        f = c(fs, dfs)
+        f = c(fs, dfs, stl)
         formatters[form] = f
     return formatters
 
@@ -660,7 +661,12 @@
             fmt = config.get('format', None)
             dfmt = config.get('datefmt', None)
             style = config.get('style', '%')
-            result = logging.Formatter(fmt, dfmt, style)
+            cname = config.get('class', None)
+            if not cname:
+                c = logging.Formatter
+            else:
+                c = _resolve(cname)
+            result = c(fmt, dfmt, style)
         return result
 
     def configure_filter(self, config):
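
Note (not part of this patch): fileConfig() formatter sections now honour a
"style" key, and configure_formatter() resolves an optional "class" key, so
dictConfig() can pick a Formatter subclass without the '()' factory syntax.
A minimal dictConfig sketch using both keys (the names below are
illustrative only):

    import logging
    import logging.config

    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'braces': {
                'class': 'logging.Formatter',   # any Formatter subclass works
                'format': '{levelname}:{name}:{message}',
                'style': '{',
            },
        },
        'handlers': {
            'console': {'class': 'logging.StreamHandler', 'formatter': 'braces'},
        },
        'root': {'handlers': ['console'], 'level': 'INFO'},
    })
    logging.getLogger('demo').info('hello')     # -> INFO:demo:hello
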
diff --git a/Lib/multiprocessing/dummy/connection.py b/Lib/multiprocessing/dummy/connection.py
index 694ef96..1984375 100644
--- a/Lib/multiprocessing/dummy/connection.py
+++ b/Lib/multiprocessing/dummy/connection.py
@@ -59,9 +59,8 @@
             return True
         if timeout <= 0.0:
             return False
-        self._in.not_empty.acquire()
-        self._in.not_empty.wait(timeout)
-        self._in.not_empty.release()
+        with self._in.not_empty:
+            self._in.not_empty.wait(timeout)
         return self._in.qsize() > 0
 
     def close(self):
diff --git a/Lib/multiprocessing/heap.py b/Lib/multiprocessing/heap.py
index 344a45f..9e3016c 100644
--- a/Lib/multiprocessing/heap.py
+++ b/Lib/multiprocessing/heap.py
@@ -216,9 +216,8 @@
         assert 0 <= size < sys.maxsize
         if os.getpid() != self._lastpid:
             self.__init__()                     # reinitialize after fork
-        self._lock.acquire()
-        self._free_pending_blocks()
-        try:
+        with self._lock:
+            self._free_pending_blocks()
             size = self._roundup(max(size,1), self._alignment)
             (arena, start, stop) = self._malloc(size)
             new_stop = start + size
@@ -227,8 +226,6 @@
             block = (arena, start, new_stop)
             self._allocated_blocks.add(block)
             return block
-        finally:
-            self._lock.release()
 
 #
 # Class representing a chunk of an mmap -- can be inherited by child process
diff --git a/Lib/multiprocessing/managers.py b/Lib/multiprocessing/managers.py
index 66d46fc..820ae91 100644
--- a/Lib/multiprocessing/managers.py
+++ b/Lib/multiprocessing/managers.py
@@ -306,8 +306,7 @@
         '''
         Return some info --- useful to spot problems with refcounting
         '''
-        self.mutex.acquire()
-        try:
+        with self.mutex:
             result = []
             keys = list(self.id_to_obj.keys())
             keys.sort()
@@ -317,8 +316,6 @@
                                   (ident, self.id_to_refcount[ident],
                                    str(self.id_to_obj[ident][0])[:75]))
             return '\n'.join(result)
-        finally:
-            self.mutex.release()
 
     def number_of_objects(self, c):
         '''
@@ -343,8 +340,7 @@
         '''
         Create a new shared object and return its id
         '''
-        self.mutex.acquire()
-        try:
+        with self.mutex:
             callable, exposed, method_to_typeid, proxytype = \
                       self.registry[typeid]
 
@@ -374,8 +370,6 @@
             # has been created.
             self.incref(c, ident)
             return ident, tuple(exposed)
-        finally:
-            self.mutex.release()
 
     def get_methods(self, c, token):
         '''
@@ -392,22 +386,16 @@
         self.serve_client(c)
 
     def incref(self, c, ident):
-        self.mutex.acquire()
-        try:
+        with self.mutex:
             self.id_to_refcount[ident] += 1
-        finally:
-            self.mutex.release()
 
     def decref(self, c, ident):
-        self.mutex.acquire()
-        try:
+        with self.mutex:
             assert self.id_to_refcount[ident] >= 1
             self.id_to_refcount[ident] -= 1
             if self.id_to_refcount[ident] == 0:
                 del self.id_to_obj[ident], self.id_to_refcount[ident]
                 util.debug('disposing of obj with id %r', ident)
-        finally:
-            self.mutex.release()
 
 #
 # Class to represent state of a manager
@@ -671,14 +659,11 @@
 
     def __init__(self, token, serializer, manager=None,
                  authkey=None, exposed=None, incref=True):
-        BaseProxy._mutex.acquire()
-        try:
+        with BaseProxy._mutex:
             tls_idset = BaseProxy._address_to_local.get(token.address, None)
             if tls_idset is None:
                 tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                 BaseProxy._address_to_local[token.address] = tls_idset
-        finally:
-            BaseProxy._mutex.release()
 
         # self._tls is used to record the connection used by this
         # thread to communicate with the manager at token.address
diff --git a/Lib/multiprocessing/pool.py b/Lib/multiprocessing/pool.py
index 8832a5c..77eb817 100644
--- a/Lib/multiprocessing/pool.py
+++ b/Lib/multiprocessing/pool.py
@@ -666,8 +666,7 @@
         return self
 
     def next(self, timeout=None):
-        self._cond.acquire()
-        try:
+        with self._cond:
             try:
                 item = self._items.popleft()
             except IndexError:
@@ -680,8 +679,6 @@
                     if self._index == self._length:
                         raise StopIteration
                     raise TimeoutError
-        finally:
-            self._cond.release()
 
         success, value = item
         if success:
@@ -691,8 +688,7 @@
     __next__ = next                    # XXX
 
     def _set(self, i, obj):
-        self._cond.acquire()
-        try:
+        with self._cond:
             if self._index == i:
                 self._items.append(obj)
                 self._index += 1
@@ -706,18 +702,13 @@
 
             if self._index == self._length:
                 del self._cache[self._job]
-        finally:
-            self._cond.release()
 
     def _set_length(self, length):
-        self._cond.acquire()
-        try:
+        with self._cond:
             self._length = length
             if self._index == self._length:
                 self._cond.notify()
                 del self._cache[self._job]
-        finally:
-            self._cond.release()
 
 #
 # Class whose instances are returned by `Pool.imap_unordered()`
@@ -726,15 +717,12 @@
 class IMapUnorderedIterator(IMapIterator):
 
     def _set(self, i, obj):
-        self._cond.acquire()
-        try:
+        with self._cond:
             self._items.append(obj)
             self._index += 1
             self._cond.notify()
             if self._index == self._length:
                 del self._cache[self._job]
-        finally:
-            self._cond.release()
 
 #
 #
@@ -760,10 +748,7 @@
     @staticmethod
     def _help_stuff_finish(inqueue, task_handler, size):
         # put sentinels at head of inqueue to make workers finish
-        inqueue.not_empty.acquire()
-        try:
+        with inqueue.not_empty:
             inqueue.queue.clear()
             inqueue.queue.extend([None] * size)
             inqueue.not_empty.notify_all()
-        finally:
-            inqueue.not_empty.release()
diff --git a/Lib/multiprocessing/queues.py b/Lib/multiprocessing/queues.py
index f650771..c07ad40 100644
--- a/Lib/multiprocessing/queues.py
+++ b/Lib/multiprocessing/queues.py
@@ -81,14 +81,11 @@
         if not self._sem.acquire(block, timeout):
             raise Full
 
-        self._notempty.acquire()
-        try:
+        with self._notempty:
             if self._thread is None:
                 self._start_thread()
             self._buffer.append(obj)
             self._notempty.notify()
-        finally:
-            self._notempty.release()
 
     def get(self, block=True, timeout=None):
         if block and timeout is None:
@@ -201,12 +198,9 @@
     @staticmethod
     def _finalize_close(buffer, notempty):
         debug('telling queue thread to quit')
-        notempty.acquire()
-        try:
+        with notempty:
             buffer.append(_sentinel)
             notempty.notify()
-        finally:
-            notempty.release()
 
     @staticmethod
     def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
@@ -295,35 +289,24 @@
         if not self._sem.acquire(block, timeout):
             raise Full
 
-        self._notempty.acquire()
-        self._cond.acquire()
-        try:
+        with self._notempty, self._cond:
             if self._thread is None:
                 self._start_thread()
             self._buffer.append(obj)
             self._unfinished_tasks.release()
             self._notempty.notify()
-        finally:
-            self._cond.release()
-            self._notempty.release()
 
     def task_done(self):
-        self._cond.acquire()
-        try:
+        with self._cond:
             if not self._unfinished_tasks.acquire(False):
                 raise ValueError('task_done() called too many times')
             if self._unfinished_tasks._semlock._is_zero():
                 self._cond.notify_all()
-        finally:
-            self._cond.release()
 
     def join(self):
-        self._cond.acquire()
-        try:
+        with self._cond:
             if not self._unfinished_tasks._semlock._is_zero():
                 self._cond.wait()
-        finally:
-            self._cond.release()
 
 #
 # Simplified Queue type -- really just a locked pipe
diff --git a/Lib/multiprocessing/sharedctypes.py b/Lib/multiprocessing/sharedctypes.py
index 0c17825..4258f59 100644
--- a/Lib/multiprocessing/sharedctypes.py
+++ b/Lib/multiprocessing/sharedctypes.py
@@ -188,6 +188,12 @@
         self.acquire = self._lock.acquire
         self.release = self._lock.release
 
+    def __enter__(self):
+        return self._lock.__enter__()
+
+    def __exit__(self, *args):
+        return self._lock.__exit__(*args)
+
     def __reduce__(self):
         assert_spawning(self)
         return synchronized, (self._obj, self._lock)
@@ -212,32 +218,20 @@
         return len(self._obj)
 
     def __getitem__(self, i):
-        self.acquire()
-        try:
+        with self:
             return self._obj[i]
-        finally:
-            self.release()
 
     def __setitem__(self, i, value):
-        self.acquire()
-        try:
+        with self:
             self._obj[i] = value
-        finally:
-            self.release()
 
     def __getslice__(self, start, stop):
-        self.acquire()
-        try:
+        with self:
             return self._obj[start:stop]
-        finally:
-            self.release()
 
     def __setslice__(self, start, stop, values):
-        self.acquire()
-        try:
+        with self:
             self._obj[start:stop] = values
-        finally:
-            self.release()
 
 
 class SynchronizedString(SynchronizedArray):
diff --git a/Lib/multiprocessing/synchronize.py b/Lib/multiprocessing/synchronize.py
index dea1cbd..7d44330 100644
--- a/Lib/multiprocessing/synchronize.py
+++ b/Lib/multiprocessing/synchronize.py
@@ -337,34 +337,24 @@
         self._flag = ctx.Semaphore(0)
 
     def is_set(self):
-        self._cond.acquire()
-        try:
+        with self._cond:
             if self._flag.acquire(False):
                 self._flag.release()
                 return True
             return False
-        finally:
-            self._cond.release()
 
     def set(self):
-        self._cond.acquire()
-        try:
+        with self._cond:
             self._flag.acquire(False)
             self._flag.release()
             self._cond.notify_all()
-        finally:
-            self._cond.release()
 
     def clear(self):
-        self._cond.acquire()
-        try:
+        with self._cond:
             self._flag.acquire(False)
-        finally:
-            self._cond.release()
 
     def wait(self, timeout=None):
-        self._cond.acquire()
-        try:
+        with self._cond:
             if self._flag.acquire(False):
                 self._flag.release()
             else:
@@ -374,8 +364,6 @@
                 self._flag.release()
                 return True
             return False
-        finally:
-            self._cond.release()
 
 #
 # Barrier
diff --git a/Lib/multiprocessing/util.py b/Lib/multiprocessing/util.py
index 0b695e4..8760c82 100644
--- a/Lib/multiprocessing/util.py
+++ b/Lib/multiprocessing/util.py
@@ -327,6 +327,13 @@
         self.acquire = self._lock.acquire
         self.release = self._lock.release
 
+    def __enter__(self):
+        return self._lock.__enter__()
+
+    def __exit__(self, *args):
+        return self._lock.__exit__(*args)
+
+
 class ForkAwareLocal(threading.local):
     def __init__(self):
         register_after_fork(self, lambda obj : obj.__dict__.clear())
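
Aside, not part of this patch: the multiprocessing hunks above are mechanical
conversions of acquire()/try/finally/release() blocks into with statements;
the __enter__/__exit__ pairs added to the synchronized ctypes wrapper and to
ForkAwareThreadLock additionally make those objects usable as context
managers themselves.  For example:

    import multiprocessing

    arr = multiprocessing.Array('i', range(5))   # synchronized wrapper, RLock by default
    with arr:                                    # relies on the new __enter__/__exit__
        arr[0] = 42
    print(arr[:])                                # [42, 1, 2, 3, 4]
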
diff --git a/Lib/opcode.py b/Lib/opcode.py
index 0bd1ee6..bfd3c4d 100644
--- a/Lib/opcode.py
+++ b/Lib/opcode.py
@@ -70,6 +70,9 @@
 
 def_op('UNARY_INVERT', 15)
 
+def_op('BINARY_MATRIX_MULTIPLY', 16)
+def_op('INPLACE_MATRIX_MULTIPLY', 17)
+
 def_op('BINARY_POWER', 19)
 def_op('BINARY_MULTIPLY', 20)
 
diff --git a/Lib/operator.py b/Lib/operator.py
index b60349f..856036d 100644
--- a/Lib/operator.py
+++ b/Lib/operator.py
@@ -105,6 +105,10 @@
     "Same as a * b."
     return a * b
 
+def matmul(a, b):
+    "Same as a @ b."
+    return a @ b
+
 def neg(a):
     "Same as -a."
     return -a
@@ -326,6 +330,11 @@
     a *= b
     return a
 
+def imatmul(a, b):
+    "Same as a @= b."
+    a @= b
+    return a
+
 def ior(a, b):
     "Same as a |= b."
     a |= b
@@ -383,6 +392,7 @@
 __lshift__ = lshift
 __mod__ = mod
 __mul__ = mul
+__matmul__ = matmul
 __neg__ = neg
 __or__ = or_
 __pos__ = pos
@@ -403,6 +413,7 @@
 __ilshift__ = ilshift
 __imod__ = imod
 __imul__ = imul
+__imatmul__ = imatmul
 __ior__ = ior
 __ipow__ = ipow
 __irshift__ = irshift
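
Note (not part of this patch): matmul()/imatmul() together with the two new
opcodes in Lib/opcode.py back the PEP 465 "@" operator.  A toy class showing
the correspondence (Mat is illustrative only):

    import operator

    class Mat:
        def __init__(self, v):
            self.v = v
        def __matmul__(self, other):
            return Mat(self.v * other.v)
        def __imatmul__(self, other):
            self.v *= other.v
            return self

    a, b = Mat(2), Mat(3)
    assert operator.matmul(a, b).v == 6   # same as (a @ b).v
    a = operator.imatmul(a, b)            # same as: a @= b
    assert a.v == 6
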
diff --git a/Lib/pathlib.py b/Lib/pathlib.py
index d3d1af8..03c5e6e 100644
--- a/Lib/pathlib.py
+++ b/Lib/pathlib.py
@@ -961,6 +961,17 @@
         """
         return cls(os.getcwd())
 
+    def samefile(self, other_path):
+        """Return whether `other_file` is the same or not as this file.
+        (as returned by os.path.samefile(file, other_file)).
+        """
+        st = self.stat()
+        try:
+            other_st = other_path.stat()
+        except AttributeError:
+            other_st = os.stat(other_path)
+        return os.path.samestat(st, other_st)
+
     def iterdir(self):
         """Iterate over the files in this directory.  Does not yield any
         result for the special paths '.' and '..'.
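
Note (not part of this patch): Path.samefile() accepts either another path
object or a plain string; the AttributeError fallback above covers the
string case via os.stat().  A minimal example:

    import pathlib
    import tempfile

    with tempfile.TemporaryDirectory() as d:
        p = pathlib.Path(d, 'data.txt')
        p.touch()
        assert p.samefile(pathlib.Path(d, 'data.txt'))   # another Path object
        assert p.samefile(str(p))                        # plain string, via os.stat()
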
diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py
index 0d2d83c..905c7c8 100644
--- a/Lib/pydoc_data/topics.py
+++ b/Lib/pydoc_data/topics.py
@@ -1,10 +1,10 @@
 # -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Sat May 17 21:42:09 2014
+# Autogenerated by Sphinx on Mon Feb 10 04:20:03 2014
 topics = {'assert': '\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n   assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n   if __debug__:\n      if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n   if __debug__:\n      if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names.  In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O).  The current code generator emits no code for an\nassert statement when optimization is requested at compile time.  Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal.  The value for the built-in\nvariable is determined when the interpreter starts.\n',
- 'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n   assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n   target_list     ::= target ("," target)* [","]\n   target          ::= identifier\n              | "(" target_list ")"\n              | "[" target_list "]"\n              | attributeref\n              | subscription\n              | slicing\n              | "*" target\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable.  The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n  that target.\n\n* If the target list is a comma-separated list of targets: The object\n  must be an iterable with the same number of items as there are\n  targets in the target list, and the items are assigned, from left to\n  right, to the corresponding targets.\n\n  * If the target list contains one target prefixed with an asterisk,\n    called a "starred" target: The object must be a sequence with at\n    least as many items as there are targets in the target list, minus\n    one.  The first items of the sequence are assigned, from left to\n    right, to the targets before the starred target.  The final items\n    of the sequence are assigned to the targets after the starred\n    target.  A list of the remaining items in the sequence is then\n    assigned to the starred target (the list can be empty).\n\n  * Else: The object must be a sequence with the same number of items\n    as there are targets in the target list, and the items are\n    assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n  * If the name does not occur in a "global" or "nonlocal" statement\n    in the current code block: the name is bound to the object in the\n    current local namespace.\n\n  * Otherwise: the name is bound to the object in the global namespace\n    or the outer namespace determined by "nonlocal", respectively.\n\n  The name is rebound if it was already bound.  
This may cause the\n  reference count for the object previously bound to the name to reach\n  zero, causing the object to be deallocated and its destructor (if it\n  has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n  brackets: The object must be an iterable with the same number of\n  items as there are targets in the target list, and its items are\n  assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n  the reference is evaluated.  It should yield an object with\n  assignable attributes; if this is not the case, "TypeError" is\n  raised.  That object is then asked to assign the assigned object to\n  the given attribute; if it cannot perform the assignment, it raises\n  an exception (usually but not necessarily "AttributeError").\n\n  Note: If the object is a class instance and the attribute reference\n  occurs on both sides of the assignment operator, the RHS expression,\n  "a.x" can access either an instance attribute or (if no instance\n  attribute exists) a class attribute.  The LHS target "a.x" is always\n  set as an instance attribute, creating it if necessary.  Thus, the\n  two occurrences of "a.x" do not necessarily refer to the same\n  attribute: if the RHS expression refers to a class attribute, the\n  LHS creates a new instance attribute as the target of the\n  assignment:\n\n     class Cls:\n         x = 3             # class variable\n     inst = Cls()\n     inst.x = inst.x + 1   # writes inst.x as 4 leaving Cls.x as 3\n\n  This description does not necessarily apply to descriptor\n  attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n  reference is evaluated.  It should yield either a mutable sequence\n  object (such as a list) or a mapping object (such as a dictionary).\n  Next, the subscript expression is evaluated.\n\n  If the primary is a mutable sequence object (such as a list), the\n  subscript must yield an integer.  If it is negative, the sequence\'s\n  length is added to it.  The resulting value must be a nonnegative\n  integer less than the sequence\'s length, and the sequence is asked\n  to assign the assigned object to its item with that index.  If the\n  index is out of range, "IndexError" is raised (assignment to a\n  subscripted sequence cannot add new items to a list).\n\n  If the primary is a mapping object (such as a dictionary), the\n  subscript must have a type compatible with the mapping\'s key type,\n  and the mapping is then asked to create a key/datum pair which maps\n  the subscript to the assigned object.  This can either replace an\n  existing key/value pair with the same key value, or insert a new\n  key/value pair (if no key with the same value existed).\n\n  For user-defined objects, the "__setitem__()" method is called with\n  appropriate arguments.\n\n* If the target is a slicing: The primary expression in the reference\n  is evaluated.  It should yield a mutable sequence object (such as a\n  list).  The assigned object should be a sequence object of the same\n  type.  Next, the lower and upper bound expressions are evaluated,\n  insofar they are present; defaults are zero and the sequence\'s\n  length.  The bounds should evaluate to integers. If either bound is\n  negative, the sequence\'s length is added to it.  The resulting\n  bounds are clipped to lie between zero and the sequence\'s length,\n  inclusive.  
Finally, the sequence object is asked to replace the\n  slice with the items of the assigned sequence.  The length of the\n  slice may be different from the length of the assigned sequence,\n  thus changing the length of the target sequence, if the object\n  allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe!  For instance, the\nfollowing program prints "[0, 2]":\n\n   x = [0, 1]\n   i = 0\n   i, x[i] = 1, 2\n   print(x)\n\nSee also:\n\n   **PEP 3132** - Extended Iterable Unpacking\n      The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n   augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n   augtarget                 ::= identifier | attributeref | subscription | slicing\n   augop                     ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n             | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
+ 'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n   assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n   target_list     ::= target ("," target)* [","]\n   target          ::= identifier\n              | "(" target_list ")"\n              | "[" target_list "]"\n              | attributeref\n              | subscription\n              | slicing\n              | "*" target\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable.  The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n  that target.\n\n* If the target list is a comma-separated list of targets: The\n  object must be an iterable with the same number of items as there\n  are targets in the target list, and the items are assigned, from\n  left to right, to the corresponding targets.\n\n  * If the target list contains one target prefixed with an\n    asterisk, called a "starred" target: The object must be a sequence\n    with at least as many items as there are targets in the target\n    list, minus one.  The first items of the sequence are assigned,\n    from left to right, to the targets before the starred target.  The\n    final items of the sequence are assigned to the targets after the\n    starred target.  A list of the remaining items in the sequence is\n    then assigned to the starred target (the list can be empty).\n\n  * Else: The object must be a sequence with the same number of\n    items as there are targets in the target list, and the items are\n    assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n  * If the name does not occur in a "global" or "nonlocal" statement\n    in the current code block: the name is bound to the object in the\n    current local namespace.\n\n  * Otherwise: the name is bound to the object in the global\n    namespace or the outer namespace determined by "nonlocal",\n    respectively.\n\n  The name is rebound if it was already bound.  
This may cause the\n  reference count for the object previously bound to the name to reach\n  zero, causing the object to be deallocated and its destructor (if it\n  has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n  square brackets: The object must be an iterable with the same number\n  of items as there are targets in the target list, and its items are\n  assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n  the reference is evaluated.  It should yield an object with\n  assignable attributes; if this is not the case, "TypeError" is\n  raised.  That object is then asked to assign the assigned object to\n  the given attribute; if it cannot perform the assignment, it raises\n  an exception (usually but not necessarily "AttributeError").\n\n  Note: If the object is a class instance and the attribute reference\n  occurs on both sides of the assignment operator, the RHS expression,\n  "a.x" can access either an instance attribute or (if no instance\n  attribute exists) a class attribute.  The LHS target "a.x" is always\n  set as an instance attribute, creating it if necessary.  Thus, the\n  two occurrences of "a.x" do not necessarily refer to the same\n  attribute: if the RHS expression refers to a class attribute, the\n  LHS creates a new instance attribute as the target of the\n  assignment:\n\n     class Cls:\n         x = 3             # class variable\n     inst = Cls()\n     inst.x = inst.x + 1   # writes inst.x as 4 leaving Cls.x as 3\n\n  This description does not necessarily apply to descriptor\n  attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n  reference is evaluated.  It should yield either a mutable sequence\n  object (such as a list) or a mapping object (such as a dictionary).\n  Next, the subscript expression is evaluated.\n\n  If the primary is a mutable sequence object (such as a list), the\n  subscript must yield an integer.  If it is negative, the sequence\'s\n  length is added to it.  The resulting value must be a nonnegative\n  integer less than the sequence\'s length, and the sequence is asked\n  to assign the assigned object to its item with that index.  If the\n  index is out of range, "IndexError" is raised (assignment to a\n  subscripted sequence cannot add new items to a list).\n\n  If the primary is a mapping object (such as a dictionary), the\n  subscript must have a type compatible with the mapping\'s key type,\n  and the mapping is then asked to create a key/datum pair which maps\n  the subscript to the assigned object.  This can either replace an\n  existing key/value pair with the same key value, or insert a new\n  key/value pair (if no key with the same value existed).\n\n  For user-defined objects, the "__setitem__()" method is called with\n  appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n  reference is evaluated.  It should yield a mutable sequence object\n  (such as a list).  The assigned object should be a sequence object\n  of the same type.  Next, the lower and upper bound expressions are\n  evaluated, insofar they are present; defaults are zero and the\n  sequence\'s length.  The bounds should evaluate to integers. If\n  either bound is negative, the sequence\'s length is added to it.  The\n  resulting bounds are clipped to lie between zero and the sequence\'s\n  length, inclusive.  
Finally, the sequence object is asked to replace\n  the slice with the items of the assigned sequence.  The length of\n  the slice may be different from the length of the assigned sequence,\n  thus changing the length of the target sequence, if the object\n  allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe!  For instance, the\nfollowing program prints "[0, 2]":\n\n   x = [0, 1]\n   i = 0\n   i, x[i] = 1, 2\n   print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n     The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n   augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n   augtarget                 ::= identifier | attributeref | subscription | slicing\n   augop                     ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n             | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
  'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name.  See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them.  The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name.  For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used.  If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n',
  'atom-literals': "\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n   literal ::= stringliteral | bytesliteral\n               | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue.  The value may be approximated in the case of floating point\nand imaginary (complex) literals.  See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value.  Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n",
- 'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for "self").  "name" is the attribute name. This\n   method should return the (computed) attribute value or raise an\n   "AttributeError" exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   "__getattr__()" is not called.  (This is an intentional asymmetry\n   between "__getattr__()" and "__setattr__()".) This is done both for\n   efficiency reasons and because otherwise "__getattr__()" would have\n   no way to access other attributes of the instance.  Note that at\n   least for instance variables, you can fake total control by not\n   inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object).  See the\n   "__getattribute__()" method below for a way to actually get total\n   control over attribute access.\n\nobject.__getattribute__(self, name)\n\n   Called unconditionally to implement attribute accesses for\n   instances of the class. If the class also defines "__getattr__()",\n   the latter will not be called unless "__getattribute__()" either\n   calls it explicitly or raises an "AttributeError". This method\n   should return the (computed) attribute value or raise an\n   "AttributeError" exception. In order to avoid infinite recursion in\n   this method, its implementation should always call the base class\n   method with the same name to access any attributes it needs, for\n   example, "object.__getattribute__(self, name)".\n\n   Note: This method may still be bypassed when looking up special methods\n     as the result of implicit invocation via language syntax or\n     built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n   Called when an attribute assignment is attempted.  This is called\n   instead of the normal mechanism (i.e. store the value in the\n   instance dictionary). *name* is the attribute name, *value* is the\n   value to be assigned to it.\n\n   If "__setattr__()" wants to assign to an instance attribute, it\n   should call the base class method with the same name, for example,\n   "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n   Like "__setattr__()" but for attribute deletion instead of\n   assignment.  This should only be implemented if "del obj.name" is\n   meaningful for the object.\n\nobject.__dir__(self)\n\n   Called when "dir()" is called on the object. A sequence must be\n   returned. "dir()" converts the returned sequence to a list and\n   sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents).  
In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n   Called to get the attribute of the owner class (class attribute\n   access) or of an instance of that class (instance attribute\n   access). *owner* is always the owner class, while *instance* is the\n   instance that the attribute was accessed through, or "None" when\n   the attribute is accessed through the *owner*.  This method should\n   return the (computed) attribute value or raise an "AttributeError"\n   exception.\n\nobject.__set__(self, instance, value)\n\n   Called to set the attribute on an instance *instance* of the owner\n   class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n   Called to delete the attribute on an instance *instance* of the\n   owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol:  "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead.  Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method:    "x.__get__(a)".\n\nInstance Binding\n   If binding to an object instance, "a.x" is transformed into the\n   call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n   If binding to a class, "A.x" is transformed into the call:\n   "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n   If "a" is an instance of "super", then the binding "super(B,\n   obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n   immediately preceding "B" and then invokes the descriptor with the\n   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined.  A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()".  If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary.  
If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor.  Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method.  Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary.  In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors.  Accordingly, instances can\nredefine and override methods.  This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage.  This wastes space for objects having very few instance\nvariables.  The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable.  Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances.  If defined in a\n   class, *__slots__* reserves space for the declared variables and\n   prevents the automatic creation of *__dict__* and *__weakref__* for\n   each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition.  Attempts to\n  assign to an unlisted variable name raises "AttributeError". If\n  dynamic assignment of new variables is desired, then add\n  "\'__dict__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n  *__slots__* do not support weak references to its instances. If weak\n  reference support is needed, then add "\'__weakref__\'" to the\n  sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (*Implementing Descriptors*) for each variable name.  As\n  a result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined.  As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__* (which must only contain names\n  of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n  variable defined by the base class slot is inaccessible (except by\n  retrieving its descriptor directly from the base class). This\n  renders the meaning of the program undefined.  
In the future, a\n  check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n  also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n',
+ 'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for "self").  "name" is the attribute name. This\n   method should return the (computed) attribute value or raise an\n   "AttributeError" exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   "__getattr__()" is not called.  (This is an intentional asymmetry\n   between "__getattr__()" and "__setattr__()".) This is done both for\n   efficiency reasons and because otherwise "__getattr__()" would have\n   no way to access other attributes of the instance.  Note that at\n   least for instance variables, you can fake total control by not\n   inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object).  See the\n   "__getattribute__()" method below for a way to actually get total\n   control over attribute access.\n\nobject.__getattribute__(self, name)\n\n   Called unconditionally to implement attribute accesses for\n   instances of the class. If the class also defines "__getattr__()",\n   the latter will not be called unless "__getattribute__()" either\n   calls it explicitly or raises an "AttributeError". This method\n   should return the (computed) attribute value or raise an\n   "AttributeError" exception. In order to avoid infinite recursion in\n   this method, its implementation should always call the base class\n   method with the same name to access any attributes it needs, for\n   example, "object.__getattribute__(self, name)".\n\n   Note: This method may still be bypassed when looking up special\n     methods as the result of implicit invocation via language syntax\n     or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n   Called when an attribute assignment is attempted.  This is called\n   instead of the normal mechanism (i.e. store the value in the\n   instance dictionary). *name* is the attribute name, *value* is the\n   value to be assigned to it.\n\n   If "__setattr__()" wants to assign to an instance attribute, it\n   should call the base class method with the same name, for example,\n   "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n   Like "__setattr__()" but for attribute deletion instead of\n   assignment.  This should only be implemented if "del obj.name" is\n   meaningful for the object.\n\nobject.__dir__(self)\n\n   Called when "dir()" is called on the object. A sequence must be\n   returned. "dir()" converts the returned sequence to a list and\n   sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents).  
In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n   Called to get the attribute of the owner class (class attribute\n   access) or of an instance of that class (instance attribute\n   access). *owner* is always the owner class, while *instance* is the\n   instance that the attribute was accessed through, or "None" when\n   the attribute is accessed through the *owner*.  This method should\n   return the (computed) attribute value or raise an "AttributeError"\n   exception.\n\nobject.__set__(self, instance, value)\n\n   Called to set the attribute on an instance *instance* of the owner\n   class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n   Called to delete the attribute on an instance *instance* of the\n   owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol:  "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead.  Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method:    "x.__get__(a)".\n\nInstance Binding\n   If binding to an object instance, "a.x" is transformed into the\n   call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n   If binding to a class, "A.x" is transformed into the call:\n   "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n   If "a" is an instance of "super", then the binding "super(B,\n   obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n   immediately preceding "B" and then invokes the descriptor with the\n   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined.  A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()".  If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary.  If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor.  Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method.  Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary.  
In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors.  Accordingly, instances can\nredefine and override methods.  This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage.  This wastes space for objects having very few instance\nvariables.  The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable.  Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances.  If defined in a\n   class, *__slots__* reserves space for the declared variables and\n   prevents the automatic creation of *__dict__* and *__weakref__* for\n   each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition.  Attempts to\n  assign to an unlisted variable name raises "AttributeError". If\n  dynamic assignment of new variables is desired, then add\n  "\'__dict__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n  defining *__slots__* do not support weak references to its\n  instances. If weak reference support is needed, then add\n  "\'__weakref__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (*Implementing Descriptors*) for each variable name.  As\n  a result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined.  As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__* (which must only contain names\n  of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n  instance variable defined by the base class slot is inaccessible\n  (except by retrieving its descriptor directly from the base class).\n  This renders the meaning of the program undefined.  In the future, a\n  check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n  may also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n',
  'attribute-references': '\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n   attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do.  This object is then\nasked to produce the attribute whose name is the identifier (which can\nbe customized by overriding the "__getattr__()" method).  If this\nattribute is not available, the exception "AttributeError" is raised.\nOtherwise, the type and value of the object produced is determined by\nthe object.  Multiple evaluations of the same attribute reference may\nyield different objects.\n',
  'augassign': '\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n   augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n   augtarget                 ::= identifier | attributeref | subscription | slicing\n   augop                     ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n             | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
  'binary': '\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels.  Note that some of these operations also apply to certain non-\nnumeric types.  Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n   m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n              | m_expr "%" u_expr\n   a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger and the other must be a sequence. In the former case, the\nnumbers are converted to a common type and then multiplied together.\nIn the latter case, sequence repetition is performed; a negative\nrepetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments.  The numeric arguments are first\nconverted to a common type. Division of integers yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult.  Division by zero raises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second.  The numeric arguments are first\nconverted to a common type.  A zero right argument raises the\n"ZeroDivisionError" exception.  The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".)  The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: "x == (x//y)*y + (x%y)".  Floor division and modulo are also\nconnected with the built-in function "divmod()": "divmod(x, y) ==\n(x//y, x%y)". [2].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation).  The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the "divmod()"\nfunction are not defined for complex numbers.  Instead, convert to a\nfloating point number using the "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments.  The\narguments must either both be numbers or both sequences of the same\ntype.  In the former case, the numbers are converted to a common type\nand then added together.  In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n',
@@ -17,63 +17,63 @@
  'break': '\nThe "break" statement\n*********************\n\n   break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n',
  'callable-types': '\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n   Called when the instance is "called" as a function; if this method\n   is defined, "x(arg1, arg2, ...)" is a shorthand for\n   "x.__call__(arg1, arg2, ...)".\n',
  'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n   call                 ::= primary "(" [argument_list [","] | comprehension] ")"\n   argument_list        ::= positional_arguments ["," keyword_arguments]\n                       ["," "*" expression] ["," keyword_arguments]\n                       ["," "**" expression]\n                     | keyword_arguments ["," "*" expression]\n                       ["," keyword_arguments] ["," "**" expression]\n                     | "*" expression ["," keyword_arguments] ["," "**" expression]\n                     | "**" expression\n   positional_arguments ::= expression ("," expression)*\n   keyword_arguments    ::= keyword_item ("," keyword_item)*\n   keyword_item         ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable).  All argument expressions are\nevaluated before the call is attempted.  Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows.  First, a list of unfilled slots is\ncreated for the formal parameters.  If there are N positional\narguments, they are placed in the first N slots.  Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on).  If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot).  When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition.  (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.)  If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised.  Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword.  
In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable.  Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow).  So:\n\n   >>> def f(a, b):\n   ...  print(a, b)\n   ...\n   >>> f(b=1, *(2,))\n   2 1\n   >>> f(a=1, *(2,))\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in ?\n   TypeError: f() got multiple values for keyword argument \'a\'\n   >>> f(1, *(2,))\n   1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments.  In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception.  How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n   The code block for the function is executed, passing it the\n   argument list.  The first thing the code block will do is bind the\n   formal parameters to the arguments; this is described in section\n   *Function definitions*.  When the code block executes a "return"\n   statement, this specifies the return value of the function call.\n\na built-in function or method:\n   The result is up to the interpreter; see *Built-in Functions* for\n   the descriptions of built-in functions and methods.\n\na class object:\n   A new instance of that class is returned.\n\na class instance method:\n   The corresponding user-defined function is called, with an argument\n   list that is one longer than the argument list of the call: the\n   instance becomes the first argument.\n\na class instance:\n   The class must define a "__call__()" method; the effect is then the\n   same as if that method was called.\n',
- 'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n   classdef    ::= [decorators] "class" classname [inheritance] ":" suite\n   inheritance ::= "(" [parameter_list] ")"\n   classname   ::= identifier\n\nA class definition is an executable statement.  The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing.  Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n   class Foo:\n       pass\n\nis equivalent to\n\n   class Foo(object):\n       pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.)  When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n   @f1(arg)\n   @f2\n   class Foo: pass\n\nis equivalent to\n\n   class Foo: pass\n   Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators.  The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances.  Instance attributes\ncan be set in a method with "self.name = value".  Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way.  Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results.  *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also:\n\n   **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n   Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n    is a "finally" clause which happens to raise another exception.\n    That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n    exception or the execution of a "return", "continue", or "break"\n    statement.\n\n[3] A string literal appearing as the first statement in the function\n    body is transformed into the function\'s "__doc__" attribute and\n    therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n    body is transformed into the namespace\'s "__doc__" item and\n    therefore the class\'s *docstring*.\n',
- 'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation.  Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n   comparison    ::= or_expr ( comp_operator or_expr )*\n   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n                     | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects.  The objects need not have the same type. If both are\nnumbers, they are converted to a common type.  Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types.  You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The are\n  identical to themselves, "x is x" but are not equal to themselves,\n  "x != x".  Additionally, comparing any value to a not-a-number value\n  will return "False".  For example, both "3 < float(\'NaN\')" and\n  "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n  values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n  (the result of the built-in function "ord()") of their characters.\n  [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n  corresponding elements.  This means that to compare equal, each\n  element must compare equal and the two sequences must be of the same\n  type and have the same length.\n\n  If not equal, the sequences are ordered the same as their first\n  differing elements.  For example, "[1,2,x] <= [1,2,y]" has the same\n  value as "x <= y".  If the corresponding element does not exist, the\n  shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n  same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n  \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n  superset tests.  Those relations do not define total orderings (the\n  two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n  another, nor supersets of one another).  
Accordingly, sets are not\n  appropriate arguments for functions which depend on total ordering.\n  For example, "min()", "max()", and "sorted()" produce undefined\n  results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n  the same object; the choice whether one object is considered smaller\n  or larger than another one is made arbitrarily but consistently\n  within one execution of a program.\n\nComparison of objects of the differing types depends on whether either\nof the types provide explicit support for the comparison.  Most\nnumeric types can be compared with one another.  When cross-type\ncomparison is not supported, the comparison method returns\n"NotImplemented".\n\nThe operators "in" and "not in" test for membership.  "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise.  "x\nnot in s" returns the negation of "x in s".  All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether a the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*.  An equivalent test is "y.find(x) != -1".  Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y".  If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception.  (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object.  "x is not y"\nyields the inverse truth value. [4]\n',
- 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way.  In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs.  "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode.  Function and class definitions are also syntactically compound\nstatements.\n\nCompound statements consist of one or more \'clauses.\'  A clause\nconsists of a header and a \'suite.\'  The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon.  A suite is a group of statements controlled by a\nclause.  A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines.  Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n   if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n   if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n   compound_stmt ::= if_stmt\n                     | while_stmt\n                     | for_stmt\n                     | try_stmt\n                     | with_stmt\n                     | funcdef\n                     | classdef\n   suite         ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n   statement     ::= stmt_list NEWLINE | compound_stmt\n   stmt_list     ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT".  
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n   while_stmt ::= "while" expression ":" suite\n                  ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n   for_stmt ::= "for" target_list "in" expression_list ":" suite\n                ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject.  An iterator is created for the result of the\n"expression_list".  The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices.  Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted.  When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop.  Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the loop\n  (this can only occur for mutable sequences, i.e. lists).  
An\n  internal counter is used to keep track of which item is used next,\n  and this is incremented on each iteration.  When this counter has\n  reached the length of the sequence the loop terminates.  This means\n  that if the suite deletes the current (or a previous) item from the\n  sequence, the next item will be skipped (since it gets the index of\n  the current item which has already been treated).  Likewise, if the\n  suite inserts an item in the sequence before the current item, the\n  current item will be treated again the next time through the loop.\n  This can lead to nasty bugs that can be avoided by making a\n  temporary copy using a slice of the whole sequence, e.g.,\n\n     for x in a[:]:\n         if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression ["as" target]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started.  This search inspects the except clauses\nin turn until one is found that matches the exception.  An expression-\nless except clause, if present, must be last; it matches any\nexception.  For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception.  An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed.  All except\nclauses must have an executable block.  When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause.  This is as if\n\n   except E as N:\n       foo\n\nwas translated to\n\n   except E as N:\n       try:\n           foo\n       finally:\n           del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause.  
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be access via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred.  "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler.  The "try"\nclause is executed, including any "except" and "else" clauses.  If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed.  If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause.  If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n   >>> def f():\n   ...     try:\n   ...         1/0\n   ...     finally:\n   ...         return 42\n   ...\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed.  Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n   >>> def foo():\n   ...     try:\n   ...         return \'try\'\n   ...     finally:\n   ...         return \'finally\'\n   ...\n   >>> foo()\n   \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item") is\n   evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return value\n   from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()" method\n     returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked.  If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n   **PEP 0343** - The "with" statement\n      The specification, background, and examples for the Python\n      "with" statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n   funcdef        ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n   decorators     ::= decorator+\n   decorator      ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n   dotted_name    ::= identifier ("." identifier)*\n   parameter_list ::= (defparameter ",")*\n                      ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n                      | "**" parameter\n                      | defparameter [","] )\n   parameter      ::= identifier [":" expression]\n   defparameter   ::= parameter ["=" expression]\n   funcname       ::= identifier\n\nA function definition is an executable statement.  Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function).  This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition.  The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object.  Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n   @f1(arg)\n   @f2\n   def func(): pass\n\nis equivalent to\n\n   def func(): pass\n   func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted.  If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call.  This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended.  A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values.  If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple.  If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name.  Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier".  Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist.  These annotations can be any valid Python expression and are\nevaluated when the function definition is executed.  Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction.  The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions.  This uses lambda\nexpressions, described in section *Lambdas*.  Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression.  The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects.  
A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around.  Free variables used\nin the nested function can access the local variables of the function\ncontaining the def.  See section *Naming and binding* for details.\n\nSee also:\n\n   **PEP 3107** - Function Annotations\n      The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n   classdef    ::= [decorators] "class" classname [inheritance] ":" suite\n   inheritance ::= "(" [parameter_list] ")"\n   classname   ::= identifier\n\nA class definition is an executable statement.  The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing.  Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n   class Foo:\n       pass\n\nis equivalent to\n\n   class Foo(object):\n       pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.)  When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n   @f1(arg)\n   @f2\n   class Foo: pass\n\nis equivalent to\n\n   class Foo: pass\n   Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators.  The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances.  Instance attributes\ncan be set in a method with "self.name = value".  Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way.  Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results.  *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also:\n\n   **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n   Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n    is a "finally" clause which happens to raise another exception.\n    That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n    exception or the execution of a "return", "continue", or "break"\n    statement.\n\n[3] A string literal appearing as the first statement in the function\n    body is transformed into the function\'s "__doc__" attribute and\n    therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n    body is transformed into the namespace\'s "__doc__" item and\n    therefore the class\'s *docstring*.\n',
- 'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also:\n\n   **PEP 0343** - The "with" statement\n      The specification, background, and examples for the Python\n      "with" statement.\n',
+ 'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n   classdef    ::= [decorators] "class" classname [inheritance] ":" suite\n   inheritance ::= "(" [parameter_list] ")"\n   classname   ::= identifier\n\nA class definition is an executable statement.  The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing.  Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n   class Foo:\n       pass\n\nis equivalent to\n\n   class Foo(object):\n       pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.)  When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n   @f1(arg)\n   @f2\n   class Foo: pass\n\nis equivalent to\n\n   class Foo: pass\n   Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators.  The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances.  Instance attributes\ncan be set in a method with "self.name = value".  Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way.  Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results.  *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n  Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n    there is a "finally" clause which happens to raise another\n    exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n    an exception or the execution of a "return", "continue", or\n    "break" statement.\n\n[3] A string literal appearing as the first statement in the\n    function body is transformed into the function\'s "__doc__"\n    attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n    body is transformed into the namespace\'s "__doc__" item and\n    therefore the class\'s *docstring*.\n',
+ 'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation.  Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n   comparison    ::= or_expr ( comp_operator or_expr )*\n   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n                     | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects.  The objects need not have the same type. If both are\nnumbers, they are converted to a common type.  Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types.  You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n  are identical to themselves, "x is x" but are not equal to\n  themselves, "x != x".  Additionally, comparing any value to a\n  not-a-number value will return "False".  For example, both "3 <\n  float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n  values of their elements.\n\n* Strings are compared lexicographically using the numeric\n  equivalents (the result of the built-in function "ord()") of their\n  characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n  of corresponding elements.  This means that to compare equal, each\n  element must compare equal and the two sequences must be of the same\n  type and have the same length.\n\n  If not equal, the sequences are ordered the same as their first\n  differing elements.  For example, "[1,2,x] <= [1,2,y]" has the same\n  value as "x <= y".  If the corresponding element does not exist, the\n  shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n  same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n  \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n  superset tests.  Those relations do not define total orderings (the\n  two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n  another, nor supersets of one another).  
Accordingly, sets are not\n  appropriate arguments for functions which depend on total ordering.\n  For example, "min()", "max()", and "sorted()" produce undefined\n  results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n  are the same object; the choice whether one object is considered\n  smaller or larger than another one is made arbitrarily but\n  consistently within one execution of a program.\n\nComparison of objects of the differing types depends on whether either\nof the types provide explicit support for the comparison.  Most\nnumeric types can be compared with one another.  When cross-type\ncomparison is not supported, the comparison method returns\n"NotImplemented".\n\nThe operators "in" and "not in" test for membership.  "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise.  "x\nnot in s" returns the negation of "x in s".  All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether a the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*.  An equivalent test is "y.find(x) != -1".  Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y".  If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception.  (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object.  "x is not y"\nyields the inverse truth value. [4]\n',
+ 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way.  In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs.  "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode.  Function and class definitions are also syntactically compound\nstatements.\n\nCompound statements consist of one or more \'clauses.\'  A clause\nconsists of a header and a \'suite.\'  The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon.  A suite is a group of statements controlled by a\nclause.  A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines.  Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n   if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n   if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n   compound_stmt ::= if_stmt\n                     | while_stmt\n                     | for_stmt\n                     | try_stmt\n                     | with_stmt\n                     | funcdef\n                     | classdef\n   suite         ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n   statement     ::= stmt_list NEWLINE | compound_stmt\n   stmt_list     ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT".  
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n   while_stmt ::= "while" expression ":" suite\n                  ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n   for_stmt ::= "for" target_list "in" expression_list ":" suite\n                ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject.  An iterator is created for the result of the\n"expression_list".  The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices.  Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted.  When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop.  Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n  loop (this can only occur for mutable sequences, i.e. lists).  
An\n  internal counter is used to keep track of which item is used next,\n  and this is incremented on each iteration.  When this counter has\n  reached the length of the sequence the loop terminates.  This means\n  that if the suite deletes the current (or a previous) item from the\n  sequence, the next item will be skipped (since it gets the index of\n  the current item which has already been treated).  Likewise, if the\n  suite inserts an item in the sequence before the current item, the\n  current item will be treated again the next time through the loop.\n  This can lead to nasty bugs that can be avoided by making a\n  temporary copy using a slice of the whole sequence, e.g.,\n\n     for x in a[:]:\n         if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression ["as" target]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started.  This search inspects the except clauses\nin turn until one is found that matches the exception.  An expression-\nless except clause, if present, must be last; it matches any\nexception.  For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception.  An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed.  All except\nclauses must have an executable block.  When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause.  This is as if\n\n   except E as N:\n       foo\n\nwas translated to\n\n   except E as N:\n       try:\n           foo\n       finally:\n           del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause.  
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be access via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred.  "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler.  The "try"\nclause is executed, including any "except" and "else" clauses.  If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed.  If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause.  If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n   def f():\n       try:\n           1/0\n       finally:\n           return 42\n\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n   is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n   value from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()"\n     method returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked.  
If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n   funcdef        ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n   decorators     ::= decorator+\n   decorator      ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n   dotted_name    ::= identifier ("." identifier)*\n   parameter_list ::= (defparameter ",")*\n                      ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n                      | "**" parameter\n                      | defparameter [","] )\n   parameter      ::= identifier [":" expression]\n   defparameter   ::= parameter ["=" expression]\n   funcname       ::= identifier\n\nA function definition is an executable statement.  Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function).  This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition.  The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object.  Multiple decorators are applied in\nnested fashion. For example, the following code\n\n   @f1(arg)\n   @f2\n   def func(): pass\n\nis equivalent to\n\n   def func(): pass\n   func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted.  
If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call.  This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended.  A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values.  If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple.  If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name.  Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier".  Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist.  These annotations can be any valid Python expression and are\nevaluated when the function definition is executed.  Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction.  The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions.  This uses lambda\nexpressions, described in section *Lambdas*.  Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression.  The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects.  A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around.  Free variables used\nin the nested function can access the local variables of the function\ncontaining the def.  
See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n     The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n   classdef    ::= [decorators] "class" classname [inheritance] ":" suite\n   inheritance ::= "(" [parameter_list] ")"\n   classname   ::= identifier\n\nA class definition is an executable statement.  The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing.  Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n   class Foo:\n       pass\n\nis equivalent to\n\n   class Foo(object):\n       pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.)  When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n   @f1(arg)\n   @f2\n   class Foo: pass\n\nis equivalent to\n\n   class Foo: pass\n   Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators.  The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances.  Instance attributes\ncan be set in a method with "self.name = value".  Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way.  Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results.  *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n  Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n    there is a "finally" clause which happens to raise another\n    exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n    an exception or the execution of a "return", "continue", or\n    "break" statement.\n\n[3] A string literal appearing as the first statement in the\n    function body is transformed into the function\'s "__doc__"\n    attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n    body is transformed into the namespace\'s "__doc__" item and\n    therefore the class\'s *docstring*.\n',
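As a quick illustration of the keyword-only parameters and annotations described in the function-definition topic above, here is a minimal sketch (the function name "scale" and its parameters are made up for the example):

   def scale(values: list, *, factor: float = 2.0) -> list:
       # "factor" is keyword-only because it follows the bare "*";
       # each annotation is evaluated at definition time and stored in
       # scale.__annotations__, keyed by parameter name (plus "return").
       return [v * factor for v in values]

   print(scale([1, 2, 3], factor=10))    # [10, 20, 30]
   print(sorted(scale.__annotations__))  # ['factor', 'return', 'values']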
+ 'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n',
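A minimal sketch of a context manager implementing the two methods described above (the class name "Announcer" is illustrative):

   class Announcer:
       def __enter__(self):
           print("entering the runtime context")
           return self          # bound to the "as" target, if any

       def __exit__(self, exc_type, exc_value, traceback):
           print("exiting the runtime context")
           return False         # a false value: do not suppress exceptions

   with Announcer() as a:
       print("inside the with block")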
  'continue': '\nThe "continue" statement\n************************\n\n   continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop.  It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n',
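A small sketch of the interaction between "continue" and a "finally" clause described above:

   for n in range(3):
       try:
           if n == 1:
               continue             # the "finally" clause below runs first
           print("processing", n)
       finally:
           print("cleanup for", n)  # runs on every iteration, even the skipped one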
- 'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works that way:\n\n* If either argument is a complex number, the other is converted to\n  complex;\n\n* otherwise, if either argument is a floating point number, the other\n  is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator).  Extensions must define their own\nconversion behavior.\n',
- 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n   Called to create a new instance of class *cls*.  "__new__()" is a\n   static method (special-cased so you need not declare it as such)\n   that takes the class of which an instance was requested as its\n   first argument.  The remaining arguments are those passed to the\n   object constructor expression (the call to the class).  The return\n   value of "__new__()" should be the new object instance (usually an\n   instance of *cls*).\n\n   Typical implementations create a new instance of the class by\n   invoking the superclass\'s "__new__()" method using\n   "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n   arguments and then modifying the newly-created instance as\n   necessary before returning it.\n\n   If "__new__()" returns an instance of *cls*, then the new\n   instance\'s "__init__()" method will be invoked like\n   "__init__(self[, ...])", where *self* is the new instance and the\n   remaining arguments are the same as were passed to "__new__()".\n\n   If "__new__()" does not return an instance of *cls*, then the new\n   instance\'s "__init__()" method will not be invoked.\n\n   "__new__()" is intended mainly to allow subclasses of immutable\n   types (like int, str, or tuple) to customize instance creation.  It\n   is also commonly overridden in custom metaclasses in order to\n   customize class creation.\n\nobject.__init__(self[, ...])\n\n   Called when the instance is created.  The arguments are those\n   passed to the class constructor expression.  If a base class has an\n   "__init__()" method, the derived class\'s "__init__()" method, if\n   any, must explicitly call it to ensure proper initialization of the\n   base class part of the instance; for example:\n   "BaseClass.__init__(self, [args...])".  As a special constraint on\n   constructors, no value may be returned; doing so will cause a\n   "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n   Called when the instance is about to be destroyed.  This is also\n   called a destructor.  If a base class has a "__del__()" method, the\n   derived class\'s "__del__()" method, if any, must explicitly call it\n   to ensure proper deletion of the base class part of the instance.\n   Note that it is possible (though not recommended!) for the\n   "__del__()" method to postpone destruction of the instance by\n   creating a new reference to it.  It may then be called at a later\n   time when this new reference is deleted.  It is not guaranteed that\n   "__del__()" methods are called for objects that still exist when\n   the interpreter exits.\n\n   Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n     decrements the reference count for "x" by one, and the latter is\n     only called when "x"\'s reference count reaches zero.  Some common\n     situations that may prevent the reference count of an object from\n     going to zero include: circular references between objects (e.g.,\n     a doubly-linked list or a tree data structure with parent and\n     child pointers); a reference to the object on the stack frame of\n     a function that caught an exception (the traceback stored in\n     "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n     to the object on the stack frame that raised an unhandled\n     exception in interactive mode (the traceback stored in\n     "sys.last_traceback" keeps the stack frame alive).  
The first\n     situation can only be remedied by explicitly breaking the cycles;\n     the latter two situations can be resolved by storing "None" in\n     "sys.last_traceback". Circular references which are garbage are\n     detected and cleaned up when the cyclic garbage collector is\n     enabled (it\'s on by default). Refer to the documentation for the\n     "gc" module for more information about this topic.\n\n   Warning: Due to the precarious circumstances under which "__del__()"\n     methods are invoked, exceptions that occur during their execution\n     are ignored, and a warning is printed to "sys.stderr" instead.\n     Also, when "__del__()" is invoked in response to a module being\n     deleted (e.g., when execution of the program is done), other\n     globals referenced by the "__del__()" method may already have\n     been deleted or in the process of being torn down (e.g. the\n     import machinery shutting down).  For this reason, "__del__()"\n     methods should do the absolute minimum needed to maintain\n     external invariants.  Starting with version 1.5, Python\n     guarantees that globals whose name begins with a single\n     underscore are deleted from their module before other globals are\n     deleted; if no other references to such globals exist, this may\n     help in assuring that imported modules are still available at the\n     time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n   Called by the "repr()" built-in function to compute the "official"\n   string representation of an object.  If at all possible, this\n   should look like a valid Python expression that could be used to\n   recreate an object with the same value (given an appropriate\n   environment).  If this is not possible, a string of the form\n   "<...some useful description...>" should be returned. The return\n   value must be a string object. If a class defines "__repr__()" but\n   not "__str__()", then "__repr__()" is also used when an "informal"\n   string representation of instances of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by "str(object)" and the built-in functions "format()" and\n   "print()" to compute the "informal" or nicely printable string\n   representation of an object.  The return value must be a *string*\n   object.\n\n   This method differs from "object.__repr__()" in that there is no\n   expectation that "__str__()" return a valid Python expression: a\n   more convenient or concise representation can be used.\n\n   The default implementation defined by the built-in type "object"\n   calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n   Called by "bytes()" to compute a byte-string representation of an\n   object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n   Called by the "format()" built-in function (and by extension, the\n   "str.format()" method of class "str") to produce a "formatted"\n   string representation of an object. The "format_spec" argument is a\n   string that contains a description of the formatting options\n   desired. 
The interpretation of the "format_spec" argument is up to\n   the type implementing "__format__()", however most classes will\n   either delegate formatting to one of the built-in types, or use a\n   similar formatting option syntax.\n\n   See *Format Specification Mini-Language* for a description of the\n   standard formatting syntax.\n\n   The return value must be a string object.\n\n   Changed in version 3.4: The __format__ method of "object" itself\n   raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   These are the so-called "rich comparison" methods. The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of "x==y" does not imply that "x!=y" is false.\n   Accordingly, when defining "__eq__()", one should also define\n   "__ne__()" so that the operators will behave as expected.  See the\n   paragraph on "__hash__()" for some important notes on creating\n   *hashable* objects which support custom comparison operations and\n   are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n   and "__eq__()" and "__ne__()" are their own reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer.  The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   Note: "hash()" truncates the value returned from an object\'s custom\n     "__hash__()" method to the size of a "Py_ssize_t".  This is\n     typically 8 bytes on 64-bit builds and 4 bytes on 32-bit builds.\n     If an object\'s   "__hash__()" must interoperate on builds of\n     different bit sizes, be sure to check the width on all supported\n     builds.  
An easy way to do this is with "python -c "import sys;\n     print(sys.hash_info.width)""\n\n   If a class does not define an "__eq__()" method it should not\n   define a "__hash__()" operation either; if it defines "__eq__()"\n   but not "__hash__()", its instances will not be usable as items in\n   hashable collections.  If a class defines mutable objects and\n   implements an "__eq__()" method, it should not implement\n   "__hash__()", since the implementation of hashable collections\n   requires that a key\'s hash value is immutable (if the object\'s hash\n   value changes, it will be in the wrong hash bucket).\n\n   User-defined classes have "__eq__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns an appropriate value such\n   that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n   A class that overrides "__eq__()" and does not define "__hash__()"\n   will have its "__hash__()" implicitly set to "None".  When the\n   "__hash__()" method of a class is "None", instances of the class\n   will raise an appropriate "TypeError" when a program attempts to\n   retrieve their hash value, and will also be correctly identified as\n   unhashable when checking "isinstance(obj, collections.Hashable").\n\n   If a class that overrides "__eq__()" needs to retain the\n   implementation of "__hash__()" from a parent class, the interpreter\n   must be told this explicitly by setting "__hash__ =\n   <ParentClass>.__hash__".\n\n   If a class that does not override "__eq__()" wishes to suppress\n   hash support, it should include "__hash__ = None" in the class\n   definition. A class which defines its own "__hash__()" that\n   explicitly raises a "TypeError" would be incorrectly identified as\n   hashable by an "isinstance(obj, collections.Hashable)" call.\n\n   Note: By default, the "__hash__()" values of str, bytes and datetime\n     objects are "salted" with an unpredictable random value.\n     Although they remain constant within an individual Python\n     process, they are not predictable between repeated invocations of\n     Python.This is intended to provide protection against a denial-\n     of-service caused by carefully-chosen inputs that exploit the\n     worst case performance of a dict insertion, O(n^2) complexity.\n     See http://www.ocert.org/advisories/ocert-2011-003.html for\n     details.Changing hash values affects the iteration order of\n     dicts, sets and other mappings.  Python has never made guarantees\n     about this ordering (and it typically varies between 32-bit and\n     64-bit builds).See also "PYTHONHASHSEED".\n\n   Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True".  When this method is not\n   defined, "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero.  If a class defines\n   neither "__len__()" nor "__bool__()", all its instances are\n   considered true.\n',
- 'debugger': '\n"pdb" --- The Python Debugger\n*****************************\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs.  It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame.  It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source.  The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> pdb.run(\'mymodule.test()\')\n   > <string>(0)?()\n   (Pdb) continue\n   > <string>(1)?()\n   (Pdb) continue\n   NameError: \'spam\'\n   > <string>(1)?()\n   (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts.  For\nexample:\n\n   python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally.  After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program.  Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n   import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger.  You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> mymodule.test()\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in ?\n     File "./mymodule.py", line 4, in test\n       test2()\n     File "./mymodule.py", line 3, in test2\n       print(spam)\n   NameError: spam\n   >>> pdb.pm()\n   > ./mymodule.py(3)test2()\n   -> print(spam)\n   (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n   Execute the *statement* (given as a string or a code object) under\n   debugger control.  The debugger prompt appears before any code is\n   executed; you can set breakpoints and type "continue", or you can\n   step through the statement using "step" or "next" (all these\n   commands are explained below).  The optional *globals* and *locals*\n   arguments specify the environment in which the code is executed; by\n   default the dictionary of the module "__main__" is used.  (See the\n   explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n   Evaluate the *expression* (given as a string or a code object)\n   under debugger control.  When "runeval()" returns, it returns the\n   value of the expression.  
Otherwise this function is similar to\n   "run()".\n\npdb.runcall(function, *args, **kwds)\n\n   Call the *function* (a function or method object, not a string)\n   with the given arguments.  When "runcall()" returns, it returns\n   whatever the function call returned.  The debugger prompt appears\n   as soon as the function is entered.\n\npdb.set_trace()\n\n   Enter the debugger at the calling stack frame.  This is useful to\n   hard-code a breakpoint at a given point in a program, even if the\n   code is not otherwise being debugged (e.g. when an assertion\n   fails).\n\npdb.post_mortem(traceback=None)\n\n   Enter post-mortem debugging of the given *traceback* object.  If no\n   *traceback* is given, it uses the one of the exception that is\n   currently being handled (an exception must be being handled if the\n   default is to be used).\n\npdb.pm()\n\n   Enter post-mortem debugging of the traceback found in\n   "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name.  If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n   "Pdb" is the debugger class.\n\n   The *completekey*, *stdin* and *stdout* arguments are passed to the\n   underlying "cmd.Cmd" class; see the description there.\n\n   The *skip* argument, if given, must be an iterable of glob-style\n   module name patterns.  The debugger will not step into frames that\n   originate in a module that matches one of these patterns. [1]\n\n   By default, Pdb sets a handler for the SIGINT signal (which is sent\n   when the user presses Ctrl-C on the console) when you give a\n   "continue" command. This allows you to break into the debugger\n   again by pressing Ctrl-C.  If you want Pdb not to touch the SIGINT\n   handler, set *nosigint* tot true.\n\n   Example call to enable tracing with *skip*:\n\n      import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n   New in version 3.1: The *skip* argument.\n\n   New in version 3.2: The *nosigint* argument.  Previously, a SIGINT\n   handler was never set by Pdb.\n\n   run(statement, globals=None, locals=None)\n   runeval(expression, globals=None, locals=None)\n   runcall(function, *args, **kwds)\n   set_trace()\n\n      See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below.  Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs).  Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered.  Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged.  Python statements can also be prefixed with an exclamation\npoint ("!").  
This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*.  Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.)  No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt.  This is particularly useful for aliases.  If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next".  Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n   Without argument, print the list of available commands.  With a\n   *command* as argument, print help about that command.  "help pdb"\n   displays the full documentation (the docstring of the "pdb"\n   module).  Since the *command* argument must be an identifier, "help\n   exec" must be entered to get help on the "!" command.\n\nw(here)\n\n   Print a stack trace, with the most recent frame at the bottom.  An\n   arrow indicates the current frame, which determines the context of\n   most commands.\n\nd(own) [count]\n\n   Move the current frame *count* (default one) levels down in the\n   stack trace (to a newer frame).\n\nu(p) [count]\n\n   Move the current frame *count* (default one) levels up in the stack\n   trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n   With a *lineno* argument, set a break there in the current file.\n   With a *function* argument, set a break at the first executable\n   statement within that function.  The line number may be prefixed\n   with a filename and a colon, to specify a breakpoint in another\n   file (probably one that hasn\'t been loaded yet).  The file is\n   searched on "sys.path".  Note that each breakpoint is assigned a\n   number to which all the other breakpoint commands refer.\n\n   If a second argument is present, it is an expression which must\n   evaluate to true before the breakpoint is honored.\n\n   Without argument, list all breaks, including for each breakpoint,\n   the number of times that breakpoint has been hit, the current\n   ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n   Temporary breakpoint, which is removed automatically when it is\n   first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n   With a *filename:lineno* argument, clear all the breakpoints at\n   this line. With a space separated list of breakpoint numbers, clear\n   those breakpoints. Without argument, clear all breaks (but first\n   ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n   Disable the breakpoints given as a space separated list of\n   breakpoint numbers.  
Disabling a breakpoint means it cannot cause\n   the program to stop execution, but unlike clearing a breakpoint, it\n   remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n   Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n   Set the ignore count for the given breakpoint number.  If count is\n   omitted, the ignore count is set to 0.  A breakpoint becomes active\n   when the ignore count is zero.  When non-zero, the count is\n   decremented each time the breakpoint is reached and the breakpoint\n   is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n   Set a new *condition* for the breakpoint, an expression which must\n   evaluate to true before the breakpoint is honored.  If *condition*\n   is absent, any existing condition is removed; i.e., the breakpoint\n   is made unconditional.\n\ncommands [bpnumber]\n\n   Specify a list of commands for breakpoint number *bpnumber*.  The\n   commands themselves appear on the following lines.  Type a line\n   containing just "end" to terminate the commands. An example:\n\n      (Pdb) commands 1\n      (com) p some_variable\n      (com) end\n      (Pdb)\n\n   To remove all commands from a breakpoint, type commands and follow\n   it immediately with "end"; that is, give no commands.\n\n   With no *bpnumber* argument, commands refers to the last breakpoint\n   set.\n\n   You can use breakpoint commands to start your program up again.\n   Simply use the continue command, or step, or any other command that\n   resumes execution.\n\n   Specifying any command resuming execution (currently continue,\n   step, next, return, jump, quit and their abbreviations) terminates\n   the command list (as if that command was immediately followed by\n   end). This is because any time you resume execution (even with a\n   simple next or step), you may encounter another breakpoint--which\n   could have its own command list, leading to ambiguities about which\n   list to execute.\n\n   If you use the \'silent\' command in the command list, the usual\n   message about stopping at a breakpoint is not printed.  This may be\n   desirable for breakpoints that are to print a specific message and\n   then continue.  If none of the other commands print anything, you\n   see no sign that the breakpoint was reached.\n\ns(tep)\n\n   Execute the current line, stop at the first possible occasion\n   (either in a function that is called or on the next line in the\n   current function).\n\nn(ext)\n\n   Continue execution until the next line in the current function is\n   reached or it returns.  (The difference between "next" and "step"\n   is that "step" stops inside a called function, while "next"\n   executes called functions at (nearly) full speed, only stopping at\n   the next line in the current function.)\n\nunt(il) [lineno]\n\n   Without argument, continue execution until the line with a number\n   greater than the current one is reached.\n\n   With a line number, continue execution until a line with a number\n   greater or equal to that is reached.  In both cases, also stop when\n   the current frame returns.\n\n   Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n   Continue execution until the current function returns.\n\nc(ont(inue))\n\n   Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n   Set the next line that will be executed.  Only available in the\n   bottom-most frame.  
This lets you jump back and execute code again,\n   or jump forward to skip code that you don\'t want to run.\n\n   It should be noted that not all jumps are allowed -- for instance\n   it is not possible to jump into the middle of a "for" loop or out\n   of a "finally" clause.\n\nl(ist) [first[, last]]\n\n   List source code for the current file.  Without arguments, list 11\n   lines around the current line or continue the previous listing.\n   With "." as argument, list 11 lines around the current line.  With\n   one argument, list 11 lines around at that line.  With two\n   arguments, list the given range; if the second argument is less\n   than the first, it is interpreted as a count.\n\n   The current line in the current frame is indicated by "->".  If an\n   exception is being debugged, the line where the exception was\n   originally raised or propagated is indicated by ">>", if it differs\n   from the current line.\n\n   New in version 3.2: The ">>" marker.\n\nll | longlist\n\n   List all source code for the current function or frame.\n   Interesting lines are marked as for "list".\n\n   New in version 3.2.\n\na(rgs)\n\n   Print the argument list of the current function.\n\np expression\n\n   Evaluate the *expression* in the current context and print its\n   value.\n\n   Note: "print()" can also be used, but is not a debugger command ---\n     this executes the Python "print()" function.\n\npp expression\n\n   Like the "p" command, except the value of the expression is pretty-\n   printed using the "pprint" module.\n\nwhatis expression\n\n   Print the type of the *expression*.\n\nsource expression\n\n   Try to get source code for the given object and display it.\n\n   New in version 3.2.\n\ndisplay [expression]\n\n   Display the value of the expression if it changed, each time\n   execution stops in the current frame.\n\n   Without expression, list all display expressions for the current\n   frame.\n\n   New in version 3.2.\n\nundisplay [expression]\n\n   Do not display the expression any more in the current frame.\n   Without expression, clear all display expressions for the current\n   frame.\n\n   New in version 3.2.\n\ninteract\n\n   Start an interative interpreter (using the "code" module) whose\n   global namespace contains all the (global and local) names found in\n   the current scope.\n\n   New in version 3.2.\n\nalias [name [command]]\n\n   Create an alias called *name* that executes *command*.  The command\n   must *not* be enclosed in quotes.  Replaceable parameters can be\n   indicated by "%1", "%2", and so on, while "%*" is replaced by all\n   the parameters. If no command is given, the current alias for\n   *name* is shown. If no arguments are given, all aliases are listed.\n\n   Aliases may be nested and can contain anything that can be legally\n   typed at the pdb prompt.  Note that internal pdb commands *can* be\n   overridden by aliases.  Such a command is then hidden until the\n   alias is removed.  Aliasing is recursively applied to the first\n   word of the command line; all other words in the line are left\n   alone.\n\n   As an example, here are two useful aliases (especially when placed\n   in the ".pdbrc" file):\n\n      # Print instance variables (usage "pi classInst")\n      alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n      # Print instance variables in self\n      alias ps pi self\n\nunalias name\n\n   Delete the specified alias.\n\n! 
statement\n\n   Execute the (one-line) *statement* in the context of the current\n   stack frame. The exclamation point can be omitted unless the first\n   word of the statement resembles a debugger command.  To set a\n   global variable, you can prefix the assignment command with a\n   "global" statement on the same line, e.g.:\n\n      (Pdb) global list_options; list_options = [\'-l\']\n      (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n   Restart the debugged Python program.  If an argument is supplied,\n   it is split with "shlex" and the result is used as the new\n   "sys.argv". History, breakpoints, actions and debugger options are\n   preserved. "restart" is an alias for "run".\n\nq(uit)\n\n   Quit from the debugger.  The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module is\n    determined by the "__name__" in the frame globals.\n',
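For concreteness, a minimal sketch of hard-coding a breakpoint with "pdb.set_trace()" as described in the debugger topic ("shaky_sum" and its data are made-up names; at the "(Pdb)" prompt you can inspect "item" and "total", step with "n", or resume with "c"):

   import pdb

   def shaky_sum(items):
       total = 0
       for item in items:
           pdb.set_trace()      # enter the debugger at this frame
           total += item
       return total

   shaky_sum([1, 2, 3])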
+ 'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works that way:\n\n* If either argument is a complex number, the other is converted to\n  complex;\n\n* otherwise, if either argument is a floating point number, the\n  other is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator).  Extensions must define their own\nconversion behavior.\n',
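The conversion rules above can be checked interactively; a short sketch:

   print(type(2 + 0.5))    # int + float     -> float   (the int is converted)
   print(type(0.5 + 1j))   # float + complex -> complex (the float is converted)
   print(type(2 + 3))      # int + int       -> int     (no conversion needed)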
+ 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n   Called to create a new instance of class *cls*.  "__new__()" is a\n   static method (special-cased so you need not declare it as such)\n   that takes the class of which an instance was requested as its\n   first argument.  The remaining arguments are those passed to the\n   object constructor expression (the call to the class).  The return\n   value of "__new__()" should be the new object instance (usually an\n   instance of *cls*).\n\n   Typical implementations create a new instance of the class by\n   invoking the superclass\'s "__new__()" method using\n   "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n   arguments and then modifying the newly-created instance as\n   necessary before returning it.\n\n   If "__new__()" returns an instance of *cls*, then the new\n   instance\'s "__init__()" method will be invoked like\n   "__init__(self[, ...])", where *self* is the new instance and the\n   remaining arguments are the same as were passed to "__new__()".\n\n   If "__new__()" does not return an instance of *cls*, then the new\n   instance\'s "__init__()" method will not be invoked.\n\n   "__new__()" is intended mainly to allow subclasses of immutable\n   types (like int, str, or tuple) to customize instance creation.  It\n   is also commonly overridden in custom metaclasses in order to\n   customize class creation.\n\nobject.__init__(self[, ...])\n\n   Called when the instance is created.  The arguments are those\n   passed to the class constructor expression.  If a base class has an\n   "__init__()" method, the derived class\'s "__init__()" method, if\n   any, must explicitly call it to ensure proper initialization of the\n   base class part of the instance; for example:\n   "BaseClass.__init__(self, [args...])".  As a special constraint on\n   constructors, no value may be returned; doing so will cause a\n   "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n   Called when the instance is about to be destroyed.  This is also\n   called a destructor.  If a base class has a "__del__()" method, the\n   derived class\'s "__del__()" method, if any, must explicitly call it\n   to ensure proper deletion of the base class part of the instance.\n   Note that it is possible (though not recommended!) for the\n   "__del__()" method to postpone destruction of the instance by\n   creating a new reference to it.  It may then be called at a later\n   time when this new reference is deleted.  It is not guaranteed that\n   "__del__()" methods are called for objects that still exist when\n   the interpreter exits.\n\n   Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n     decrements the reference count for "x" by one, and the latter is\n     only called when "x"\'s reference count reaches zero.  Some common\n     situations that may prevent the reference count of an object from\n     going to zero include: circular references between objects (e.g.,\n     a doubly-linked list or a tree data structure with parent and\n     child pointers); a reference to the object on the stack frame of\n     a function that caught an exception (the traceback stored in\n     "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n     to the object on the stack frame that raised an unhandled\n     exception in interactive mode (the traceback stored in\n     "sys.last_traceback" keeps the stack frame alive).  
The first\n     situation can only be remedied by explicitly breaking the cycles;\n     the latter two situations can be resolved by storing "None" in\n     "sys.last_traceback". Circular references which are garbage are\n     detected and cleaned up when the cyclic garbage collector is\n     enabled (it\'s on by default). Refer to the documentation for the\n     "gc" module for more information about this topic.\n\n   Warning: Due to the precarious circumstances under which\n     "__del__()" methods are invoked, exceptions that occur during\n     their execution are ignored, and a warning is printed to\n     "sys.stderr" instead. Also, when "__del__()" is invoked in\n     response to a module being deleted (e.g., when execution of the\n     program is done), other globals referenced by the "__del__()"\n     method may already have been deleted or in the process of being\n     torn down (e.g. the import machinery shutting down).  For this\n     reason, "__del__()" methods should do the absolute minimum needed\n     to maintain external invariants.  Starting with version 1.5,\n     Python guarantees that globals whose name begins with a single\n     underscore are deleted from their module before other globals are\n     deleted; if no other references to such globals exist, this may\n     help in assuring that imported modules are still available at the\n     time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n   Called by the "repr()" built-in function to compute the "official"\n   string representation of an object.  If at all possible, this\n   should look like a valid Python expression that could be used to\n   recreate an object with the same value (given an appropriate\n   environment).  If this is not possible, a string of the form\n   "<...some useful description...>" should be returned. The return\n   value must be a string object. If a class defines "__repr__()" but\n   not "__str__()", then "__repr__()" is also used when an "informal"\n   string representation of instances of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by "str(object)" and the built-in functions "format()" and\n   "print()" to compute the "informal" or nicely printable string\n   representation of an object.  The return value must be a *string*\n   object.\n\n   This method differs from "object.__repr__()" in that there is no\n   expectation that "__str__()" return a valid Python expression: a\n   more convenient or concise representation can be used.\n\n   The default implementation defined by the built-in type "object"\n   calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n   Called by "bytes()" to compute a byte-string representation of an\n   object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n   Called by the "format()" built-in function (and by extension, the\n   "str.format()" method of class "str") to produce a "formatted"\n   string representation of an object. The "format_spec" argument is a\n   string that contains a description of the formatting options\n   desired. 
The interpretation of the "format_spec" argument is up to\n   the type implementing "__format__()", however most classes will\n   either delegate formatting to one of the built-in types, or use a\n   similar formatting option syntax.\n\n   See *Format Specification Mini-Language* for a description of the\n   standard formatting syntax.\n\n   The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   These are the so-called "rich comparison" methods. The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of "x==y" does not imply that "x!=y" is false.\n   Accordingly, when defining "__eq__()", one should also define\n   "__ne__()" so that the operators will behave as expected.  See the\n   paragraph on "__hash__()" for some important notes on creating\n   *hashable* objects which support custom comparison operations and\n   are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n   and "__eq__()" and "__ne__()" are their own reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer.  The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   Note: "hash()" truncates the value returned from an object\'s\n     custom "__hash__()" method to the size of a "Py_ssize_t".  This\n     is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n     builds. If an object\'s   "__hash__()" must interoperate on builds\n     of different bit sizes, be sure to check the width on all\n     supported builds.  An easy way to do this is with "python -c\n     "import sys; print(sys.hash_info.width)""\n\n   If a class does not define an "__eq__()" method it should not\n   define a "__hash__()" operation either; if it defines "__eq__()"\n   but not "__hash__()", its instances will not be usable as items in\n   hashable collections.  
If a class defines mutable objects and\n   implements an "__eq__()" method, it should not implement\n   "__hash__()", since the implementation of hashable collections\n   requires that a key\'s hash value is immutable (if the object\'s hash\n   value changes, it will be in the wrong hash bucket).\n\n   User-defined classes have "__eq__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns an appropriate value such\n   that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n   A class that overrides "__eq__()" and does not define "__hash__()"\n   will have its "__hash__()" implicitly set to "None".  When the\n   "__hash__()" method of a class is "None", instances of the class\n   will raise an appropriate "TypeError" when a program attempts to\n   retrieve their hash value, and will also be correctly identified as\n   unhashable when checking "isinstance(obj, collections.Hashable)".\n\n   If a class that overrides "__eq__()" needs to retain the\n   implementation of "__hash__()" from a parent class, the interpreter\n   must be told this explicitly by setting "__hash__ =\n   <ParentClass>.__hash__".\n\n   If a class that does not override "__eq__()" wishes to suppress\n   hash support, it should include "__hash__ = None" in the class\n   definition. A class which defines its own "__hash__()" that\n   explicitly raises a "TypeError" would be incorrectly identified as\n   hashable by an "isinstance(obj, collections.Hashable)" call.\n\n   Note: By default, the "__hash__()" values of str, bytes and\n     datetime objects are "salted" with an unpredictable random value.\n     Although they remain constant within an individual Python\n     process, they are not predictable between repeated invocations of\n     Python. This is intended to provide protection against a denial-\n     of-service caused by carefully-chosen inputs that exploit the\n     worst case performance of a dict insertion, O(n^2) complexity.\n     See http://www.ocert.org/advisories/ocert-2011-003.html for\n     details. Changing hash values affects the iteration order of\n     dicts, sets and other mappings.  Python has never made guarantees\n     about this ordering (and it typically varies between 32-bit and\n     64-bit builds). See also "PYTHONHASHSEED".\n\n   Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True".  When this method is not\n   defined, "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero.  If a class defines\n   neither "__len__()" nor "__bool__()", all its instances are\n   considered true.\n',
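A minimal sketch tying together "__repr__()", "__eq__()" and "__hash__()" as described in the customization topic above (the "Point" class is illustrative, not part of the standard library):

   class Point:
       def __init__(self, x, y):
           self.x, self.y = x, y

       def __repr__(self):
           # "official" representation: looks like the constructor call
           return "Point({!r}, {!r})".format(self.x, self.y)

       def __eq__(self, other):
           if not isinstance(other, Point):
               return NotImplemented
           return (self.x, self.y) == (other.x, other.y)

       def __hash__(self):
           # hash the same components that __eq__ compares, so that
           # equal objects always have equal hash values
           return hash((self.x, self.y))

   p = Point(1, 2)
   print(p)                       # Point(1, 2)  (falls back to __repr__)
   print(p == Point(1, 2))        # True
   print(len({p, Point(1, 2)}))   # 1 -- usable in hashed collections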
+ 'debugger': '\n"pdb" --- The Python Debugger\n*****************************\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs.  It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame.  It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source.  The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> pdb.run(\'mymodule.test()\')\n   > <string>(0)?()\n   (Pdb) continue\n   > <string>(1)?()\n   (Pdb) continue\n   NameError: \'spam\'\n   > <string>(1)?()\n   (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts.  For\nexample:\n\n   python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally.  After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program.  Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n   import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger.  You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> mymodule.test()\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in ?\n     File "./mymodule.py", line 4, in test\n       test2()\n     File "./mymodule.py", line 3, in test2\n       print(spam)\n   NameError: spam\n   >>> pdb.pm()\n   > ./mymodule.py(3)test2()\n   -> print(spam)\n   (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n   Execute the *statement* (given as a string or a code object) under\n   debugger control.  The debugger prompt appears before any code is\n   executed; you can set breakpoints and type "continue", or you can\n   step through the statement using "step" or "next" (all these\n   commands are explained below).  The optional *globals* and *locals*\n   arguments specify the environment in which the code is executed; by\n   default the dictionary of the module "__main__" is used.  (See the\n   explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n   Evaluate the *expression* (given as a string or a code object)\n   under debugger control.  When "runeval()" returns, it returns the\n   value of the expression.  
Otherwise this function is similar to\n   "run()".\n\npdb.runcall(function, *args, **kwds)\n\n   Call the *function* (a function or method object, not a string)\n   with the given arguments.  When "runcall()" returns, it returns\n   whatever the function call returned.  The debugger prompt appears\n   as soon as the function is entered.\n\npdb.set_trace()\n\n   Enter the debugger at the calling stack frame.  This is useful to\n   hard-code a breakpoint at a given point in a program, even if the\n   code is not otherwise being debugged (e.g. when an assertion\n   fails).\n\npdb.post_mortem(traceback=None)\n\n   Enter post-mortem debugging of the given *traceback* object.  If no\n   *traceback* is given, it uses the one of the exception that is\n   currently being handled (an exception must be being handled if the\n   default is to be used).\n\npdb.pm()\n\n   Enter post-mortem debugging of the traceback found in\n   "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name.  If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n   "Pdb" is the debugger class.\n\n   The *completekey*, *stdin* and *stdout* arguments are passed to the\n   underlying "cmd.Cmd" class; see the description there.\n\n   The *skip* argument, if given, must be an iterable of glob-style\n   module name patterns.  The debugger will not step into frames that\n   originate in a module that matches one of these patterns. [1]\n\n   By default, Pdb sets a handler for the SIGINT signal (which is sent\n   when the user presses Ctrl-C on the console) when you give a\n   "continue" command. This allows you to break into the debugger\n   again by pressing Ctrl-C.  If you want Pdb not to touch the SIGINT\n   handler, set *nosigint* to true.\n\n   Example call to enable tracing with *skip*:\n\n      import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n   New in version 3.1: The *skip* argument.\n\n   New in version 3.2: The *nosigint* argument.  Previously, a SIGINT\n   handler was never set by Pdb.\n\n   run(statement, globals=None, locals=None)\n   runeval(expression, globals=None, locals=None)\n   runcall(function, *args, **kwds)\n   set_trace()\n\n      See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below.  Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs).  Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered.  Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged.  Python statements can also be prefixed with an exclamation\npoint ("!").  
This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*.  Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.)  No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt.  This is particularly useful for aliases.  If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next".  Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n   Without argument, print the list of available commands.  With a\n   *command* as argument, print help about that command.  "help pdb"\n   displays the full documentation (the docstring of the "pdb"\n   module).  Since the *command* argument must be an identifier, "help\n   exec" must be entered to get help on the "!" command.\n\nw(here)\n\n   Print a stack trace, with the most recent frame at the bottom.  An\n   arrow indicates the current frame, which determines the context of\n   most commands.\n\nd(own) [count]\n\n   Move the current frame *count* (default one) levels down in the\n   stack trace (to a newer frame).\n\nu(p) [count]\n\n   Move the current frame *count* (default one) levels up in the stack\n   trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n   With a *lineno* argument, set a break there in the current file.\n   With a *function* argument, set a break at the first executable\n   statement within that function.  The line number may be prefixed\n   with a filename and a colon, to specify a breakpoint in another\n   file (probably one that hasn\'t been loaded yet).  The file is\n   searched on "sys.path".  Note that each breakpoint is assigned a\n   number to which all the other breakpoint commands refer.\n\n   If a second argument is present, it is an expression which must\n   evaluate to true before the breakpoint is honored.\n\n   Without argument, list all breaks, including for each breakpoint,\n   the number of times that breakpoint has been hit, the current\n   ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n   Temporary breakpoint, which is removed automatically when it is\n   first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n   With a *filename:lineno* argument, clear all the breakpoints at\n   this line. With a space separated list of breakpoint numbers, clear\n   those breakpoints. Without argument, clear all breaks (but first\n   ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n   Disable the breakpoints given as a space separated list of\n   breakpoint numbers.  
Disabling a breakpoint means it cannot cause\n   the program to stop execution, but unlike clearing a breakpoint, it\n   remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n   Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n   Set the ignore count for the given breakpoint number.  If count is\n   omitted, the ignore count is set to 0.  A breakpoint becomes active\n   when the ignore count is zero.  When non-zero, the count is\n   decremented each time the breakpoint is reached and the breakpoint\n   is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n   Set a new *condition* for the breakpoint, an expression which must\n   evaluate to true before the breakpoint is honored.  If *condition*\n   is absent, any existing condition is removed; i.e., the breakpoint\n   is made unconditional.\n\ncommands [bpnumber]\n\n   Specify a list of commands for breakpoint number *bpnumber*.  The\n   commands themselves appear on the following lines.  Type a line\n   containing just "end" to terminate the commands. An example:\n\n      (Pdb) commands 1\n      (com) p some_variable\n      (com) end\n      (Pdb)\n\n   To remove all commands from a breakpoint, type commands and follow\n   it immediately with "end"; that is, give no commands.\n\n   With no *bpnumber* argument, commands refers to the last breakpoint\n   set.\n\n   You can use breakpoint commands to start your program up again.\n   Simply use the continue command, or step, or any other command that\n   resumes execution.\n\n   Specifying any command resuming execution (currently continue,\n   step, next, return, jump, quit and their abbreviations) terminates\n   the command list (as if that command was immediately followed by\n   end). This is because any time you resume execution (even with a\n   simple next or step), you may encounter another breakpoint--which\n   could have its own command list, leading to ambiguities about which\n   list to execute.\n\n   If you use the \'silent\' command in the command list, the usual\n   message about stopping at a breakpoint is not printed.  This may be\n   desirable for breakpoints that are to print a specific message and\n   then continue.  If none of the other commands print anything, you\n   see no sign that the breakpoint was reached.\n\ns(tep)\n\n   Execute the current line, stop at the first possible occasion\n   (either in a function that is called or on the next line in the\n   current function).\n\nn(ext)\n\n   Continue execution until the next line in the current function is\n   reached or it returns.  (The difference between "next" and "step"\n   is that "step" stops inside a called function, while "next"\n   executes called functions at (nearly) full speed, only stopping at\n   the next line in the current function.)\n\nunt(il) [lineno]\n\n   Without argument, continue execution until the line with a number\n   greater than the current one is reached.\n\n   With a line number, continue execution until a line with a number\n   greater or equal to that is reached.  In both cases, also stop when\n   the current frame returns.\n\n   Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n   Continue execution until the current function returns.\n\nc(ont(inue))\n\n   Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n   Set the next line that will be executed.  Only available in the\n   bottom-most frame.  
This lets you jump back and execute code again,\n   or jump forward to skip code that you don\'t want to run.\n\n   It should be noted that not all jumps are allowed -- for instance\n   it is not possible to jump into the middle of a "for" loop or out\n   of a "finally" clause.\n\nl(ist) [first[, last]]\n\n   List source code for the current file.  Without arguments, list 11\n   lines around the current line or continue the previous listing.\n   With "." as argument, list 11 lines around the current line.  With\n   one argument, list 11 lines around that line.  With two\n   arguments, list the given range; if the second argument is less\n   than the first, it is interpreted as a count.\n\n   The current line in the current frame is indicated by "->".  If an\n   exception is being debugged, the line where the exception was\n   originally raised or propagated is indicated by ">>", if it differs\n   from the current line.\n\n   New in version 3.2: The ">>" marker.\n\nll | longlist\n\n   List all source code for the current function or frame.\n   Interesting lines are marked as for "list".\n\n   New in version 3.2.\n\na(rgs)\n\n   Print the argument list of the current function.\n\np expression\n\n   Evaluate the *expression* in the current context and print its\n   value.\n\n   Note: "print()" can also be used, but is not a debugger command\n     --- this executes the Python "print()" function.\n\npp expression\n\n   Like the "p" command, except the value of the expression is pretty-\n   printed using the "pprint" module.\n\nwhatis expression\n\n   Print the type of the *expression*.\n\nsource expression\n\n   Try to get source code for the given object and display it.\n\n   New in version 3.2.\n\ndisplay [expression]\n\n   Display the value of the expression if it changed, each time\n   execution stops in the current frame.\n\n   Without expression, list all display expressions for the current\n   frame.\n\n   New in version 3.2.\n\nundisplay [expression]\n\n   Do not display the expression any more in the current frame.\n   Without expression, clear all display expressions for the current\n   frame.\n\n   New in version 3.2.\n\ninteract\n\n   Start an interactive interpreter (using the "code" module) whose\n   global namespace contains all the (global and local) names found in\n   the current scope.\n\n   New in version 3.2.\n\nalias [name [command]]\n\n   Create an alias called *name* that executes *command*.  The command\n   must *not* be enclosed in quotes.  Replaceable parameters can be\n   indicated by "%1", "%2", and so on, while "%*" is replaced by all\n   the parameters. If no command is given, the current alias for\n   *name* is shown. If no arguments are given, all aliases are listed.\n\n   Aliases may be nested and can contain anything that can be legally\n   typed at the pdb prompt.  Note that internal pdb commands *can* be\n   overridden by aliases.  Such a command is then hidden until the\n   alias is removed.  Aliasing is recursively applied to the first\n   word of the command line; all other words in the line are left\n   alone.\n\n   As an example, here are two useful aliases (especially when placed\n   in the ".pdbrc" file):\n\n      # Print instance variables (usage "pi classInst")\n      alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n      # Print instance variables in self\n      alias ps pi self\n\nunalias name\n\n   Delete the specified alias.\n\n! 
statement\n\n   Execute the (one-line) *statement* in the context of the current\n   stack frame. The exclamation point can be omitted unless the first\n   word of the statement resembles a debugger command.  To set a\n   global variable, you can prefix the assignment command with a\n   "global" statement on the same line, e.g.:\n\n      (Pdb) global list_options; list_options = [\'-l\']\n      (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n   Restart the debugged Python program.  If an argument is supplied,\n   it is split with "shlex" and the result is used as the new\n   "sys.argv". History, breakpoints, actions and debugger options are\n   preserved. "restart" is an alias for "run".\n\nq(uit)\n\n   Quit from the debugger.  The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module\n    is determined by the "__name__" in the frame globals.\n',
  'del': '\nThe "del" statement\n*******************\n\n   del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similarly to the way assignment\nis defined. Rather than spelling it out in full detail, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block.  If the name is unbound, a\n"NameError" exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n',
  'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n   dict_display       ::= "{" [key_datum_list | dict_comprehension] "}"\n   key_datum_list     ::= key_datum ("," key_datum)* [","]\n   key_datum          ::= expression ":" expression\n   dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum.  This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*.  (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.)  Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n',
  'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name.  An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names.  Names may be resolved in the local\nand global namespaces of the caller.  Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace.  [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace.  If only one namespace is\nspecified, it is used for both.\n',
  'else': '\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
- 'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions.  An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero).  A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement.  The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop.  In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances.  The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof.  The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API.  Their contents\n  may change from one version of Python to the next without warning\n  and should not be relied on by code which will run under multiple\n  versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n    operations is not available at the time the module is compiled.\n',
- 'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects.  Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block.  A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block.  The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*.  A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block.  If a local\nvariable is defined in a block, its scope includes that block.  If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name.  The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope.  This\nmeans that the following will fail:\n\n   class A:\n       a = 42\n       b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope.  The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal".  If a name is bound at the module\nlevel, it is a global variable.  (The variables of the module code\nblock are local and global.)  If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised.  "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore.  This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block.  
This can lead to errors when a name is used within a\nblock before it is bound.  This rule is subtle.  Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block.  The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace.  Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins".  The global namespace is searched first.  If\nthe name is not found there, the builtins namespace is searched.  The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used).  By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself.  "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail.  Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported.  The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block.  If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class.  Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name.  An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names.  Names may be resolved in the local\nand global namespaces of the caller.  Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace.  [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace.  If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions.  
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero).  A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement.  The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop.  In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances.  The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof.  The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API.  Their contents\n  may change from one version of Python to the next without warning\n  and should not be relied on by code which will run under multiple\n  versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n    operations is not available at the time the module is compiled.\n',
+ 'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions.  An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero).  A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement.  The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop.  In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances.  The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof.  The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API.  Their\n  contents may change from one version of Python to the next without\n  warning and should not be relied on by code which will run under\n  multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n    these operations is not available at the time the module is\n    compiled.\n',
+ 'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects.  Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block.  A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block.  The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*.  A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block.  If a local\nvariable is defined in a block, its scope includes that block.  If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name.  The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope.  This\nmeans that the following will fail:\n\n   class A:\n       a = 42\n       b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope.  The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal".  If a name is bound at the module\nlevel, it is a global variable.  (The variables of the module code\nblock are local and global.)  If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised.  "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore.  This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block.  
This can lead to errors when a name is used within a\nblock before it is bound.  This rule is subtle.  Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block.  The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace.  Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins".  The global namespace is searched first.  If\nthe name is not found there, the builtins namespace is searched.  The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used).  By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself.  "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail.  Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported.  The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block.  If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class.  Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name.  An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names.  Names may be resolved in the local\nand global namespaces of the caller.  Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace.  [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace.  If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions.  
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero).  A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement.  The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop.  In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances.  The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof.  The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API.  Their\n  contents may change from one version of Python to the next without\n  warning and should not be relied on by code which will run under\n  multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n    these operations is not available at the time the module is\n    compiled.\n',
  'exprlists': '\nExpression lists\n****************\n\n   expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple.  The\nlength of the tuple is the number of expressions in the list.  The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases.  A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n',
  'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n   floatnumber   ::= pointfloat | exponentfloat\n   pointfloat    ::= [intpart] fraction | intpart "."\n   exponentfloat ::= (intpart | pointfloat) exponent\n   intpart       ::= digit+\n   fraction      ::= "." digit+\n   exponent      ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, "077e010" is legal, and denotes the same number\nas "77e10". The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n   3.14    10.    .001    1e100    3.14e-10    0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n',
- 'for': '\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n   for_stmt ::= "for" target_list "in" expression_list ":" suite\n                ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject.  An iterator is created for the result of the\n"expression_list".  The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices.  Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted.  When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop.  Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the loop\n  (this can only occur for mutable sequences, i.e. lists).  An\n  internal counter is used to keep track of which item is used next,\n  and this is incremented on each iteration.  When this counter has\n  reached the length of the sequence the loop terminates.  This means\n  that if the suite deletes the current (or a previous) item from the\n  sequence, the next item will be skipped (since it gets the index of\n  the current item which has already been treated).  Likewise, if the\n  suite inserts an item in the sequence before the current item, the\n  current item will be treated again the next time through the loop.\n  This can lead to nasty bugs that can be avoided by making a\n  temporary copy using a slice of the whole sequence, e.g.,\n\n     for x in a[:]:\n         if x < 0: a.remove(x)\n',
+ 'for': '\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n   for_stmt ::= "for" target_list "in" expression_list ":" suite\n                ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject.  An iterator is created for the result of the\n"expression_list".  The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices.  Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted.  When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop.  Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n  loop (this can only occur for mutable sequences, i.e. lists).  An\n  internal counter is used to keep track of which item is used next,\n  and this is incremented on each iteration.  When this counter has\n  reached the length of the sequence the loop terminates.  This means\n  that if the suite deletes the current (or a previous) item from the\n  sequence, the next item will be skipped (since it gets the index of\n  the current item which has already been treated).  Likewise, if the\n  suite inserts an item in the sequence before the current item, the\n  current item will be treated again the next time through the loop.\n  This can lead to nasty bugs that can be avoided by making a\n  temporary copy using a slice of the whole sequence, e.g.,\n\n     for x in a[:]:\n         if x < 0: a.remove(x)\n',
  'formatstrings': '\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output.  If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n      replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n      field_name        ::= arg_name ("." attribute_name | "[" element_index "]")*\n      arg_name          ::= [identifier | integer]\n      attribute_name    ::= identifier\n      element_index     ::= integer | index_string\n      index_string      ::= <any source character except "]"> +\n      conversion        ::= "r" | "s" | "a"\n      format_spec       ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a  *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'".  These specify a non-default format for the\nreplacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword.  If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument.  If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n   "First, thou shalt count to {0}" # References first positional argument\n   "Bring me a {}"                  # Implicitly references the first positional argument\n   "From {} to {}"                  # Same as "From {0} to {1}"\n   "My quest is {name}"             # References keyword argument \'name\'\n   "Weight in tons {0.weight}"      # \'weight\' attribute of first positional arg\n   "Units destroyed: {players[0]}"  # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself.  However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting.  
By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nThree conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, "\'!r\'" which calls "repr()" and "\'!a\'" which\ncalls "ascii()".\n\nSome examples:\n\n   "Harold\'s a clever {0!s}"        # Calls str() on the argument first\n   "Bring out the holy {name!r}"    # Calls repr() on the argument first\n   "More {!a}"                      # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on.  Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed.  The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*).  They can also be passed directly to the\nbuilt-in "format()" function.  Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n   format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n   fill        ::= <any character>\n   align       ::= "<" | ">" | "=" | "^"\n   sign        ::= "+" | "-" | " "\n   width       ::= integer\n   precision   ::= integer\n   type        ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. 
Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n   +-----------+------------------------------------------------------------+\n   | Option    | Meaning                                                    |\n   +===========+============================================================+\n   | "\'<\'"     | Forces the field to be left-aligned within the available   |\n   +-----------+------------------------------------------------------------+\n   | "\'>\'"     | Forces the field to be right-aligned within the available  |\n   +-----------+------------------------------------------------------------+\n   | "\'=\'"     | Forces the padding to be placed after the sign (if any)    |\n   +-----------+------------------------------------------------------------+\n   | "\'^\'"     | Forces the field to be centered within the available       |\n   +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n   +-----------+------------------------------------------------------------+\n   | Option    | Meaning                                                    |\n   +===========+============================================================+\n   | "\'+\'"     | indicates that a sign should be used for both positive as  |\n   +-----------+------------------------------------------------------------+\n   | "\'-\'"     | indicates that a sign should be used only for negative     |\n   +-----------+------------------------------------------------------------+\n   | space     | indicates that a leading space should be used on positive  |\n   +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option causes the "alternate form" to be used for the\nconversion.  The alternate form is defined differently for different\ntypes.  This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective "\'0b\'", "\'0o\'", or\n"\'0x\'" to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for "\'g\'" and "\'G\'"\nconversions, trailing zeros are not removed from the result.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 3.1: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width.  If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types.  
This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'".  For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n   +-----------+------------------------------------------------------------+\n   | Type      | Meaning                                                    |\n   +===========+============================================================+\n   | "\'s\'"     | String format. This is the default type for strings and    |\n   +-----------+------------------------------------------------------------+\n   | None      | The same as "\'s\'".                                         |\n   +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n   +-----------+------------------------------------------------------------+\n   | Type      | Meaning                                                    |\n   +===========+============================================================+\n   | "\'b\'"     | Binary format. Outputs the number in base 2.               |\n   +-----------+------------------------------------------------------------+\n   | "\'c\'"     | Character. Converts the integer to the corresponding       |\n   +-----------+------------------------------------------------------------+\n   | "\'d\'"     | Decimal Integer. Outputs the number in base 10.            |\n   +-----------+------------------------------------------------------------+\n   | "\'o\'"     | Octal format. Outputs the number in base 8.                |\n   +-----------+------------------------------------------------------------+\n   | "\'x\'"     | Hex format. Outputs the number in base 16, using lower-    |\n   +-----------+------------------------------------------------------------+\n   | "\'X\'"     | Hex format. Outputs the number in base 16, using upper-    |\n   +-----------+------------------------------------------------------------+\n   | "\'n\'"     | Number. This is the same as "\'d\'", except that it uses the |\n   +-----------+------------------------------------------------------------+\n   | None      | The same as "\'d\'".                                         |\n   +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n   +-----------+------------------------------------------------------------+\n   | Type      | Meaning                                                    |\n   +===========+============================================================+\n   | "\'e\'"     | Exponent notation. 
Prints the number in scientific         |\n   +-----------+------------------------------------------------------------+\n   | "\'E\'"     | Exponent notation. Same as "\'e\'" except it uses an upper   |\n   +-----------+------------------------------------------------------------+\n   | "\'f\'"     | Fixed point. Displays the number as a fixed-point number.  |\n   +-----------+------------------------------------------------------------+\n   | "\'F\'"     | Fixed point. Same as "\'f\'", but converts "nan" to "NAN"    |\n   +-----------+------------------------------------------------------------+\n   | "\'g\'"     | General format.  For a given precision "p >= 1", this      |\n   +-----------+------------------------------------------------------------+\n   | "\'G\'"     | General format. Same as "\'g\'" except switches to "\'E\'" if  |\n   +-----------+------------------------------------------------------------+\n   | "\'n\'"     | Number. This is the same as "\'g\'", except that it uses the |\n   +-----------+------------------------------------------------------------+\n   | "\'%\'"     | Percentage. Multiplies the number by 100 and displays in   |\n   +-----------+------------------------------------------------------------+\n   | None      | Similar to "\'g\'", except with at least one digit past the  |\n   +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n   >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n   \'a, b, c\'\n   >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\')  # 3.1+ only\n   \'a, b, c\'\n   >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n   \'c, b, a\'\n   >>> \'{2}, {1}, {0}\'.format(*\'abc\')      # unpacking argument sequence\n   \'c, b, a\'\n   >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\')   # arguments\' indices can be repeated\n   \'abracadabra\'\n\nAccessing arguments by name:\n\n   >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n   \'Coordinates: 37.24N, -115.81W\'\n   >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n   >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n   \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n   >>> c = 3-5j\n   >>> (\'The complex number {0} is formed from the real part {0.real} \'\n   ...  \'and the imaginary part {0.imag}.\').format(c)\n   \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n   >>> class Point:\n   ...     def __init__(self, x, y):\n   ...         self.x, self.y = x, y\n   ...     def __str__(self):\n   ...         
return \'Point({self.x}, {self.y})\'.format(self=self)\n   ...\n   >>> str(Point(4, 2))\n   \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n   >>> coord = (3, 5)\n   >>> \'X: {0[0]};  Y: {0[1]}\'.format(coord)\n   \'X: 3;  Y: 5\'\n\nReplacing "%s" and "%r":\n\n   >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n   "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n   >>> \'{:<30}\'.format(\'left aligned\')\n   \'left aligned                  \'\n   >>> \'{:>30}\'.format(\'right aligned\')\n   \'                 right aligned\'\n   >>> \'{:^30}\'.format(\'centered\')\n   \'           centered           \'\n   >>> \'{:*^30}\'.format(\'centered\')  # use \'*\' as a fill char\n   \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n   >>> \'{:+f}; {:+f}\'.format(3.14, -3.14)  # show it always\n   \'+3.140000; -3.140000\'\n   >>> \'{: f}; {: f}\'.format(3.14, -3.14)  # show a space for positive numbers\n   \' 3.140000; -3.140000\'\n   >>> \'{:-f}; {:-f}\'.format(3.14, -3.14)  # show only the minus -- same as \'{:f}; {:f}\'\n   \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n   >>> # format also supports binary numbers\n   >>> "int: {0:d};  hex: {0:x};  oct: {0:o};  bin: {0:b}".format(42)\n   \'int: 42;  hex: 2a;  oct: 52;  bin: 101010\'\n   >>> # with 0x, 0o, or 0b as prefix:\n   >>> "int: {0:d};  hex: {0:#x};  oct: {0:#o};  bin: {0:#b}".format(42)\n   \'int: 42;  hex: 0x2a;  oct: 0o52;  bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n   >>> \'{:,}\'.format(1234567890)\n   \'1,234,567,890\'\n\nExpressing a percentage:\n\n   >>> points = 19\n   >>> total = 22\n   >>> \'Correct answers: {:.2%}\'.format(points/total)\n   \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n   >>> import datetime\n   >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n   >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n   \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n   >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n   ...     \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n   ...\n   \'left<<<<<<<<<<<<\'\n   \'^^^^^center^^^^^\'\n   \'>>>>>>>>>>>right\'\n   >>>\n   >>> octets = [192, 168, 0, 1]\n   >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n   \'C0A80001\'\n   >>> int(_, 16)\n   3232235521\n   >>>\n   >>> width = 5\n   >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n   ...     for base in \'dXob\':\n   ...         print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n   ...     print()\n   ...\n       5     5     5   101\n       6     6     6   110\n       7     7     7   111\n       8     8    10  1000\n       9     9    11  1001\n      10     A    12  1010\n      11     B    13  1011\n',
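For instance, the sign-aware zero-padding described above (a leading "'0'" before the width) behaves like an explicit "'0'" fill with "'='" alignment; a small doctest-style sketch:

   >>> '{:08.2f}'.format(-3.14159)
   '-0003.14'
   >>> '{:0=8.2f}'.format(-3.14159)
   '-0003.14'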
- 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n   funcdef        ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n   decorators     ::= decorator+\n   decorator      ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n   dotted_name    ::= identifier ("." identifier)*\n   parameter_list ::= (defparameter ",")*\n                      ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n                      | "**" parameter\n                      | defparameter [","] )\n   parameter      ::= identifier [":" expression]\n   defparameter   ::= parameter ["=" expression]\n   funcname       ::= identifier\n\nA function definition is an executable statement.  Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function).  This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition.  The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object.  Multiple decorators are applied in\nnested fashion. For example, the following code\n\n   @f1(arg)\n   @f2\n   def func(): pass\n\nis equivalent to\n\n   def func(): pass\n   func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted.  If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call.  This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended.  A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values.  If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple.  
If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name.  Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier".  Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist.  These annotations can be any valid Python expression and are\nevaluated when the function definition is executed.  Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction.  The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions.  This uses lambda\nexpressions, described in section *Lambdas*.  Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression.  The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects.  A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around.  Free variables used\nin the nested function can access the local variables of the function\ncontaining the def.  See section *Naming and binding* for details.\n\nSee also:\n\n   **PEP 3107** - Function Annotations\n      The original specification for function annotations.\n',
+ 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n   funcdef        ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n   decorators     ::= decorator+\n   decorator      ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n   dotted_name    ::= identifier ("." identifier)*\n   parameter_list ::= (defparameter ",")*\n                      ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n                      | "**" parameter\n                      | defparameter [","] )\n   parameter      ::= identifier [":" expression]\n   defparameter   ::= parameter ["=" expression]\n   funcname       ::= identifier\n\nA function definition is an executable statement.  Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function).  This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition.  The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object.  Multiple decorators are applied in\nnested fashion. For example, the following code\n\n   @f1(arg)\n   @f2\n   def func(): pass\n\nis equivalent to\n\n   def func(): pass\n   func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted.  If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call.  This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended.  A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values.  If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple.  
If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name.  Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier".  Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist.  These annotations can be any valid Python expression and are\nevaluated when the function definition is executed.  Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction.  The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions.  This uses lambda\nexpressions, described in section *Lambdas*.  Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression.  The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects.  A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around.  Free variables used\nin the nested function can access the local variables of the function\ncontaining the def.  See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n     The original specification for function annotations.\n',
  'global': '\nThe "global" statement\n**********************\n\n   global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block.  It means that the listed identifiers are to be\ninterpreted as globals.  It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser.  It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in a string\nor code object supplied to the built-in "exec()" function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by "global" statements in the\ncode containing the function call.  The same applies to the "eval()"\nand "compile()" functions.\n',
  'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings.  These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n   Not imported by "from module import *".  The special identifier "_"\n   is used in the interactive interpreter to store the result of the\n   last evaluation; it is stored in the "builtins" module.  When not\n   in interactive mode, "_" has no special meaning and is not defined.\n   See section *The import statement*.\n\n   Note: The name "_" is often used in conjunction with\n     internationalization; refer to the documentation for the\n     "gettext" module for more information on this convention.\n\n"__*__"\n   System-defined names. These names are defined by the interpreter\n   and its implementation (including the standard library).  Current\n   system names are discussed in the *Special method names* section\n   and elsewhere.  More will likely be defined in future versions of\n   Python.  *Any* use of "__*__" names, in any context, that does not\n   follow explicitly documented use, is subject to breakage without\n   warning.\n\n"__*"\n   Class-private names.  Names in this category, when used within the\n   context of a class definition, are re-written to use a mangled form\n   to help avoid name clashes between "private" attributes of base and\n   derived classes. See section *Identifiers (Names)*.\n',
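The mangling applied to class-private ""__*"" names can be observed directly; a small sketch (the class name "Base" is only illustrative):

   >>> class Base:
   ...     __token = 'hidden'
   ...
   >>> [name for name in dir(Base) if 'token' in name]
   ['_Base__token']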
  'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters "A" through "Z", the underscore "_" and, except for the first\ncharacter, the digits "0" through "9".\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**).  For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n"unicodedata" module.\n\nIdentifiers are unlimited in length.  Case is significant.\n\n   identifier   ::= xid_start xid_continue*\n   id_start     ::= <all characters in general categories Lu, Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the Other_ID_Start property>\n   id_continue  ::= <all characters in id_start, plus characters in the categories Mn, Mc, Nd, Pc and others with the Other_ID_Continue property>\n   xid_start    ::= <all characters in id_start whose NFKC normalization is in "id_start xid_continue*">\n   xid_continue ::= <all characters in id_continue whose NFKC normalization is in "id_continue*">\n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n  support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers.  They must\nbe spelled exactly as written here:\n\n   False      class      finally    is         return\n   None       continue   for        lambda     try\n   True       def        from       nonlocal   while\n   and        del        global     not        with\n   as         elif       if         or         yield\n   assert     else       import     pass\n   break      except     in         raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings.  These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n   Not imported by "from module import *".  The special identifier "_"\n   is used in the interactive interpreter to store the result of the\n   last evaluation; it is stored in the "builtins" module.  
When not\n   in interactive mode, "_" has no special meaning and is not defined.\n   See section *The import statement*.\n\n   Note: The name "_" is often used in conjunction with\n     internationalization; refer to the documentation for the\n     "gettext" module for more information on this convention.\n\n"__*__"\n   System-defined names. These names are defined by the interpreter\n   and its implementation (including the standard library).  Current\n   system names are discussed in the *Special method names* section\n   and elsewhere.  More will likely be defined in future versions of\n   Python.  *Any* use of "__*__" names, in any context, that does not\n   follow explicitly documented use, is subject to breakage without\n   warning.\n\n"__*"\n   Class-private names.  Names in this category, when used within the\n   context of a class definition, are re-written to use a mangled form\n   to help avoid name clashes between "private" attributes of base and\n   derived classes. See section *Identifiers (Names)*.\n',
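A rough interactive check of these rules, using the "keyword" module and "str.isidentifier()" (note that "isidentifier()" tests only identifier syntax, not keyword status):

   >>> import keyword
   >>> keyword.iskeyword('lambda')
   True
   >>> 'lambda'.isidentifier()
   True
   >>> 'résumé'.isidentifier()
   True
   >>> '2fast'.isidentifier()
   False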
  'if': '\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
  'imaginary': '\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n   imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range.  To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)".  Some examples of imaginary literals:\n\n   3.14j   10.j    10j     .001j   1e100j  3.14e-10j\n',
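For example, at the interactive prompt:

   >>> 3j * 3j
   (-9+0j)
   >>> (3+4j).real, (3+4j).imag
   (3.0, 4.0)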
- 'import': '\nThe "import" statement\n**********************\n\n   import_stmt     ::= "import" module ["as" name] ( "," module ["as" name] )*\n                   | "from" relative_module "import" identifier ["as" name]\n                   ( "," identifier ["as" name] )*\n                   | "from" relative_module "import" "(" identifier ["as" name]\n                   ( "," identifier ["as" name] )* [","] ")"\n                   | "from" module "import" "*"\n   module          ::= (identifier ".")* identifier\n   relative_module ::= "."* module | "."+\n   name            ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope where\n   the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules is\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following "as"\n  is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n  top level module, the module\'s name is bound in the local namespace\n  as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n  name of the top level package that contains the module is bound in\n  the local namespace as a reference to the top level package. The\n  imported module must be accessed using its full qualified name\n  rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause loading and\n   initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n   1. check if the imported module has an attribute by that name\n\n   2. if not, attempt to import a submodule with that name and then\n      check the imported module again for that attribute\n\n   3. if the attribute is not found, "ImportError" is raised.\n\n   4. 
otherwise, a reference to that value is bound in the local\n      namespace, using the name in the "as" clause if it is present,\n      otherwise using the attribute name\n\nExamples:\n\n   import foo                 # foo imported and bound locally\n   import foo.bar.baz         # foo.bar.baz imported, foo bound locally\n   import foo.bar.baz as fbb  # foo.bar.baz imported and bound as fbb\n   from foo.bar import baz    # foo.bar.baz imported and bound as baz\n   from foo import attr       # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule.  The names given in "__all__" are all considered public and\nare required to exist.  If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'").  "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope.  The wild\ncard form of import --- "import *" --- is only allowed at the module\nlevel. Attempting to use it in class or function definitions will\nraise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python.  The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language.  
It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n   future_statement ::= "from" "__future__" "import" feature ["as" name]\n                        ("," feature ["as" name])*\n                        | "from" "__future__" "import" "(" feature ["as" name]\n                        ("," feature ["as" name])* [","] ")"\n   feature          ::= identifier\n   name             ::= identifier\n\nA future statement must appear near the top of the module.  The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement".  They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code.  It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently.  Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n   import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement.  This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session.  If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n   **PEP 236** - Back to the __future__\n      The original proposal for the __future__ mechanism.\n',
- 'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation.  Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n   comparison    ::= or_expr ( comp_operator or_expr )*\n   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n                     | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects.  The objects need not have the same type. If both are\nnumbers, they are converted to a common type.  Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types.  You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The are\n  identical to themselves, "x is x" but are not equal to themselves,\n  "x != x".  Additionally, comparing any value to a not-a-number value\n  will return "False".  For example, both "3 < float(\'NaN\')" and\n  "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n  values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n  (the result of the built-in function "ord()") of their characters.\n  [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n  corresponding elements.  This means that to compare equal, each\n  element must compare equal and the two sequences must be of the same\n  type and have the same length.\n\n  If not equal, the sequences are ordered the same as their first\n  differing elements.  For example, "[1,2,x] <= [1,2,y]" has the same\n  value as "x <= y".  If the corresponding element does not exist, the\n  shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n  same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n  \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n  superset tests.  Those relations do not define total orderings (the\n  two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n  another, nor supersets of one another).  
Accordingly, sets are not\n  appropriate arguments for functions which depend on total ordering.\n  For example, "min()", "max()", and "sorted()" produce undefined\n  results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n  the same object; the choice whether one object is considered smaller\n  or larger than another one is made arbitrarily but consistently\n  within one execution of a program.\n\nComparison of objects of the differing types depends on whether either\nof the types provide explicit support for the comparison.  Most\nnumeric types can be compared with one another.  When cross-type\ncomparison is not supported, the comparison method returns\n"NotImplemented".\n\nThe operators "in" and "not in" test for membership.  "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise.  "x\nnot in s" returns the negation of "x in s".  All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether a the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*.  An equivalent test is "y.find(x) != -1".  Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y".  If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception.  (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object.  "x is not y"\nyields the inverse truth value. [4]\n',
+ 'import': '\nThe "import" statement\n**********************\n\n   import_stmt     ::= "import" module ["as" name] ( "," module ["as" name] )*\n                   | "from" relative_module "import" identifier ["as" name]\n                   ( "," identifier ["as" name] )*\n                   | "from" relative_module "import" "(" identifier ["as" name]\n                   ( "," identifier ["as" name] )* [","] ")"\n                   | "from" module "import" "*"\n   module          ::= (identifier ".")* identifier\n   relative_module ::= "."* module | "."+\n   name            ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope\n   where the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules is\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following\n  "as" is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n  top level module, the module\'s name is bound in the local namespace\n  as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n  name of the top level package that contains the module is bound in\n  the local namespace as a reference to the top level package. The\n  imported module must be accessed using its full qualified name\n  rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause loading and\n   initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n   1. check if the imported module has an attribute by that name\n\n   2. if not, attempt to import a submodule with that name and then\n      check the imported module again for that attribute\n\n   3. if the attribute is not found, "ImportError" is raised.\n\n   4. 
otherwise, a reference to that value is bound in the local\n      namespace, using the name in the "as" clause if it is present,\n      otherwise using the attribute name\n\nExamples:\n\n   import foo                 # foo imported and bound locally\n   import foo.bar.baz         # foo.bar.baz imported, foo bound locally\n   import foo.bar.baz as fbb  # foo.bar.baz imported and bound as fbb\n   from foo.bar import baz    # foo.bar.baz imported and bound as baz\n   from foo import attr       # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule.  The names given in "__all__" are all considered public and\nare required to exist.  If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'").  "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope.  The wild\ncard form of import --- "import *" --- is only allowed at the module\nlevel. Attempting to use it in class or function definitions will\nraise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python.  The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language.  
It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n   future_statement ::= "from" "__future__" "import" feature ["as" name]\n                        ("," feature ["as" name])*\n                        | "from" "__future__" "import" "(" feature ["as" name]\n                        ("," feature ["as" name])* [","] ")"\n   feature          ::= identifier\n   name             ::= identifier\n\nA future statement must appear near the top of the module.  The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement".  They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code.  It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently.  Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n   import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement.  This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session.  If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n     The original proposal for the __future__ mechanism.\n',
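A small sketch of the dynamic-import helper mentioned above, "importlib.import_module()":

   >>> import importlib
   >>> json = importlib.import_module('json')
   >>> json.dumps([1, 2, 3])
   '[1, 2, 3]'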
+ 'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation.  Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n   comparison    ::= or_expr ( comp_operator or_expr )*\n   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n                     | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects.  The objects need not have the same type. If both are\nnumbers, they are converted to a common type.  Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types.  You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. They\n  are identical to themselves, "x is x" but are not equal to\n  themselves, "x != x".  Additionally, comparing any value to a\n  not-a-number value will return "False".  For example, both "3 <\n  float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n  values of their elements.\n\n* Strings are compared lexicographically using the numeric\n  equivalents (the result of the built-in function "ord()") of their\n  characters. [3] String and bytes objects can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n  of corresponding elements.  This means that to compare equal, each\n  element must compare equal and the two sequences must be of the same\n  type and have the same length.\n\n  If not equal, the sequences are ordered the same as their first\n  differing elements.  For example, "[1,2,x] <= [1,2,y]" has the same\n  value as "x <= y".  If the corresponding element does not exist, the\n  shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n  same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n  \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n  superset tests.  Those relations do not define total orderings (the\n  two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n  another, nor supersets of one another).  
Accordingly, sets are not\n  appropriate arguments for functions which depend on total ordering.\n  For example, "min()", "max()", and "sorted()" produce undefined\n  results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n  are the same object; the choice whether one object is considered\n  smaller or larger than another one is made arbitrarily but\n  consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either\nof the types provides explicit support for the comparison.  Most\nnumeric types can be compared with one another.  When cross-type\ncomparison is not supported, the comparison method returns\n"NotImplemented".\n\nThe operators "in" and "not in" test for membership.  "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise.  "x\nnot in s" returns the negation of "x in s".  All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*.  An equivalent test is "y.find(x) != -1".  Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y".  If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise an "IndexError" exception.  (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object.  "x is not y"\nyields the inverse truth value. [4]\n',
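A few doctest-style examples of chaining, NaN comparisons and membership as described above:

   >>> 1 < 2 < 3                  # same as 1 < 2 and 2 < 3
   True
   >>> nan = float('NaN')
   >>> nan == nan, nan is nan
   (False, True)
   >>> 'ab' in 'abc', 3 in {1: 'one', 3: 'three'}
   (True, True)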
  'integers': '\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n   integer        ::= decimalinteger | octinteger | hexinteger | bininteger\n   decimalinteger ::= nonzerodigit digit* | "0"+\n   nonzerodigit   ::= "1"..."9"\n   digit          ::= "0"..."9"\n   octinteger     ::= "0" ("o" | "O") octdigit+\n   hexinteger     ::= "0" ("x" | "X") hexdigit+\n   bininteger     ::= "0" ("b" | "B") bindigit+\n   octdigit       ::= "0"..."7"\n   hexdigit       ::= digit | "a"..."f" | "A"..."F"\n   bindigit       ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n   7     2147483647                        0o177    0b100110111\n   3     79228162514264337593543950336     0o377    0x100000000\n         79228162514264337593543950336              0xdeadbeef\n',
  'lambda': '\nLambdas\n*******\n\n   lambda_expr        ::= "lambda" [parameter_list]: expression\n   lambda_expr_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions.  They are a shorthand to create\nanonymous functions; the expression "lambda arguments: expression"\nyields a function object.  The unnamed object behaves like a function\nobject defined with\n\n   def <lambda>(arguments):\n       return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements or annotations.\n',
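For example, a lambda expression is often used where a small function object is needed as an argument:

   >>> pairs = [(2, 'b'), (1, 'a')]
   >>> sorted(pairs, key=lambda pair: pair[0])
   [(1, 'a'), (2, 'b')]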
  'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n   list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension.  When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n',
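For example:

   >>> [1, 2, 1 + 2]              # expression list
   [1, 2, 3]
   >>> [x * x for x in range(4)]  # comprehension
   [0, 1, 4, 9]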
  'naming': '\nNaming and binding\n******************\n\n*Names* refer to objects.  Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block.  A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block.  The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*.  A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block.  If a local\nvariable is defined in a block, its scope includes that block.  If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name.  The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope.  This\nmeans that the following will fail:\n\n   class A:\n       a = 42\n       b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope.  The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal".  If a name is bound at the module\nlevel, it is a global variable.  (The variables of the module code\nblock are local and global.)  If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised.  "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore.  This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block.  This can lead to errors when a name is used within a\nblock before it is bound.  
This rule is subtle.  Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block.  The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace.  Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins".  The global namespace is searched first.  If\nthe name is not found there, the builtins namespace is searched.  The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used).  By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself.  "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail.  Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported.  The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block.  If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class.  Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name.  An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names.  Names may be resolved in the local\nand global namespaces of the caller.  Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace.  [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace.  If only one namespace is\nspecified, it is used for both.\n',
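Two of the rules above can be seen directly; this is a minimal sketch (the name "x" and the class "A" are illustrative):

   x = 10

   def shadows():
       # "x" is bound later in this block, so every use of "x" here
       # refers to the local variable, even before the assignment.
       try:
           print(x)
       except UnboundLocalError as exc:
           print("UnboundLocalError:", exc)
       x = 20

   shadows()

   class A:
       a = 42
       try:
           # the generator expression has its own function scope,
           # so the class-level name "a" is not visible inside it
           b = list(a + i for i in range(10))
       except NameError as exc:
           print("NameError:", exc)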
- 'nonlocal': '\nThe "nonlocal" statement\n************************\n\n   nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope.  This is\nimportant because the default behavior for binding is to search the\nlocal namespace first.  The statement allows encapsulated code to\nrebind variables outside of the local scope besides the global\n(module) scope.\n\nNames listed in a "nonlocal" statement, unlike to those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also:\n\n   **PEP 3104** - Access to Names in Outer Scopes\n      The specification for the "nonlocal" statement.\n',
+ 'nonlocal': '\nThe "nonlocal" statement\n************************\n\n   nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope.  This is\nimportant because the default behavior for binding is to search the\nlocal namespace first.  The statement allows encapsulated code to\nrebind variables outside of the local scope besides the global\n(module) scope.\n\nNames listed in a "nonlocal" statement, unlike those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also: **PEP 3104** - Access to Names in Outer Scopes\n\n     The specification for the "nonlocal" statement.\n',
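A compact sketch of the effect (the counter example is illustrative, not taken from the PEP):

   def make_counter():
       count = 0

       def bump():
           nonlocal count   # rebinds the enclosing "count" instead of creating a local
           count += 1
           return count

       return bump

   counter = make_counter()
   print(counter(), counter(), counter())  # 1 2 3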
  'numbers': '\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers.  There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n',
- 'numeric-types': '\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|").  For instance, to evaluate the\n   expression "x + y", where *x* is an instance of a class that has an\n   "__add__()" method, "x.__add__(y)" is called.  The "__divmod__()"\n   method should be the equivalent to using "__floordiv__()" and\n   "__mod__()"; it should not be related to "__truediv__()".  Note\n   that "__pow__()" should be defined to accept an optional third\n   argument if the ternary version of the built-in "pow()" function is\n   to be supported.\n\n   If one of those methods does not support the operation with the\n   supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n   These functions are only called if the left operand does not\n   support the corresponding operation and the operands are of\n   different types. [2]  For instance, to evaluate the expression "x -\n   y", where *y* is an instance of a class that has an "__rsub__()"\n   method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n   *NotImplemented*.\n\n   Note that ternary "pow()" will not try calling "__rpow__()" (the\n   coercion rules would become too complicated).\n\n   Note: If the right operand\'s type is a subclass of the left operand\'s\n     type and that subclass provides the reflected method for the\n     operation, this method will be called before the left operand\'s\n     non-reflected method.  This behavior allows subclasses to\n     override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n   These methods are called to implement the augmented arithmetic\n   assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n   ">>=", "&=", "^=", "|=").  
These methods should attempt to do the\n   operation in-place (modifying *self*) and return the result (which\n   could be, but does not have to be, *self*).  If a specific method\n   is not defined, the augmented assignment falls back to the normal\n   methods.  For instance, if *x* is an instance of a class with an\n   "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n   . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n   with the evaluation of "x + y". In certain situations, augmented\n   assignment can result in unexpected errors (see *Why does\n   a_tuple[i] += [\'item\'] raise an exception when the addition\n   works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n   Called to implement the unary arithmetic operations ("-", "+",\n   "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n   Called to implement the built-in functions "complex()", "int()",\n   "float()" and "round()".  Should return a value of the appropriate\n   type.\n\nobject.__index__(self)\n\n   Called to implement "operator.index()", and whenever Python needs\n   to losslessly convert the numeric object to an integer object (such\n   as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n   functions). Presence of this method indicates that the numeric\n   object is an integer type.  Must return an integer.\n\n   Note: When "__index__()" is defined, "__int__()" should also be\n     defined, and both shuld return the same value, in order to have a\n     coherent integer type class.\n',
+ 'numeric-types': '\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|").  For instance, to evaluate the\n   expression "x + y", where *x* is an instance of a class that has an\n   "__add__()" method, "x.__add__(y)" is called.  The "__divmod__()"\n   method should be the equivalent to using "__floordiv__()" and\n   "__mod__()"; it should not be related to "__truediv__()".  Note\n   that "__pow__()" should be defined to accept an optional third\n   argument if the ternary version of the built-in "pow()" function is\n   to be supported.\n\n   If one of those methods does not support the operation with the\n   supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n   These functions are only called if the left operand does not\n   support the corresponding operation and the operands are of\n   different types. [2]  For instance, to evaluate the expression "x -\n   y", where *y* is an instance of a class that has an "__rsub__()"\n   method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n   *NotImplemented*.\n\n   Note that ternary "pow()" will not try calling "__rpow__()" (the\n   coercion rules would become too complicated).\n\n   Note: If the right operand\'s type is a subclass of the left\n     operand\'s type and that subclass provides the reflected method\n     for the operation, this method will be called before the left\n     operand\'s non-reflected method.  This behavior allows subclasses\n     to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n   These methods are called to implement the augmented arithmetic\n   assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n   ">>=", "&=", "^=", "|=").  
These methods should attempt to do the\n   operation in-place (modifying *self*) and return the result (which\n   could be, but does not have to be, *self*).  If a specific method\n   is not defined, the augmented assignment falls back to the normal\n   methods.  For instance, to execute the statement "x += y", where\n   *x* is an instance of a class that has an "__iadd__()" method,\n   "x.__iadd__(y)" is called.  If *x* is an instance of a class that\n   does not define a "__iadd__()" method, "x.__add__(y)" and\n   "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n   Called to implement the unary arithmetic operations ("-", "+",\n   "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n   Called to implement the built-in functions "complex()", "int()",\n   "float()" and "round()".  Should return a value of the appropriate\n   type.\n\nobject.__index__(self)\n\n   Called to implement "operator.index()", and whenever Python needs\n   to losslessly convert the numeric object to an integer object (such\n   as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n   functions). Presence of this method indicates that the numeric\n   object is an integer type.  Must return an integer.\n\n   Note: When "__index__()" is defined, "__int__()" should also be\n     defined, and both should return the same value, in order to have a\n     coherent integer type class.\n',
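A minimal sketch of these protocols (the class "Meters" is invented for illustration and implements only addition):

   class Meters:
       def __init__(self, value):
           self.value = value

       def __add__(self, other):
           if isinstance(other, Meters):
               return Meters(self.value + other.value)
           if isinstance(other, (int, float)):
               return Meters(self.value + other)
           return NotImplemented   # let the other operand try its reflected method

       def __radd__(self, other):
           return self.__add__(other)

       def __repr__(self):
           return "Meters(%r)" % self.value

   print(Meters(2) + 3)   # Meters(5)
   print(3 + Meters(2))   # Meters(5): int.__add__ fails, so Meters.__radd__ is used
   m = Meters(1)
   m += Meters(4)         # no __iadd__ defined, so this falls back to __add__
   print(m)               # Meters(5)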
  'objects': '\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data.  All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value.  An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory.  The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, "id(x)" is the memory\naddress where "x" is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type.  The "type()" function returns an object\'s\ntype (which is an object itself).  Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change.  Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. (The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed.  So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected.  An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references.  See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows.  It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects.  The\n\'"try"..."finally"\' statement and the \'"with"\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. 
Examples of containers are tuples, lists and\ndictionaries.  The references are part of a container\'s value.  In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied.  So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior.  Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed.  E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n',
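The identity remarks above can be observed directly:

   a = 1
   b = 1
   print(a is b)   # may be True in CPython (small ints are cached), but not guaranteed

   c = []
   d = []
   print(c is d)   # False: two distinct, newly created lists

   c = d = []
   c.append("x")
   print(d)        # ['x']: "c" and "d" name the same object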
- 'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding).  Operators in the same box have the same precedence.  Unless\nthe syntax is explicitly given, operators are binary.  Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator                                        | Description                           |\n+=================================================+=======================================+\n| "lambda"                                        | Lambda expression                     |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else"                                  | Conditional expression                |\n+-------------------------------------------------+---------------------------------------+\n| "or"                                            | Boolean OR                            |\n+-------------------------------------------------+---------------------------------------+\n| "and"                                           | Boolean AND                           |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x"                                       | Boolean NOT                           |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership     |\n| ">=", "!=", "=="                                | tests and identity tests              |\n+-------------------------------------------------+---------------------------------------+\n| "|"                                             | Bitwise OR                            |\n+-------------------------------------------------+---------------------------------------+\n| "^"                                             | Bitwise XOR                           |\n+-------------------------------------------------+---------------------------------------+\n| "&"                                             | Bitwise AND                           |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>"                                      | Shifts                                |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-"                                        | Addition and subtraction              |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%"                             | Multiplication, division, remainder   |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x"                                | Positive, negative, bitwise NOT       |\n+-------------------------------------------------+---------------------------------------+\n| "**"                                            | Exponentiation [6]                    
|\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]",                   | Subscription, slicing, call,          |\n| "x(arguments...)", "x.attribute"                | attribute reference                   |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key:  | Binding or tuple display, list        |\n| value...}", "{expressions...}"                  | display, dictionary display, set      |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats it\n    may not be true numerically due to roundoff.  For example, and\n    assuming a platform on which a Python float is an IEEE 754 double-\n    precision number, in order that "-1e-100 % 1e100" have the same\n    sign as "1e100", the computed result is "-1e-100 + 1e100", which\n    is numerically exactly equal to "1e100".  The function\n    "math.fmod()" returns a result whose sign matches the sign of the\n    first argument instead, and so returns "-1e-100" in this case.\n    Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n    possible for "x//y" to be one larger than "(x-x%y)//y" due to\n    rounding.  In such cases, Python returns the latter result, in\n    order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n    to "x".\n\n[3] While comparisons between strings make sense at the byte level,\n    they may be counter-intuitive to users.  For example, the strings\n    ""\\u00C7"" and ""\\u0327\\u0043"" compare differently, even though\n    they both represent the same unicode character (LATIN CAPITAL\n    LETTER C WITH CEDILLA).  To compare strings in a human\n    recognizable way, compare using "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the dynamic\n    nature of descriptors, you may notice seemingly unusual behaviour\n    in certain uses of the "is" operator, like those involving\n    comparisons between instance methods, or constants.  Check their\n    documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n    precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic or\n    bitwise unary operator on its right, that is, "2**-1" is "0.5".\n',
+ 'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding).  Operators in the same box have the same precedence.  Unless\nthe syntax is explicitly given, operators are binary.  Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator                                        | Description                           |\n+=================================================+=======================================+\n| "lambda"                                        | Lambda expression                     |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else"                                  | Conditional expression                |\n+-------------------------------------------------+---------------------------------------+\n| "or"                                            | Boolean OR                            |\n+-------------------------------------------------+---------------------------------------+\n| "and"                                           | Boolean AND                           |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x"                                       | Boolean NOT                           |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership     |\n| ">=", "!=", "=="                                | tests and identity tests              |\n+-------------------------------------------------+---------------------------------------+\n| "|"                                             | Bitwise OR                            |\n+-------------------------------------------------+---------------------------------------+\n| "^"                                             | Bitwise XOR                           |\n+-------------------------------------------------+---------------------------------------+\n| "&"                                             | Bitwise AND                           |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>"                                      | Shifts                                |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-"                                        | Addition and subtraction              |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%"                             | Multiplication, division, remainder   |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x"                                | Positive, negative, bitwise NOT       |\n+-------------------------------------------------+---------------------------------------+\n| "**"                                            | Exponentiation [6]                    
|\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]",                   | Subscription, slicing, call,          |\n| "x(arguments...)", "x.attribute"                | attribute reference                   |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key:  | Binding or tuple display, list        |\n| value...}", "{expressions...}"                  | display, dictionary display, set      |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats\n    it may not be true numerically due to roundoff.  For example, and\n    assuming a platform on which a Python float is an IEEE 754 double-\n    precision number, in order that "-1e-100 % 1e100" have the same\n    sign as "1e100", the computed result is "-1e-100 + 1e100", which\n    is numerically exactly equal to "1e100".  The function\n    "math.fmod()" returns a result whose sign matches the sign of the\n    first argument instead, and so returns "-1e-100" in this case.\n    Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n    possible for "x//y" to be one larger than "(x-x%y)//y" due to\n    rounding.  In such cases, Python returns the latter result, in\n    order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n    to "x".\n\n[3] While comparisons between strings make sense at the byte\n    level, they may be counter-intuitive to users.  For example, the\n    strings ""\\u00C7"" and ""\\u0327\\u0043"" compare differently, even\n    though they both represent the same unicode character (LATIN\n    CAPITAL LETTER C WITH CEDILLA).  To compare strings in a human\n    recognizable way, compare using "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the\n    dynamic nature of descriptors, you may notice seemingly unusual\n    behaviour in certain uses of the "is" operator, like those\n    involving comparisons between instance methods, or constants.\n    Check their documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n    precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic\n    or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n',
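A few consequences of the table, for illustration:

   print(1 < 2 < 3)   # True: comparisons chain from left to right
   print(not 1 == 2)  # True: "not" binds less tightly than "=="
   print(1 + 2 * 3)   # 7: "*" binds more tightly than "+"
   print(2 ** -1)     # 0.5: a unary operator on the right binds more tightly than "**"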
  'pass': '\nThe "pass" statement\n********************\n\n   pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n   def f(arg): pass    # a function that does nothing (yet)\n\n   class C: pass       # a class with no methods (yet)\n',
  'power': '\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right.  The\nsyntax is:\n\n   power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument.  The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n"10**2" returns "100", but "10**-2" returns "0.01".\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a "complex"\nnumber. (In earlier versions it raised a "ValueError".)\n',
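For example:

   print(-1**2)    # -1: evaluated as -(1**2)
   print((-1)**2)  # 1
   print(10**2)    # 100
   print(10**-2)   # 0.01: a negative int exponent gives a float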
  'raise': '\nThe "raise" statement\n*********************\n\n   raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope.  If no exception is active in\nthe current scope, a "RuntimeError" exception is raised indicating\nthat this is an error.\n\nOtherwise, "raise" evaluates the first expression as the exception\nobject.  It must be either a subclass or an instance of\n"BaseException". If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the "__traceback__" attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the "with_traceback()" exception method (which returns\nthe same exception instance, with its traceback set to its argument),\nlike so:\n\n   raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe "from" clause is used for exception chaining: if given, the second\n*expression* must be another exception class or instance, which will\nthen be attached to the raised exception as the "__cause__" attribute\n(which is writable).  If the raised exception is not handled, both\nexceptions will be printed:\n\n   >>> try:\n   ...     print(1 / 0)\n   ... except Exception as exc:\n   ...     raise RuntimeError("Something bad happened") from exc\n   ...\n   Traceback (most recent call last):\n     File "<stdin>", line 2, in <module>\n   ZeroDivisionError: int division or modulo by zero\n\n   The above exception was the direct cause of the following exception:\n\n   Traceback (most recent call last):\n     File "<stdin>", line 4, in <module>\n   RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s "__context__" attribute:\n\n   >>> try:\n   ...     print(1 / 0)\n   ... except:\n   ...     raise RuntimeError("Something bad happened")\n   ...\n   Traceback (most recent call last):\n     File "<stdin>", line 2, in <module>\n   ZeroDivisionError: int division or modulo by zero\n\n   During handling of the above exception, another exception occurred:\n\n   Traceback (most recent call last):\n     File "<stdin>", line 4, in <module>\n   RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n',
  'return': '\nThe "return" statement\n**********************\n\n   return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement indicates that the\ngenerator is done and will cause "StopIteration" to be raised. The\nreturned value (if any) is used as an argument to construct\n"StopIteration" and becomes the "StopIteration.value" attribute.\n',
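A small sketch of the generator case (the generator name is illustrative):

   def gen():
       yield 1
       return "done"        # becomes StopIteration.value

   g = gen()
   print(next(g))           # 1
   try:
       next(g)
   except StopIteration as exc:
       print(exc.value)     # done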
- 'sequence-types': '\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well.  The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items.  It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects.  Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators.  It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values.  It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()".  Should return\n   the length of the object, an integer ">=" 0.  Also, an object that\n   doesn\'t define a "__bool__()" method and whose "__len__()" method\n   returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n   Called to implement "operator.length_hint()". Should return an\n   estimated length for the object (which may be greater or less than\n   the actual length). The length must be an integer ">=" 0. This\n   method is purely an optimization and is never required for\n   correctness.\n\n   New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.  A\n  call like\n\n     a[1:2] = b\n\n  is translated to\n\n     a[slice(1, 2, None)] = b\n\n  and so forth.  Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects.  Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. 
For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced.  The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence.  The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container.  For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "keys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves.  For more information on iterator\n   objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration.  It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()").  Objects that support the sequence protocol\n   should only provide "__reversed__()" if they can provide an\n   implementation that is more efficient than the one provided by\n   "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence.  However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n   Called to implement membership test operators.  Should return true\n   if *item* is in *self*, false otherwise.  For mapping objects, this\n   should consider the keys of the mapping rather than the values or\n   the key-item pairs.\n\n   For objects that don\'t define "__contains__()", the membership test\n   first tries iteration via "__iter__()", then the old sequence\n   iteration protocol via "__getitem__()", see *this section in the\n   language reference*.\n',
- 'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n   shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments.  They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is required to\n  be at most "sys.maxsize".  If the right-hand operand is larger than\n  "sys.maxsize" an "OverflowError" exception is raised.\n',
+ 'sequence-types': '\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well.  The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items.  It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects.  Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators.  It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values.  It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()".  Should return\n   the length of the object, an integer ">=" 0.  Also, an object that\n   doesn\'t define a "__bool__()" method and whose "__len__()" method\n   returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n   Called to implement "operator.length_hint()". Should return an\n   estimated length for the object (which may be greater or less than\n   the actual length). The length must be an integer ">=" 0. This\n   method is purely an optimization and is never required for\n   correctness.\n\n   New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n  A call like\n\n     a[1:2] = b\n\n  is translated to\n\n     a[slice(1, 2, None)] = b\n\n  and so forth.  Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects.  Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. 
For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced.  The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence.  The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container.  For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "keys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves.  For more information on iterator\n   objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration.  It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()").  Objects that support the sequence protocol\n   should only provide "__reversed__()" if they can provide an\n   implementation that is more efficient than the one provided by\n   "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence.  However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n   Called to implement membership test operators.  Should return true\n   if *item* is in *self*, false otherwise.  For mapping objects, this\n   should consider the keys of the mapping rather than the values or\n   the key-item pairs.\n\n   For objects that don\'t define "__contains__()", the membership test\n   first tries iteration via "__iter__()", then the old sequence\n   iteration protocol via "__getitem__()", see *this section in the\n   language reference*.\n',
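As a minimal sketch (the class "Countdown" is invented for illustration), a sequence that defines only "__len__()" and "__getitem__()" already supports iteration, membership tests and "reversed()" through the fallbacks described above:

   class Countdown:
       def __init__(self, n):
           self.n = n

       def __len__(self):
           return self.n

       def __getitem__(self, index):
           if not isinstance(index, int):
               raise TypeError("integer index required")
           if index < 0:
               index += self.n
           if not 0 <= index < self.n:
               raise IndexError(index)
           return self.n - index

   c = Countdown(3)
   print(len(c))             # 3
   print(list(c))            # [3, 2, 1]: iteration via __getitem__ until IndexError
   print(2 in c)             # True: membership also falls back to __getitem__
   print(list(reversed(c)))  # [1, 2, 3]: reversed() uses __len__ and __getitem__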
+ 'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n   shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments.  They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is\n  required to be at most "sys.maxsize".  If the right-hand operand is\n  larger than "sys.maxsize" an "OverflowError" exception is raised.\n',
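For example:

   x = 5
   print(x << 2, x * 2**2)   # 20 20
   print(x >> 1, x // 2**1)  # 2 2
   print(-5 >> 1)            # -3: floor division, not truncation toward zero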
  'slicings': '\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list).  Slicings may be used as expressions or as\ntargets in assignment or "del" statements.  The syntax for a slicing:\n\n   slicing      ::= primary "[" slice_list "]"\n   slice_list   ::= slice_item ("," slice_item)* [","]\n   slice_item   ::= expression | proper_slice\n   proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n   lower_bound  ::= expression\n   upper_bound  ::= expression\n   stride       ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing.  Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows.  The primary must evaluate\nto a mapping object, and it is indexed (using the same "__getitem__()"\nmethod as normal subscription) with a key that is constructed from the\nslice list, as follows.  If the slice list contains at least one\ncomma, the key is a tuple containing the conversion of the slice\nitems; otherwise, the conversion of the lone slice item is the key.\nThe conversion of a slice item that is an expression is that\nexpression.  The conversion of a proper slice is a slice object (see\nsection *The standard type hierarchy*) whose "start", "stop" and\n"step" attributes are the values of the expressions given as lower\nbound, upper bound and stride, respectively, substituting "None" for\nmissing expressions.\n',
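The construction of the key can be made visible with a small illustrative class that simply returns it:

   class ShowKey:
       def __getitem__(self, key):
           return key

   s = ShowKey()
   print(s[1:2])      # slice(1, 2, None)
   print(s[::2])      # slice(None, None, 2)
   print(s[1:2, 3])   # (slice(1, 2, None), 3): a comma makes the key a tuple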
- 'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant.  Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n   A dictionary or other mapping object used to store an object\'s\n   (writable) attributes.\n\ninstance.__class__\n\n   The class to which a class instance belongs.\n\nclass.__bases__\n\n   The tuple of base classes of a class object.\n\nclass.__name__\n\n   The name of the class or type.\n\nclass.__qualname__\n\n   The *qualified name* of the class or type.\n\n   New in version 3.3.\n\nclass.__mro__\n\n   This attribute is a tuple of classes that are considered when\n   looking for base classes during method resolution.\n\nclass.mro()\n\n   This method can be overridden by a metaclass to customize the\n   method resolution order for its instances.  It is called at class\n   instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n   Each class keeps a list of weak references to its immediate\n   subclasses.  This method returns a list of all those references\n   still alive. Example:\n\n      >>> int.__subclasses__()\n      [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n    the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to "[1.0,\n    2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n    operands.\n\n[4] Cased characters are those with general category property being\n    one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase), or "Lt"\n    (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a singleton\n    tuple whose only element is the tuple to be formatted.\n',
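Most of the special attributes listed in the 'specialattrs' entry can be inspected interactively, for example (output shown is typical of a fresh CPython session):

   >>> bool.__bases__
   (<class 'int'>,)
   >>> bool.__name__
   'bool'
   >>> bool.__mro__
   (<class 'bool'>, <class 'int'>, <class 'object'>)
   >>> True.__class__
   <class 'bool'>
   >>> int.__subclasses__()
   [<class 'bool'>]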
- 'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators.  For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled.  For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense.  (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n   Called to create a new instance of class *cls*.  "__new__()" is a\n   static method (special-cased so you need not declare it as such)\n   that takes the class of which an instance was requested as its\n   first argument.  The remaining arguments are those passed to the\n   object constructor expression (the call to the class).  The return\n   value of "__new__()" should be the new object instance (usually an\n   instance of *cls*).\n\n   Typical implementations create a new instance of the class by\n   invoking the superclass\'s "__new__()" method using\n   "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n   arguments and then modifying the newly-created instance as\n   necessary before returning it.\n\n   If "__new__()" returns an instance of *cls*, then the new\n   instance\'s "__init__()" method will be invoked like\n   "__init__(self[, ...])", where *self* is the new instance and the\n   remaining arguments are the same as were passed to "__new__()".\n\n   If "__new__()" does not return an instance of *cls*, then the new\n   instance\'s "__init__()" method will not be invoked.\n\n   "__new__()" is intended mainly to allow subclasses of immutable\n   types (like int, str, or tuple) to customize instance creation.  It\n   is also commonly overridden in custom metaclasses in order to\n   customize class creation.\n\nobject.__init__(self[, ...])\n\n   Called when the instance is created.  The arguments are those\n   passed to the class constructor expression.  If a base class has an\n   "__init__()" method, the derived class\'s "__init__()" method, if\n   any, must explicitly call it to ensure proper initialization of the\n   base class part of the instance; for example:\n   "BaseClass.__init__(self, [args...])".  As a special constraint on\n   constructors, no value may be returned; doing so will cause a\n   "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n   Called when the instance is about to be destroyed.  This is also\n   called a destructor.  If a base class has a "__del__()" method, the\n   derived class\'s "__del__()" method, if any, must explicitly call it\n   to ensure proper deletion of the base class part of the instance.\n   Note that it is possible (though not recommended!) 
for the\n   "__del__()" method to postpone destruction of the instance by\n   creating a new reference to it.  It may then be called at a later\n   time when this new reference is deleted.  It is not guaranteed that\n   "__del__()" methods are called for objects that still exist when\n   the interpreter exits.\n\n   Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n     decrements the reference count for "x" by one, and the latter is\n     only called when "x"\'s reference count reaches zero.  Some common\n     situations that may prevent the reference count of an object from\n     going to zero include: circular references between objects (e.g.,\n     a doubly-linked list or a tree data structure with parent and\n     child pointers); a reference to the object on the stack frame of\n     a function that caught an exception (the traceback stored in\n     "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n     to the object on the stack frame that raised an unhandled\n     exception in interactive mode (the traceback stored in\n     "sys.last_traceback" keeps the stack frame alive).  The first\n     situation can only be remedied by explicitly breaking the cycles;\n     the latter two situations can be resolved by storing "None" in\n     "sys.last_traceback". Circular references which are garbage are\n     detected and cleaned up when the cyclic garbage collector is\n     enabled (it\'s on by default). Refer to the documentation for the\n     "gc" module for more information about this topic.\n\n   Warning: Due to the precarious circumstances under which "__del__()"\n     methods are invoked, exceptions that occur during their execution\n     are ignored, and a warning is printed to "sys.stderr" instead.\n     Also, when "__del__()" is invoked in response to a module being\n     deleted (e.g., when execution of the program is done), other\n     globals referenced by the "__del__()" method may already have\n     been deleted or in the process of being torn down (e.g. the\n     import machinery shutting down).  For this reason, "__del__()"\n     methods should do the absolute minimum needed to maintain\n     external invariants.  Starting with version 1.5, Python\n     guarantees that globals whose name begins with a single\n     underscore are deleted from their module before other globals are\n     deleted; if no other references to such globals exist, this may\n     help in assuring that imported modules are still available at the\n     time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n   Called by the "repr()" built-in function to compute the "official"\n   string representation of an object.  If at all possible, this\n   should look like a valid Python expression that could be used to\n   recreate an object with the same value (given an appropriate\n   environment).  If this is not possible, a string of the form\n   "<...some useful description...>" should be returned. The return\n   value must be a string object. If a class defines "__repr__()" but\n   not "__str__()", then "__repr__()" is also used when an "informal"\n   string representation of instances of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by "str(object)" and the built-in functions "format()" and\n   "print()" to compute the "informal" or nicely printable string\n   representation of an object.  
The return value must be a *string*\n   object.\n\n   This method differs from "object.__repr__()" in that there is no\n   expectation that "__str__()" return a valid Python expression: a\n   more convenient or concise representation can be used.\n\n   The default implementation defined by the built-in type "object"\n   calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n   Called by "bytes()" to compute a byte-string representation of an\n   object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n   Called by the "format()" built-in function (and by extension, the\n   "str.format()" method of class "str") to produce a "formatted"\n   string representation of an object. The "format_spec" argument is a\n   string that contains a description of the formatting options\n   desired. The interpretation of the "format_spec" argument is up to\n   the type implementing "__format__()", however most classes will\n   either delegate formatting to one of the built-in types, or use a\n   similar formatting option syntax.\n\n   See *Format Specification Mini-Language* for a description of the\n   standard formatting syntax.\n\n   The return value must be a string object.\n\n   Changed in version 3.4: The __format__ method of "object" itself\n   raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   These are the so-called "rich comparison" methods. The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of "x==y" does not imply that "x!=y" is false.\n   Accordingly, when defining "__eq__()", one should also define\n   "__ne__()" so that the operators will behave as expected.  See the\n   paragraph on "__hash__()" for some important notes on creating\n   *hashable* objects which support custom comparison operations and\n   are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n   and "__eq__()" and "__ne__()" are their own reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer.  
The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   Note: "hash()" truncates the value returned from an object\'s custom\n     "__hash__()" method to the size of a "Py_ssize_t".  This is\n     typically 8 bytes on 64-bit builds and 4 bytes on 32-bit builds.\n     If an object\'s   "__hash__()" must interoperate on builds of\n     different bit sizes, be sure to check the width on all supported\n     builds.  An easy way to do this is with "python -c "import sys;\n     print(sys.hash_info.width)""\n\n   If a class does not define an "__eq__()" method it should not\n   define a "__hash__()" operation either; if it defines "__eq__()"\n   but not "__hash__()", its instances will not be usable as items in\n   hashable collections.  If a class defines mutable objects and\n   implements an "__eq__()" method, it should not implement\n   "__hash__()", since the implementation of hashable collections\n   requires that a key\'s hash value is immutable (if the object\'s hash\n   value changes, it will be in the wrong hash bucket).\n\n   User-defined classes have "__eq__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns an appropriate value such\n   that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n   A class that overrides "__eq__()" and does not define "__hash__()"\n   will have its "__hash__()" implicitly set to "None".  When the\n   "__hash__()" method of a class is "None", instances of the class\n   will raise an appropriate "TypeError" when a program attempts to\n   retrieve their hash value, and will also be correctly identified as\n   unhashable when checking "isinstance(obj, collections.Hashable)".\n\n   If a class that overrides "__eq__()" needs to retain the\n   implementation of "__hash__()" from a parent class, the interpreter\n   must be told this explicitly by setting "__hash__ =\n   <ParentClass>.__hash__".\n\n   If a class that does not override "__eq__()" wishes to suppress\n   hash support, it should include "__hash__ = None" in the class\n   definition. A class which defines its own "__hash__()" that\n   explicitly raises a "TypeError" would be incorrectly identified as\n   hashable by an "isinstance(obj, collections.Hashable)" call.\n\n   Note: By default, the "__hash__()" values of str, bytes and datetime\n     objects are "salted" with an unpredictable random value.\n     Although they remain constant within an individual Python\n     process, they are not predictable between repeated invocations of\n     Python.  This is intended to provide protection against a denial-\n     of-service caused by carefully-chosen inputs that exploit the\n     worst case performance of a dict insertion, O(n^2) complexity.\n     See http://www.ocert.org/advisories/ocert-2011-003.html for\n     details.  Changing hash values affects the iteration order of\n     dicts, sets and other mappings.  Python has never made guarantees\n     about this ordering (and it typically varies between 32-bit and\n     64-bit builds).  See also "PYTHONHASHSEED".\n\n   Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True".  
When this method is not\n   defined, "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero.  If a class defines\n   neither "__len__()" nor "__bool__()", all its instances are\n   considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for "self").  "name" is the attribute name. This\n   method should return the (computed) attribute value or raise an\n   "AttributeError" exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   "__getattr__()" is not called.  (This is an intentional asymmetry\n   between "__getattr__()" and "__setattr__()".) This is done both for\n   efficiency reasons and because otherwise "__getattr__()" would have\n   no way to access other attributes of the instance.  Note that at\n   least for instance variables, you can fake total control by not\n   inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object).  See the\n   "__getattribute__()" method below for a way to actually get total\n   control over attribute access.\n\nobject.__getattribute__(self, name)\n\n   Called unconditionally to implement attribute accesses for\n   instances of the class. If the class also defines "__getattr__()",\n   the latter will not be called unless "__getattribute__()" either\n   calls it explicitly or raises an "AttributeError". This method\n   should return the (computed) attribute value or raise an\n   "AttributeError" exception. In order to avoid infinite recursion in\n   this method, its implementation should always call the base class\n   method with the same name to access any attributes it needs, for\n   example, "object.__getattribute__(self, name)".\n\n   Note: This method may still be bypassed when looking up special methods\n     as the result of implicit invocation via language syntax or\n     built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n   Called when an attribute assignment is attempted.  This is called\n   instead of the normal mechanism (i.e. store the value in the\n   instance dictionary). *name* is the attribute name, *value* is the\n   value to be assigned to it.\n\n   If "__setattr__()" wants to assign to an instance attribute, it\n   should call the base class method with the same name, for example,\n   "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n   Like "__setattr__()" but for attribute deletion instead of\n   assignment.  This should only be implemented if "del obj.name" is\n   meaningful for the object.\n\nobject.__dir__(self)\n\n   Called when "dir()" is called on the object. A sequence must be\n   returned. "dir()" converts the returned sequence to a list and\n   sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents).  
In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n   Called to get the attribute of the owner class (class attribute\n   access) or of an instance of that class (instance attribute\n   access). *owner* is always the owner class, while *instance* is the\n   instance that the attribute was accessed through, or "None" when\n   the attribute is accessed through the *owner*.  This method should\n   return the (computed) attribute value or raise an "AttributeError"\n   exception.\n\nobject.__set__(self, instance, value)\n\n   Called to set the attribute on an instance *instance* of the owner\n   class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n   Called to delete the attribute on an instance *instance* of the\n   owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol:  "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead.  Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method:    "x.__get__(a)".\n\nInstance Binding\n   If binding to an object instance, "a.x" is transformed into the\n   call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n   If binding to a class, "A.x" is transformed into the call:\n   "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n   If "a" is an instance of "super", then the binding "super(B,\n   obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n   immediately preceding "B" and then invokes the descriptor with the\n   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined.  A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()".  If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary.  
If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor.  Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method.  Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary.  In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors.  Accordingly, instances can\nredefine and override methods.  This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage.  This wastes space for objects having very few instance\nvariables.  The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable.  Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances.  If defined in a\n   class, *__slots__* reserves space for the declared variables and\n   prevents the automatic creation of *__dict__* and *__weakref__* for\n   each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition.  Attempts to\n  assign to an unlisted variable name raises "AttributeError". If\n  dynamic assignment of new variables is desired, then add\n  "\'__dict__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n  *__slots__* do not support weak references to its instances. If weak\n  reference support is needed, then add "\'__weakref__\'" to the\n  sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (*Implementing Descriptors*) for each variable name.  As\n  a result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined.  As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__* (which must only contain names\n  of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n  variable defined by the base class slot is inaccessible (except by\n  retrieving its descriptor directly from the base class). This\n  renders the meaning of the program undefined.  
In the future, a\n  check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n  also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n   class Meta(type):\n       pass\n\n   class MyClass(metaclass=Meta):\n       pass\n\n   class MySubclass(MyClass):\n       pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n  used\n\n* if an explicit metaclass is given and it is *not* an instance of\n  "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n  bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also:\n\n   **PEP 3115** - Metaclasses in Python 3000\n      Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. 
Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also:\n\n   **PEP 3135** - New super\n      Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class members\nwere defined:\n\n   class OrderedClass(type):\n\n        @classmethod\n        def __prepare__(metacls, name, bases, **kwds):\n           return collections.OrderedDict()\n\n        def __new__(cls, name, bases, namespace, **kwds):\n           result = type.__new__(cls, name, bases, dict(namespace))\n           result.members = tuple(namespace)\n           return result\n\n   class A(metaclass=OrderedClass):\n       def one(self): pass\n       def two(self): pass\n       def three(self): pass\n       def four(self): pass\n\n   >>> A.members\n   (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict".  That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked.  That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n   Return true if *instance* should be considered a (direct or\n   indirect) instance of *class*. 
If defined, called to implement\n   "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n   Return true if *subclass* should be considered a (direct or\n   indirect) subclass of *class*.  If defined, called to implement\n   "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass.  They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n   **PEP 3119** - Introducing Abstract Base Classes\n      Includes the specification for customizing "isinstance()" and\n      "issubclass()" behavior through "__instancecheck__()" and\n      "__subclasscheck__()", with motivation for this functionality in\n      the context of adding Abstract Base Classes (see the "abc"\n      module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n   Called when the instance is "called" as a function; if this method\n   is defined, "x(arg1, arg2, ...)" is a shorthand for\n   "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well.  The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items.  It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects.  Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators.  It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values.  It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()".  Should return\n   the length of the object, an integer ">=" 0.  Also, an object that\n   doesn\'t define a "__bool__()" method and whose "__len__()" method\n   returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n   Called to implement "operator.length_hint()". 
Should return an\n   estimated length for the object (which may be greater or less than\n   the actual length). The length must be an integer ">=" 0. This\n   method is purely an optimization and is never required for\n   correctness.\n\n   New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.  A\n  call like\n\n     a[1:2] = b\n\n  is translated to\n\n     a[slice(1, 2, None)] = b\n\n  and so forth.  Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects.  Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced.  The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence.  The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container.  For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "keys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves.  For more information on iterator\n   objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration.  It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()").  Objects that support the sequence protocol\n   should only provide "__reversed__()" if they can provide an\n   implementation that is more efficient than the one provided by\n   "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence.  
However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n   Called to implement membership test operators.  Should return true\n   if *item* is in *self*, false otherwise.  For mapping objects, this\n   should consider the keys of the mapping rather than the values or\n   the key-item pairs.\n\n   For objects that don\'t define "__contains__()", the membership test\n   first tries iteration via "__iter__()", then the old sequence\n   iteration protocol via "__getitem__()", see *this section in the\n   language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|").  For instance, to evaluate the\n   expression "x + y", where *x* is an instance of a class that has an\n   "__add__()" method, "x.__add__(y)" is called.  The "__divmod__()"\n   method should be the equivalent to using "__floordiv__()" and\n   "__mod__()"; it should not be related to "__truediv__()".  Note\n   that "__pow__()" should be defined to accept an optional third\n   argument if the ternary version of the built-in "pow()" function is\n   to be supported.\n\n   If one of those methods does not support the operation with the\n   supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n   These functions are only called if the left operand does not\n   support the corresponding operation and the operands are of\n   different types. [2]  For instance, to evaluate the expression "x -\n   y", where *y* is an instance of a class that has an "__rsub__()"\n   method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n   *NotImplemented*.\n\n   Note that ternary "pow()" will not try calling "__rpow__()" (the\n   coercion rules would become too complicated).\n\n   Note: If the right operand\'s type is a subclass of the left operand\'s\n     type and that subclass provides the reflected method for the\n     operation, this method will be called before the left operand\'s\n     non-reflected method.  
This behavior allows subclasses to\n     override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n   These methods are called to implement the augmented arithmetic\n   assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n   ">>=", "&=", "^=", "|=").  These methods should attempt to do the\n   operation in-place (modifying *self*) and return the result (which\n   could be, but does not have to be, *self*).  If a specific method\n   is not defined, the augmented assignment falls back to the normal\n   methods.  For instance, if *x* is an instance of a class with an\n   "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n   . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n   with the evaluation of "x + y". In certain situations, augmented\n   assignment can result in unexpected errors (see *Why does\n   a_tuple[i] += [\'item\'] raise an exception when the addition\n   works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n   Called to implement the unary arithmetic operations ("-", "+",\n   "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n   Called to implement the built-in functions "complex()", "int()",\n   "float()" and "round()".  Should return a value of the appropriate\n   type.\n\nobject.__index__(self)\n\n   Called to implement "operator.index()", and whenever Python needs\n   to losslessly convert the numeric object to an integer object (such\n   as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n   functions). Presence of this method indicates that the numeric\n   object is an integer type.  Must return an integer.\n\n   Note: When "__index__()" is defined, "__int__()" should also be\n     defined, and both should return the same value, in order to have a\n     coherent integer type class.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. 
If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also:\n\n   **PEP 0343** - The "with" statement\n      The specification, background, and examples for the Python\n      "with" statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary.  That behaviour is the reason why\nthe following code raises an exception:\n\n   >>> class C:\n   ...     pass\n   ...\n   >>> c = C()\n   >>> c.__len__ = lambda: 5\n   >>> len(c)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n   >>> 1 .__hash__() == hash(1)\n   True\n   >>> int.__hash__() == hash(int)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n   >>> type(1).__hash__(1) == hash(1)\n   True\n   >>> type(int).__hash__(int) == hash(int)\n   True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n   >>> class Meta(type):\n   ...    def __getattribute__(*args):\n   ...       print("Metaclass getattribute invoked")\n   ...       return type.__getattribute__(*args)\n   ...\n   >>> class C(object, metaclass=Meta):\n   ...     def __len__(self):\n   ...         return 10\n   ...     def __getattribute__(*args):\n   ...         print("Class getattribute invoked")\n   ...         return object.__getattribute__(*args)\n   ...\n   >>> c = C()\n   >>> c.__len__()                 # Explicit lookup via instance\n   Class getattribute invoked\n   10\n   >>> type(c).__len__(c)          # Explicit lookup via type\n   Metaclass getattribute invoked\n   10\n   >>> len(c)                      # Implicit lookup\n   10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n    certain controlled conditions. 
It generally isn\'t a good idea\n    though, since it can lead to some very strange behaviour if it is\n    handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n    reflected method (such as "__add__()") fails the operation is not\n    supported, which is why the reflected method is not called.\n',
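As a compact illustration of the descriptor protocol covered in the 'specialnames' entry (a sketch only; "Positive" and "Account" are hypothetical names, not part of the patch):

   >>> class Positive:
   ...     """Data descriptor: defines both __get__() and __set__()."""
   ...     def __get__(self, instance, owner):
   ...         if instance is None:
   ...             return self                  # class attribute access
   ...         return instance._value
   ...     def __set__(self, instance, value):
   ...         if value <= 0:
   ...             raise ValueError("must be positive")
   ...         instance._value = value
   ...
   >>> class Account:
   ...     balance = Positive()
   ...
   >>> a = Account()
   >>> a.balance = 10        # routed through Positive.__set__
   >>> a.balance             # routed through Positive.__get__
   10
   >>> a.balance = -1
   Traceback (most recent call last):
     ...
   ValueError: must be positive

Because the descriptor defines "__set__()", it is a data descriptor and cannot be shadowed by an entry in the instance dictionary.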
- 'string-methods': '\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n   Return a copy of the string with its first character capitalized\n   and the rest lowercased.\n\nstr.casefold()\n\n   Return a casefolded copy of the string. Casefolded strings may be\n   used for caseless matching.\n\n   Casefolding is similar to lowercasing but more aggressive because\n   it is intended to remove all case distinctions in a string. For\n   example, the German lowercase letter "\'\xc3\x9f\'" is equivalent to ""ss"".\n   Since it is already lowercase, "lower()" would do nothing to "\'\xc3\x9f\'";\n   "casefold()" converts it to ""ss"".\n\n   The casefolding algorithm is described in section 3.13 of the\n   Unicode Standard.\n\n   New in version 3.3.\n\nstr.center(width[, fillchar])\n\n   Return centered in a string of length *width*. Padding is done\n   using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n   Return the number of non-overlapping occurrences of substring *sub*\n   in the range [*start*, *end*].  Optional arguments *start* and\n   *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n   Return an encoded version of the string as a bytes object. Default\n   encoding is "\'utf-8\'". *errors* may be given to set a different\n   error handling scheme. The default for *errors* is "\'strict\'",\n   meaning that encoding errors raise a "UnicodeError". Other possible\n   values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n   "\'backslashreplace\'" and any other name registered via\n   "codecs.register_error()", see section *Codec Base Classes*. For a\n   list of possible encodings, see section *Standard Encodings*.\n\n   Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n   Return "True" if the string ends with the specified *suffix*,\n   otherwise return "False".  *suffix* can also be a tuple of suffixes\n   to look for.  With optional *start*, test beginning at that\n   position.  With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n   Return a copy of the string where all tab characters are replaced\n   by one or more spaces, depending on the current column and the\n   given tab size.  Tab positions occur every *tabsize* characters\n   (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n   To expand the string, the current column is set to zero and the\n   string is examined character by character.  If the character is a\n   tab ("\\t"), one or more space characters are inserted in the result\n   until the current column is equal to the next tab position. (The\n   tab character itself is not copied.)  
If the character is a newline\n   ("\\n") or return ("\\r"), it is copied and the current column is\n   reset to zero.  Any other character is copied unchanged and the\n   current column is incremented by one regardless of how the\n   character is represented when printed.\n\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n   \'01      012     0123    01234\'\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n   \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n   Return the lowest index in the string where substring *sub* is\n   found, such that *sub* is contained in the slice "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" if *sub* is not found.\n\n   Note: The "find()" method should be used only if you need to know the\n     position of *sub*.  To check if *sub* is a substring or not, use\n     the "in" operator:\n\n        >>> \'Py\' in \'Python\'\n        True\n\nstr.format(*args, **kwargs)\n\n   Perform a string formatting operation.  The string on which this\n   method is called can contain literal text or replacement fields\n   delimited by braces "{}".  Each replacement field contains either\n   the numeric index of a positional argument, or the name of a\n   keyword argument.  Returns a copy of the string where each\n   replacement field is replaced with the string value of the\n   corresponding argument.\n\n   >>> "The sum of 1 + 2 is {0}".format(1+2)\n   \'The sum of 1 + 2 is 3\'\n\n   See *Format String Syntax* for a description of the various\n   formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n   Similar to "str.format(**mapping)", except that "mapping" is used\n   directly and not copied to a "dict".  This is useful if for example\n   "mapping" is a dict subclass:\n\n   >>> class Default(dict):\n   ...     def __missing__(self, key):\n   ...         return key\n   ...\n   >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n   \'Guido was born in country\'\n\n   New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n   Like "find()", but raise "ValueError" when the substring is not\n   found.\n\nstr.isalnum()\n\n   Return true if all characters in the string are alphanumeric and\n   there is at least one character, false otherwise.  A character "c"\n   is alphanumeric if one of the following returns "True":\n   "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n   Return true if all characters in the string are alphabetic and\n   there is at least one character, false otherwise.  Alphabetic\n   characters are those characters defined in the Unicode character\n   database as "Letter", i.e., those with general category property\n   being one of "Lm", "Lt", "Lu", "Ll", or "Lo".  Note that this is\n   different from the "Alphabetic" property defined in the Unicode\n   Standard.\n\nstr.isdecimal()\n\n   Return true if all characters in the string are decimal characters\n   and there is at least one character, false otherwise. Decimal\n   characters are those from general category "Nd". This category\n   includes digit characters, and all characters that can be used to\n   form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n   Return true if all characters in the string are digits and there is\n   at least one character, false otherwise.  Digits include decimal\n   characters and digits that need special handling, such as the\n   compatibility superscript digits.  
Formally, a digit is a character\n   that has the property value Numeric_Type=Digit or\n   Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n   Return true if the string is a valid identifier according to the\n   language definition, section *Identifiers and keywords*.\n\n   Use "keyword.iskeyword()" to test for reserved identifiers such as\n   "def" and "class".\n\nstr.islower()\n\n   Return true if all cased characters [4] in the string are lowercase\n   and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n   Return true if all characters in the string are numeric characters,\n   and there is at least one character, false otherwise. Numeric\n   characters include digit characters, and all characters that have\n   the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n   ONE FIFTH.  Formally, numeric characters are those with the\n   property value Numeric_Type=Digit, Numeric_Type=Decimal or\n   Numeric_Type=Numeric.\n\nstr.isprintable()\n\n   Return true if all characters in the string are printable or the\n   string is empty, false otherwise.  Nonprintable characters are\n   those characters defined in the Unicode character database as\n   "Other" or "Separator", excepting the ASCII space (0x20) which is\n   considered printable.  (Note that printable characters in this\n   context are those which should not be escaped when "repr()" is\n   invoked on a string.  It has no bearing on the handling of strings\n   written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n   Return true if there are only whitespace characters in the string\n   and there is at least one character, false otherwise.  Whitespace\n   characters  are those characters defined in the Unicode character\n   database as "Other" or "Separator" and those with bidirectional\n   property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n   Return true if the string is a titlecased string and there is at\n   least one character, for example uppercase characters may only\n   follow uncased characters and lowercase characters only cased ones.\n   Return false otherwise.\n\nstr.isupper()\n\n   Return true if all cased characters [4] in the string are uppercase\n   and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n   Return a string which is the concatenation of the strings in the\n   *iterable* *iterable*.  A "TypeError" will be raised if there are\n   any non-string values in *iterable*, including "bytes" objects.\n   The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n   Return the string left justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space).  The original string is returned if *width* is less than or\n   equal to "len(s)".\n\nstr.lower()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to lowercase.\n\n   The lowercasing algorithm used is described in section 3.13 of the\n   Unicode Standard.\n\nstr.lstrip([chars])\n\n   Return a copy of the string with leading characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  
The *chars* argument is not a prefix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.lstrip()\n   \'spacious   \'\n   >>> \'www.example.com\'.lstrip(\'cmowz.\')\n   \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n   This static method returns a translation table usable for\n   "str.translate()".\n\n   If there is only one argument, it must be a dictionary mapping\n   Unicode ordinals (integers) or characters (strings of length 1) to\n   Unicode ordinals, strings (of arbitrary lengths) or None.\n   Character keys will then be converted to ordinals.\n\n   If there are two arguments, they must be strings of equal length,\n   and in the resulting dictionary, each character in x will be mapped\n   to the character at the same position in y.  If there is a third\n   argument, it must be a string, whose characters will be mapped to\n   None in the result.\n\nstr.partition(sep)\n\n   Split the string at the first occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing the string itself, followed by\n   two empty strings.\n\nstr.replace(old, new[, count])\n\n   Return a copy of the string with all occurrences of substring *old*\n   replaced by *new*.  If the optional argument *count* is given, only\n   the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n   Return the highest index in the string where substring *sub* is\n   found, such that *sub* is contained within "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n   Like "rfind()" but raises "ValueError" when the substring *sub* is\n   not found.\n\nstr.rjust(width[, fillchar])\n\n   Return the string right justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space). The original string is returned if *width* is less than or\n   equal to "len(s)".\n\nstr.rpartition(sep)\n\n   Split the string at the last occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing two empty strings, followed by\n   the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n   are done, the *rightmost* ones.  If *sep* is not specified or\n   "None", any whitespace string is a separator.  Except for splitting\n   from the right, "rsplit()" behaves like "split()" which is\n   described in detail below.\n\nstr.rstrip([chars])\n\n   Return a copy of the string with trailing characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a suffix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.rstrip()\n   \'   spacious\'\n   >>> \'mississippi\'.rstrip(\'ipz\')\n   \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string.  
If *maxsplit* is given, at most *maxsplit*\n   splits are done (thus, the list will have at most "maxsplit+1"\n   elements).  If *maxsplit* is not specified or "-1", then there is\n   no limit on the number of splits (all possible splits are made).\n\n   If *sep* is given, consecutive delimiters are not grouped together\n   and are deemed to delimit empty strings (for example,\n   "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']").  The *sep* argument\n   may consist of multiple characters (for example,\n   "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n   empty string with a specified separator returns "[\'\']".\n\n   If *sep* is not specified or is "None", a different splitting\n   algorithm is applied: runs of consecutive whitespace are regarded\n   as a single separator, and the result will contain no empty strings\n   at the start or end if the string has leading or trailing\n   whitespace.  Consequently, splitting an empty string or a string\n   consisting of just whitespace with a "None" separator returns "[]".\n\n   For example, "\' 1  2   3  \'.split()" returns "[\'1\', \'2\', \'3\']", and\n   "\'  1  2   3  \'.split(None, 1)" returns "[\'1\', \'2   3  \']".\n\nstr.splitlines([keepends])\n\n   Return a list of the lines in the string, breaking at line\n   boundaries. This method uses the *universal newlines* approach to\n   splitting lines. Line breaks are not included in the resulting list\n   unless *keepends* is given and true.\n\n   For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n   c\', \'\', \'de fg\', \'kl\']", while the same call with\n   "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n   Unlike "split()" when a delimiter string *sep* is given, this\n   method returns an empty list for the empty string, and a terminal\n   line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n   Return "True" if string starts with the *prefix*, otherwise return\n   "False". *prefix* can also be a tuple of prefixes to look for.\n   With optional *start*, test string beginning at that position.\n   With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n   Return a copy of the string with the leading and trailing\n   characters removed. The *chars* argument is a string specifying the\n   set of characters to be removed. If omitted or "None", the *chars*\n   argument defaults to removing whitespace. The *chars* argument is\n   not a prefix or suffix; rather, all combinations of its values are\n   stripped:\n\n   >>> \'   spacious   \'.strip()\n   \'spacious\'\n   >>> \'www.example.com\'.strip(\'cmowz.\')\n   \'example\'\n\nstr.swapcase()\n\n   Return a copy of the string with uppercase characters converted to\n   lowercase and vice versa. Note that it is not necessarily true that\n   "s.swapcase().swapcase() == s".\n\nstr.title()\n\n   Return a titlecased version of the string where words start with an\n   uppercase character and the remaining characters are lowercase.\n\n   The algorithm uses a simple language-independent definition of a\n   word as groups of consecutive letters.  
The definition works in\n   many contexts but it means that apostrophes in contractions and\n   possessives form word boundaries, which may not be the desired\n   result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n   A workaround for apostrophes can be constructed using regular\n   expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n   Return a copy of the *s* where all characters have been mapped\n   through the *map* which must be a dictionary of Unicode ordinals\n   (integers) to Unicode ordinals, strings or "None".  Unmapped\n   characters are left untouched. Characters mapped to "None" are\n   deleted.\n\n   You can use "str.maketrans()" to create a translation map from\n   character-to-character mappings in different formats.\n\n   Note: An even more flexible approach is to create a custom character\n     mapping codec using the "codecs" module (see "encodings.cp1251"\n     for an example).\n\nstr.upper()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to uppercase.  Note that "str.upper().isupper()" might be\n   "False" if "s" contains uncased characters or if the Unicode\n   category of the resulting character(s) is not "Lu" (Letter,\n   uppercase), but e.g. "Lt" (Letter, titlecase).\n\n   The uppercasing algorithm used is described in section 3.13 of the\n   Unicode Standard.\n\nstr.zfill(width)\n\n   Return the numeric string left filled with zeros in a string of\n   length *width*.  A sign prefix is handled correctly.  The original\n   string is returned if *width* is less than or equal to "len(s)".\n',
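A minimal interactive sketch of the behaviour documented in the entry above (whitespace splitting, "str.maketrans()"/"str.translate()" and "str.zfill()"); the particular mapping and values are only illustrative:

   >>> ' 1  2   3  '.split()          # sep=None collapses runs of whitespace
   ['1', '2', '3']
   >>> '1,,2'.split(',')              # an explicit sep keeps empty strings
   ['1', '', '2']
   >>> table = str.maketrans({'a': '4', 'e': None})
   >>> 'sesame'.translate(table)      # 'a' is mapped, 'e' is deleted
   'ss4m'
   >>> '-42'.zfill(5)                 # the sign prefix is handled correctly
   '-0042'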
- 'strings': '\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n   stringliteral   ::= [stringprefix](shortstring | longstring)\n   stringprefix    ::= "r" | "u" | "R" | "U"\n   shortstring     ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n   longstring      ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n   shortstringitem ::= shortstringchar | stringescapeseq\n   longstringitem  ::= longstringchar | stringescapeseq\n   shortstringchar ::= <any source character except "\\" or newline or the quote>\n   longstringchar  ::= <any source character except "\\">\n   stringescapeseq ::= "\\" <any source character>\n\n   bytesliteral   ::= bytesprefix(shortbytes | longbytes)\n   bytesprefix    ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n   shortbytes     ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n   longbytes      ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n   shortbytesitem ::= shortbyteschar | bytesescapeseq\n   longbytesitem  ::= longbyteschar | bytesescapeseq\n   shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>\n   longbyteschar  ::= <any ASCII character except "\\">\n   bytesescapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes (""").  They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*).  The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type.  They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters.  As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\n   New in version 3.3: The "\'rb\'" prefix of raw bytes literals has\n   been added as a synonym of "\'br\'".\n\n   New in version 3.3: Support for the unicode legacy literal\n   ("u\'value\'") was reintroduced to simplify the maintenance of dual\n   Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string.  (A "quote" is the character used to open the\nstring, i.e. 
either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C.  The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence   | Meaning                           | Notes   |\n+===================+===================================+=========+\n| "\\newline"        | Backslash and newline ignored     |         |\n+-------------------+-----------------------------------+---------+\n| "\\\\"              | Backslash ("\\")                   |         |\n+-------------------+-----------------------------------+---------+\n| "\\\'"              | Single quote ("\'")                |         |\n+-------------------+-----------------------------------+---------+\n| "\\""              | Double quote (""")                |         |\n+-------------------+-----------------------------------+---------+\n| "\\a"              | ASCII Bell (BEL)                  |         |\n+-------------------+-----------------------------------+---------+\n| "\\b"              | ASCII Backspace (BS)              |         |\n+-------------------+-----------------------------------+---------+\n| "\\f"              | ASCII Formfeed (FF)               |         |\n+-------------------+-----------------------------------+---------+\n| "\\n"              | ASCII Linefeed (LF)               |         |\n+-------------------+-----------------------------------+---------+\n| "\\r"              | ASCII Carriage Return (CR)        |         |\n+-------------------+-----------------------------------+---------+\n| "\\t"              | ASCII Horizontal Tab (TAB)        |         |\n+-------------------+-----------------------------------+---------+\n| "\\v"              | ASCII Vertical Tab (VT)           |         |\n+-------------------+-----------------------------------+---------+\n| "\\ooo"            | Character with octal value *ooo*  | (1,3)   |\n+-------------------+-----------------------------------+---------+\n| "\\xhh"            | Character with hex value *hh*     | (2,3)   |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence   | Meaning                           | Notes   |\n+===================+===================================+=========+\n| "\\N{name}"        | Character named *name* in the     | (4)     |\n|                   | Unicode database                  |         |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx"          | Character with 16-bit hex value   | (5)     |\n|                   | *xxxx*                            |         |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx"      | Character with 32-bit hex value   | (6)     |\n|                   | *xxxxxxxx*                        |         |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the byte\n   with the given value. In a string literal, these escapes denote a\n   Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n   added.\n\n5. Individual code units which form parts of a surrogate pair can be\n   encoded using this escape sequence.  Exactly four hex digits are\n   required.\n\n6. Any Unicode character can be encoded this way.  Exactly eight hex\n   digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*.  (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.)  
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, "r"\\""" is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; "r"\\"" is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes).  Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character).  Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n',
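A minimal interactive sketch of the escape-sequence rules documented in the entry above (the example literals are only illustrative):

   >>> len('\n'), len(r'\n')          # the raw prefix keeps the backslash
   (1, 2)
   >>> '\d'                           # unrecognized escape: backslash stays
   '\\d'
   >>> b'\x41\101'                    # hex and octal escapes denote byte values
   b'AA'
   >>> '\N{BULLET}' == '\u2022'       # \N{...} is recognized only in str literals
   True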
+ 'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant.  Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n   A dictionary or other mapping object used to store an object\'s\n   (writable) attributes.\n\ninstance.__class__\n\n   The class to which a class instance belongs.\n\nclass.__bases__\n\n   The tuple of base classes of a class object.\n\nclass.__name__\n\n   The name of the class or type.\n\nclass.__qualname__\n\n   The *qualified name* of the class or type.\n\n   New in version 3.3.\n\nclass.__mro__\n\n   This attribute is a tuple of classes that are considered when\n   looking for base classes during method resolution.\n\nclass.mro()\n\n   This method can be overridden by a metaclass to customize the\n   method resolution order for its instances.  It is called at class\n   instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n   Each class keeps a list of weak references to its immediate\n   subclasses.  This method returns a list of all those references\n   still alive. Example:\n\n      >>> int.__subclasses__()\n      [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n    in the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n    "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n    operands.\n\n[4] Cased characters are those with general category property\n    being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n    or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n    singleton tuple whose only element is the tuple to be formatted.\n',
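A minimal interactive sketch of the special attributes documented in the entry above (the throwaway classes "A" and "B" are only illustrative):

   >>> class A: pass
   ...
   >>> class B(A): pass
   ...
   >>> [c.__name__ for c in B.__mro__]          # method resolution order
   ['B', 'A', 'object']
   >>> [c.__name__ for c in A.__subclasses__()]
   ['B']
   >>> B.__qualname__
   'B'
   >>> B().__class__ is B
   True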
+ 'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators.  For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled.  For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense.  (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n   Called to create a new instance of class *cls*.  "__new__()" is a\n   static method (special-cased so you need not declare it as such)\n   that takes the class of which an instance was requested as its\n   first argument.  The remaining arguments are those passed to the\n   object constructor expression (the call to the class).  The return\n   value of "__new__()" should be the new object instance (usually an\n   instance of *cls*).\n\n   Typical implementations create a new instance of the class by\n   invoking the superclass\'s "__new__()" method using\n   "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n   arguments and then modifying the newly-created instance as\n   necessary before returning it.\n\n   If "__new__()" returns an instance of *cls*, then the new\n   instance\'s "__init__()" method will be invoked like\n   "__init__(self[, ...])", where *self* is the new instance and the\n   remaining arguments are the same as were passed to "__new__()".\n\n   If "__new__()" does not return an instance of *cls*, then the new\n   instance\'s "__init__()" method will not be invoked.\n\n   "__new__()" is intended mainly to allow subclasses of immutable\n   types (like int, str, or tuple) to customize instance creation.  It\n   is also commonly overridden in custom metaclasses in order to\n   customize class creation.\n\nobject.__init__(self[, ...])\n\n   Called when the instance is created.  The arguments are those\n   passed to the class constructor expression.  If a base class has an\n   "__init__()" method, the derived class\'s "__init__()" method, if\n   any, must explicitly call it to ensure proper initialization of the\n   base class part of the instance; for example:\n   "BaseClass.__init__(self, [args...])".  As a special constraint on\n   constructors, no value may be returned; doing so will cause a\n   "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n   Called when the instance is about to be destroyed.  This is also\n   called a destructor.  If a base class has a "__del__()" method, the\n   derived class\'s "__del__()" method, if any, must explicitly call it\n   to ensure proper deletion of the base class part of the instance.\n   Note that it is possible (though not recommended!) 
for the\n   "__del__()" method to postpone destruction of the instance by\n   creating a new reference to it.  It may then be called at a later\n   time when this new reference is deleted.  It is not guaranteed that\n   "__del__()" methods are called for objects that still exist when\n   the interpreter exits.\n\n   Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n     decrements the reference count for "x" by one, and the latter is\n     only called when "x"\'s reference count reaches zero.  Some common\n     situations that may prevent the reference count of an object from\n     going to zero include: circular references between objects (e.g.,\n     a doubly-linked list or a tree data structure with parent and\n     child pointers); a reference to the object on the stack frame of\n     a function that caught an exception (the traceback stored in\n     "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n     to the object on the stack frame that raised an unhandled\n     exception in interactive mode (the traceback stored in\n     "sys.last_traceback" keeps the stack frame alive).  The first\n     situation can only be remedied by explicitly breaking the cycles;\n     the latter two situations can be resolved by storing "None" in\n     "sys.last_traceback". Circular references which are garbage are\n     detected and cleaned up when the cyclic garbage collector is\n     enabled (it\'s on by default). Refer to the documentation for the\n     "gc" module for more information about this topic.\n\n   Warning: Due to the precarious circumstances under which\n     "__del__()" methods are invoked, exceptions that occur during\n     their execution are ignored, and a warning is printed to\n     "sys.stderr" instead. Also, when "__del__()" is invoked in\n     response to a module being deleted (e.g., when execution of the\n     program is done), other globals referenced by the "__del__()"\n     method may already have been deleted or in the process of being\n     torn down (e.g. the import machinery shutting down).  For this\n     reason, "__del__()" methods should do the absolute minimum needed\n     to maintain external invariants.  Starting with version 1.5,\n     Python guarantees that globals whose name begins with a single\n     underscore are deleted from their module before other globals are\n     deleted; if no other references to such globals exist, this may\n     help in assuring that imported modules are still available at the\n     time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n   Called by the "repr()" built-in function to compute the "official"\n   string representation of an object.  If at all possible, this\n   should look like a valid Python expression that could be used to\n   recreate an object with the same value (given an appropriate\n   environment).  If this is not possible, a string of the form\n   "<...some useful description...>" should be returned. The return\n   value must be a string object. If a class defines "__repr__()" but\n   not "__str__()", then "__repr__()" is also used when an "informal"\n   string representation of instances of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by "str(object)" and the built-in functions "format()" and\n   "print()" to compute the "informal" or nicely printable string\n   representation of an object.  
The return value must be a *string*\n   object.\n\n   This method differs from "object.__repr__()" in that there is no\n   expectation that "__str__()" return a valid Python expression: a\n   more convenient or concise representation can be used.\n\n   The default implementation defined by the built-in type "object"\n   calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n   Called by "bytes()" to compute a byte-string representation of an\n   object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n   Called by the "format()" built-in function (and by extension, the\n   "str.format()" method of class "str") to produce a "formatted"\n   string representation of an object. The "format_spec" argument is a\n   string that contains a description of the formatting options\n   desired. The interpretation of the "format_spec" argument is up to\n   the type implementing "__format__()", however most classes will\n   either delegate formatting to one of the built-in types, or use a\n   similar formatting option syntax.\n\n   See *Format Specification Mini-Language* for a description of the\n   standard formatting syntax.\n\n   The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   These are the so-called "rich comparison" methods. The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of "x==y" does not imply that "x!=y" is false.\n   Accordingly, when defining "__eq__()", one should also define\n   "__ne__()" so that the operators will behave as expected.  See the\n   paragraph on "__hash__()" for some important notes on creating\n   *hashable* objects which support custom comparison operations and\n   are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n   and "__eq__()" and "__ne__()" are their own reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer.  The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. 
using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   Note: "hash()" truncates the value returned from an object\'s\n     custom "__hash__()" method to the size of a "Py_ssize_t".  This\n     is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n     builds. If an object\'s   "__hash__()" must interoperate on builds\n     of different bit sizes, be sure to check the width on all\n     supported builds.  An easy way to do this is with "python -c\n     "import sys; print(sys.hash_info.width)""\n\n   If a class does not define an "__eq__()" method it should not\n   define a "__hash__()" operation either; if it defines "__eq__()"\n   but not "__hash__()", its instances will not be usable as items in\n   hashable collections.  If a class defines mutable objects and\n   implements an "__eq__()" method, it should not implement\n   "__hash__()", since the implementation of hashable collections\n   requires that a key\'s hash value is immutable (if the object\'s hash\n   value changes, it will be in the wrong hash bucket).\n\n   User-defined classes have "__eq__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns an appropriate value such\n   that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n   A class that overrides "__eq__()" and does not define "__hash__()"\n   will have its "__hash__()" implicitly set to "None".  When the\n   "__hash__()" method of a class is "None", instances of the class\n   will raise an appropriate "TypeError" when a program attempts to\n   retrieve their hash value, and will also be correctly identified as\n   unhashable when checking "isinstance(obj, collections.Hashable)".\n\n   If a class that overrides "__eq__()" needs to retain the\n   implementation of "__hash__()" from a parent class, the interpreter\n   must be told this explicitly by setting "__hash__ =\n   <ParentClass>.__hash__".\n\n   If a class that does not override "__eq__()" wishes to suppress\n   hash support, it should include "__hash__ = None" in the class\n   definition. A class which defines its own "__hash__()" that\n   explicitly raises a "TypeError" would be incorrectly identified as\n   hashable by an "isinstance(obj, collections.Hashable)" call.\n\n   Note: By default, the "__hash__()" values of str, bytes and\n     datetime objects are "salted" with an unpredictable random value.\n     Although they remain constant within an individual Python\n     process, they are not predictable between repeated invocations of\n     Python.  This is intended to provide protection against a denial-\n     of-service caused by carefully-chosen inputs that exploit the\n     worst case performance of a dict insertion, O(n^2) complexity.\n     See http://www.ocert.org/advisories/ocert-2011-003.html for\n     details.  Changing hash values affects the iteration order of\n     dicts, sets and other mappings.  Python has never made guarantees\n     about this ordering (and it typically varies between 32-bit and\n     64-bit builds).  See also "PYTHONHASHSEED".\n\n   Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True".  When this method is not\n   defined, "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero.  
If a class defines\n   neither "__len__()" nor "__bool__()", all its instances are\n   considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for "self").  "name" is the attribute name. This\n   method should return the (computed) attribute value or raise an\n   "AttributeError" exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   "__getattr__()" is not called.  (This is an intentional asymmetry\n   between "__getattr__()" and "__setattr__()".) This is done both for\n   efficiency reasons and because otherwise "__getattr__()" would have\n   no way to access other attributes of the instance.  Note that at\n   least for instance variables, you can fake total control by not\n   inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object).  See the\n   "__getattribute__()" method below for a way to actually get total\n   control over attribute access.\n\nobject.__getattribute__(self, name)\n\n   Called unconditionally to implement attribute accesses for\n   instances of the class. If the class also defines "__getattr__()",\n   the latter will not be called unless "__getattribute__()" either\n   calls it explicitly or raises an "AttributeError". This method\n   should return the (computed) attribute value or raise an\n   "AttributeError" exception. In order to avoid infinite recursion in\n   this method, its implementation should always call the base class\n   method with the same name to access any attributes it needs, for\n   example, "object.__getattribute__(self, name)".\n\n   Note: This method may still be bypassed when looking up special\n     methods as the result of implicit invocation via language syntax\n     or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n   Called when an attribute assignment is attempted.  This is called\n   instead of the normal mechanism (i.e. store the value in the\n   instance dictionary). *name* is the attribute name, *value* is the\n   value to be assigned to it.\n\n   If "__setattr__()" wants to assign to an instance attribute, it\n   should call the base class method with the same name, for example,\n   "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n   Like "__setattr__()" but for attribute deletion instead of\n   assignment.  This should only be implemented if "del obj.name" is\n   meaningful for the object.\n\nobject.__dir__(self)\n\n   Called when "dir()" is called on the object. A sequence must be\n   returned. "dir()" converts the returned sequence to a list and\n   sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents).  
In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n   Called to get the attribute of the owner class (class attribute\n   access) or of an instance of that class (instance attribute\n   access). *owner* is always the owner class, while *instance* is the\n   instance that the attribute was accessed through, or "None" when\n   the attribute is accessed through the *owner*.  This method should\n   return the (computed) attribute value or raise an "AttributeError"\n   exception.\n\nobject.__set__(self, instance, value)\n\n   Called to set the attribute on an instance *instance* of the owner\n   class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n   Called to delete the attribute on an instance *instance* of the\n   owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol:  "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead.  Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method:    "x.__get__(a)".\n\nInstance Binding\n   If binding to an object instance, "a.x" is transformed into the\n   call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n   If binding to a class, "A.x" is transformed into the call:\n   "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n   If "a" is an instance of "super", then the binding "super(B,\n   obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n   immediately preceding "B" and then invokes the descriptor with the\n   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined.  A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()".  If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary.  If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor.  Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method.  Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary.  
In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors.  Accordingly, instances can\nredefine and override methods.  This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage.  This wastes space for objects having very few instance\nvariables.  The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable.  Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances.  If defined in a\n   class, *__slots__* reserves space for the declared variables and\n   prevents the automatic creation of *__dict__* and *__weakref__* for\n   each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition.  Attempts to\n  assign to an unlisted variable name raises "AttributeError". If\n  dynamic assignment of new variables is desired, then add\n  "\'__dict__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n  defining *__slots__* do not support weak references to its\n  instances. If weak reference support is needed, then add\n  "\'__weakref__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (*Implementing Descriptors*) for each variable name.  As\n  a result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined.  As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__* (which must only contain names\n  of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n  instance variable defined by the base class slot is inaccessible\n  (except by retrieving its descriptor directly from the base class).\n  This renders the meaning of the program undefined.  In the future, a\n  check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings\n  may also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n   class Meta(type):\n       pass\n\n   class MyClass(metaclass=Meta):\n       pass\n\n   class MySubclass(MyClass):\n       pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n  used\n\n* if an explicit metaclass is given and it is *not* an instance of\n  "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n  bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also: **PEP 3115** - Metaclasses in Python 3000\n\n     Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. 
Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also: **PEP 3135** - New super\n\n     Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class members\nwere defined:\n\n   class OrderedClass(type):\n\n        @classmethod\n        def __prepare__(metacls, name, bases, **kwds):\n           return collections.OrderedDict()\n\n        def __new__(cls, name, bases, namespace, **kwds):\n           result = type.__new__(cls, name, bases, dict(namespace))\n           result.members = tuple(namespace)\n           return result\n\n   class A(metaclass=OrderedClass):\n       def one(self): pass\n       def two(self): pass\n       def three(self): pass\n       def four(self): pass\n\n   >>> A.members\n   (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict".  That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked.  That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n   Return true if *instance* should be considered a (direct or\n   indirect) instance of *class*. 
If defined, called to implement\n   "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n   Return true if *subclass* should be considered a (direct or\n   indirect) subclass of *class*.  If defined, called to implement\n   "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass.  They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n     Includes the specification for customizing "isinstance()" and\n     "issubclass()" behavior through "__instancecheck__()" and\n     "__subclasscheck__()", with motivation for this functionality in\n     the context of adding Abstract Base Classes (see the "abc"\n     module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n   Called when the instance is "called" as a function; if this method\n   is defined, "x(arg1, arg2, ...)" is a shorthand for\n   "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well.  The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items.  It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects.  Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators.  It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values.  It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()".  Should return\n   the length of the object, an integer ">=" 0.  Also, an object that\n   doesn\'t define a "__bool__()" method and whose "__len__()" method\n   returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n   Called to implement "operator.length_hint()". 
Should return an\n   estimated length for the object (which may be greater or less than\n   the actual length). The length must be an integer ">=" 0. This\n   method is purely an optimization and is never required for\n   correctness.\n\n   New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n  A call like\n\n     a[1:2] = b\n\n  is translated to\n\n     a[slice(1, 2, None)] = b\n\n  and so forth.  Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects.  Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced.  The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence.  The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container.  For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "keys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves.  For more information on iterator\n   objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration.  It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()").  Objects that support the sequence protocol\n   should only provide "__reversed__()" if they can provide an\n   implementation that is more efficient than the one provided by\n   "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence.  
However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n   Called to implement membership test operators.  Should return true\n   if *item* is in *self*, false otherwise.  For mapping objects, this\n   should consider the keys of the mapping rather than the values or\n   the key-item pairs.\n\n   For objects that don\'t define "__contains__()", the membership test\n   first tries iteration via "__iter__()", then the old sequence\n   iteration protocol via "__getitem__()", see *this section in the\n   language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|").  For instance, to evaluate the\n   expression "x + y", where *x* is an instance of a class that has an\n   "__add__()" method, "x.__add__(y)" is called.  The "__divmod__()"\n   method should be the equivalent to using "__floordiv__()" and\n   "__mod__()"; it should not be related to "__truediv__()".  Note\n   that "__pow__()" should be defined to accept an optional third\n   argument if the ternary version of the built-in "pow()" function is\n   to be supported.\n\n   If one of those methods does not support the operation with the\n   supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n   "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n   These functions are only called if the left operand does not\n   support the corresponding operation and the operands are of\n   different types. [2]  For instance, to evaluate the expression "x -\n   y", where *y* is an instance of a class that has an "__rsub__()"\n   method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n   *NotImplemented*.\n\n   Note that ternary "pow()" will not try calling "__rpow__()" (the\n   coercion rules would become too complicated).\n\n   Note: If the right operand\'s type is a subclass of the left\n     operand\'s type and that subclass provides the reflected method\n     for the operation, this method will be called before the left\n     operand\'s non-reflected method.  
This behavior allows subclasses\n     to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n   These methods are called to implement the augmented arithmetic\n   assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n   ">>=", "&=", "^=", "|=").  These methods should attempt to do the\n   operation in-place (modifying *self*) and return the result (which\n   could be, but does not have to be, *self*).  If a specific method\n   is not defined, the augmented assignment falls back to the normal\n   methods.  For instance, to execute the statement "x += y", where\n   *x* is an instance of a class that has an "__iadd__()" method,\n   "x.__iadd__(y)" is called.  If *x* is an instance of a class that\n   does not define a "__iadd__()" method, "x.__add__(y)" and\n   "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n   Called to implement the unary arithmetic operations ("-", "+",\n   "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n   Called to implement the built-in functions "complex()", "int()",\n   "float()" and "round()".  Should return a value of the appropriate\n   type.\n\nobject.__index__(self)\n\n   Called to implement "operator.index()", and whenever Python needs\n   to losslessly convert the numeric object to an integer object (such\n   as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n   functions). Presence of this method indicates that the numeric\n   object is an integer type.  Must return an integer.\n\n   Note: When "__index__()" is defined, "__int__()" should also be\n     defined, and both should return the same value, in order to have a\n     coherent integer type class.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. 
If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary.  That behaviour is the reason why\nthe following code raises an exception:\n\n   >>> class C:\n   ...     pass\n   ...\n   >>> c = C()\n   >>> c.__len__ = lambda: 5\n   >>> len(c)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n   >>> 1 .__hash__() == hash(1)\n   True\n   >>> int.__hash__() == hash(int)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n   >>> type(1).__hash__(1) == hash(1)\n   True\n   >>> type(int).__hash__(int) == hash(int)\n   True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n   >>> class Meta(type):\n   ...    def __getattribute__(*args):\n   ...       print("Metaclass getattribute invoked")\n   ...       return type.__getattribute__(*args)\n   ...\n   >>> class C(object, metaclass=Meta):\n   ...     def __len__(self):\n   ...         return 10\n   ...     def __getattribute__(*args):\n   ...         print("Class getattribute invoked")\n   ...         return object.__getattribute__(*args)\n   ...\n   >>> c = C()\n   >>> c.__len__()                 # Explicit lookup via instance\n   Class getattribute invoked\n   10\n   >>> type(c).__len__(c)          # Explicit lookup via type\n   Metaclass getattribute invoked\n   10\n   >>> len(c)                      # Implicit lookup\n   10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n    under certain controlled conditions. 
It generally isn\'t a good\n    idea though, since it can lead to some very strange behaviour if\n    it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n    reflected method (such as "__add__()") fails the operation is not\n    supported, which is why the reflected method is not called.\n',
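To make the context manager protocol described above concrete, here is a minimal, illustrative sketch; the class name "suppress" is invented for the example (the standard library offers equivalent behaviour as "contextlib.suppress"):

   >>> class suppress:
   ...     def __init__(self, *exc_types):
   ...         self.exc_types = exc_types
   ...     def __enter__(self):
   ...         return self
   ...     def __exit__(self, exc_type, exc_value, traceback):
   ...         # A true return value tells the "with" statement to swallow
   ...         # the exception; None/False lets it propagate normally.
   ...         return exc_type is not None and issubclass(exc_type, self.exc_types)
   ...
   >>> with suppress(KeyError):
   ...     {}['missing']
   ...
   >>> 'still running'
   'still running'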
+ 'string-methods': '\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n   Return a copy of the string with its first character capitalized\n   and the rest lowercased.\n\nstr.casefold()\n\n   Return a casefolded copy of the string. Casefolded strings may be\n   used for caseless matching.\n\n   Casefolding is similar to lowercasing but more aggressive because\n   it is intended to remove all case distinctions in a string. For\n   example, the German lowercase letter "\'\xc3\x9f\'" is equivalent to ""ss"".\n   Since it is already lowercase, "lower()" would do nothing to "\'\xc3\x9f\'";\n   "casefold()" converts it to ""ss"".\n\n   The casefolding algorithm is described in section 3.13 of the\n   Unicode Standard.\n\n   New in version 3.3.\n\nstr.center(width[, fillchar])\n\n   Return centered in a string of length *width*. Padding is done\n   using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n   Return the number of non-overlapping occurrences of substring *sub*\n   in the range [*start*, *end*].  Optional arguments *start* and\n   *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n   Return an encoded version of the string as a bytes object. Default\n   encoding is "\'utf-8\'". *errors* may be given to set a different\n   error handling scheme. The default for *errors* is "\'strict\'",\n   meaning that encoding errors raise a "UnicodeError". Other possible\n   values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n   "\'backslashreplace\'" and any other name registered via\n   "codecs.register_error()", see section *Codec Base Classes*. For a\n   list of possible encodings, see section *Standard Encodings*.\n\n   Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n   Return "True" if the string ends with the specified *suffix*,\n   otherwise return "False".  *suffix* can also be a tuple of suffixes\n   to look for.  With optional *start*, test beginning at that\n   position.  With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n   Return a copy of the string where all tab characters are replaced\n   by one or more spaces, depending on the current column and the\n   given tab size.  Tab positions occur every *tabsize* characters\n   (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n   To expand the string, the current column is set to zero and the\n   string is examined character by character.  If the character is a\n   tab ("\\t"), one or more space characters are inserted in the result\n   until the current column is equal to the next tab position. (The\n   tab character itself is not copied.)  
If the character is a newline\n   ("\\n") or return ("\\r"), it is copied and the current column is\n   reset to zero.  Any other character is copied unchanged and the\n   current column is incremented by one regardless of how the\n   character is represented when printed.\n\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n   \'01      012     0123    01234\'\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n   \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n   Return the lowest index in the string where substring *sub* is\n   found, such that *sub* is contained in the slice "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" if *sub* is not found.\n\n   Note: The "find()" method should be used only if you need to know\n     the position of *sub*.  To check if *sub* is a substring or not,\n     use the "in" operator:\n\n        >>> \'Py\' in \'Python\'\n        True\n\nstr.format(*args, **kwargs)\n\n   Perform a string formatting operation.  The string on which this\n   method is called can contain literal text or replacement fields\n   delimited by braces "{}".  Each replacement field contains either\n   the numeric index of a positional argument, or the name of a\n   keyword argument.  Returns a copy of the string where each\n   replacement field is replaced with the string value of the\n   corresponding argument.\n\n   >>> "The sum of 1 + 2 is {0}".format(1+2)\n   \'The sum of 1 + 2 is 3\'\n\n   See *Format String Syntax* for a description of the various\n   formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n   Similar to "str.format(**mapping)", except that "mapping" is used\n   directly and not copied to a "dict".  This is useful if for example\n   "mapping" is a dict subclass:\n\n   >>> class Default(dict):\n   ...     def __missing__(self, key):\n   ...         return key\n   ...\n   >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n   \'Guido was born in country\'\n\n   New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n   Like "find()", but raise "ValueError" when the substring is not\n   found.\n\nstr.isalnum()\n\n   Return true if all characters in the string are alphanumeric and\n   there is at least one character, false otherwise.  A character "c"\n   is alphanumeric if one of the following returns "True":\n   "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n   Return true if all characters in the string are alphabetic and\n   there is at least one character, false otherwise.  Alphabetic\n   characters are those characters defined in the Unicode character\n   database as "Letter", i.e., those with general category property\n   being one of "Lm", "Lt", "Lu", "Ll", or "Lo".  Note that this is\n   different from the "Alphabetic" property defined in the Unicode\n   Standard.\n\nstr.isdecimal()\n\n   Return true if all characters in the string are decimal characters\n   and there is at least one character, false otherwise. Decimal\n   characters are those from general category "Nd". This category\n   includes digit characters, and all characters that can be used to\n   form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n   Return true if all characters in the string are digits and there is\n   at least one character, false otherwise.  Digits include decimal\n   characters and digits that need special handling, such as the\n   compatibility superscript digits.  
Formally, a digit is a character\n   that has the property value Numeric_Type=Digit or\n   Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n   Return true if the string is a valid identifier according to the\n   language definition, section *Identifiers and keywords*.\n\n   Use "keyword.iskeyword()" to test for reserved identifiers such as\n   "def" and "class".\n\nstr.islower()\n\n   Return true if all cased characters [4] in the string are lowercase\n   and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n   Return true if all characters in the string are numeric characters,\n   and there is at least one character, false otherwise. Numeric\n   characters include digit characters, and all characters that have\n   the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n   ONE FIFTH.  Formally, numeric characters are those with the\n   property value Numeric_Type=Digit, Numeric_Type=Decimal or\n   Numeric_Type=Numeric.\n\nstr.isprintable()\n\n   Return true if all characters in the string are printable or the\n   string is empty, false otherwise.  Nonprintable characters are\n   those characters defined in the Unicode character database as\n   "Other" or "Separator", excepting the ASCII space (0x20) which is\n   considered printable.  (Note that printable characters in this\n   context are those which should not be escaped when "repr()" is\n   invoked on a string.  It has no bearing on the handling of strings\n   written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n   Return true if there are only whitespace characters in the string\n   and there is at least one character, false otherwise.  Whitespace\n   characters  are those characters defined in the Unicode character\n   database as "Other" or "Separator" and those with bidirectional\n   property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n   Return true if the string is a titlecased string and there is at\n   least one character, for example uppercase characters may only\n   follow uncased characters and lowercase characters only cased ones.\n   Return false otherwise.\n\nstr.isupper()\n\n   Return true if all cased characters [4] in the string are uppercase\n   and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n   Return a string which is the concatenation of the strings in the\n   *iterable* *iterable*.  A "TypeError" will be raised if there are\n   any non-string values in *iterable*, including "bytes" objects.\n   The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n   Return the string left justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space).  The original string is returned if *width* is less than or\n   equal to "len(s)".\n\nstr.lower()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to lowercase.\n\n   The lowercasing algorithm used is described in section 3.13 of the\n   Unicode Standard.\n\nstr.lstrip([chars])\n\n   Return a copy of the string with leading characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  
The *chars* argument is not a prefix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.lstrip()\n   \'spacious   \'\n   >>> \'www.example.com\'.lstrip(\'cmowz.\')\n   \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n   This static method returns a translation table usable for\n   "str.translate()".\n\n   If there is only one argument, it must be a dictionary mapping\n   Unicode ordinals (integers) or characters (strings of length 1) to\n   Unicode ordinals, strings (of arbitrary lengths) or None.\n   Character keys will then be converted to ordinals.\n\n   If there are two arguments, they must be strings of equal length,\n   and in the resulting dictionary, each character in x will be mapped\n   to the character at the same position in y.  If there is a third\n   argument, it must be a string, whose characters will be mapped to\n   None in the result.\n\nstr.partition(sep)\n\n   Split the string at the first occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing the string itself, followed by\n   two empty strings.\n\nstr.replace(old, new[, count])\n\n   Return a copy of the string with all occurrences of substring *old*\n   replaced by *new*.  If the optional argument *count* is given, only\n   the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n   Return the highest index in the string where substring *sub* is\n   found, such that *sub* is contained within "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n   Like "rfind()" but raises "ValueError" when the substring *sub* is\n   not found.\n\nstr.rjust(width[, fillchar])\n\n   Return the string right justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space). The original string is returned if *width* is less than or\n   equal to "len(s)".\n\nstr.rpartition(sep)\n\n   Split the string at the last occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing two empty strings, followed by\n   the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n   are done, the *rightmost* ones.  If *sep* is not specified or\n   "None", any whitespace string is a separator.  Except for splitting\n   from the right, "rsplit()" behaves like "split()" which is\n   described in detail below.\n\nstr.rstrip([chars])\n\n   Return a copy of the string with trailing characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a suffix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.rstrip()\n   \'   spacious\'\n   >>> \'mississippi\'.rstrip(\'ipz\')\n   \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string.  
If *maxsplit* is given, at most *maxsplit*\n   splits are done (thus, the list will have at most "maxsplit+1"\n   elements).  If *maxsplit* is not specified or "-1", then there is\n   no limit on the number of splits (all possible splits are made).\n\n   If *sep* is given, consecutive delimiters are not grouped together\n   and are deemed to delimit empty strings (for example,\n   "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']").  The *sep* argument\n   may consist of multiple characters (for example,\n   "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n   empty string with a specified separator returns "[\'\']".\n\n   If *sep* is not specified or is "None", a different splitting\n   algorithm is applied: runs of consecutive whitespace are regarded\n   as a single separator, and the result will contain no empty strings\n   at the start or end if the string has leading or trailing\n   whitespace.  Consequently, splitting an empty string or a string\n   consisting of just whitespace with a "None" separator returns "[]".\n\n   For example, "\' 1  2   3  \'.split()" returns "[\'1\', \'2\', \'3\']", and\n   "\'  1  2   3  \'.split(None, 1)" returns "[\'1\', \'2   3  \']".\n\nstr.splitlines([keepends])\n\n   Return a list of the lines in the string, breaking at line\n   boundaries. This method uses the *universal newlines* approach to\n   splitting lines. Line breaks are not included in the resulting list\n   unless *keepends* is given and true.\n\n   For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n   c\', \'\', \'de fg\', \'kl\']", while the same call with\n   "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n   Unlike "split()" when a delimiter string *sep* is given, this\n   method returns an empty list for the empty string, and a terminal\n   line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n   Return "True" if string starts with the *prefix*, otherwise return\n   "False". *prefix* can also be a tuple of prefixes to look for.\n   With optional *start*, test string beginning at that position.\n   With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n   Return a copy of the string with the leading and trailing\n   characters removed. The *chars* argument is a string specifying the\n   set of characters to be removed. If omitted or "None", the *chars*\n   argument defaults to removing whitespace. The *chars* argument is\n   not a prefix or suffix; rather, all combinations of its values are\n   stripped:\n\n   >>> \'   spacious   \'.strip()\n   \'spacious\'\n   >>> \'www.example.com\'.strip(\'cmowz.\')\n   \'example\'\n\nstr.swapcase()\n\n   Return a copy of the string with uppercase characters converted to\n   lowercase and vice versa. Note that it is not necessarily true that\n   "s.swapcase().swapcase() == s".\n\nstr.title()\n\n   Return a titlecased version of the string where words start with an\n   uppercase character and the remaining characters are lowercase.\n\n   The algorithm uses a simple language-independent definition of a\n   word as groups of consecutive letters.  
The definition works in\n   many contexts but it means that apostrophes in contractions and\n   possessives form word boundaries, which may not be the desired\n   result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n   A workaround for apostrophes can be constructed using regular\n   expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n   Return a copy of the *s* where all characters have been mapped\n   through the *map* which must be a dictionary of Unicode ordinals\n   (integers) to Unicode ordinals, strings or "None".  Unmapped\n   characters are left untouched. Characters mapped to "None" are\n   deleted.\n\n   You can use "str.maketrans()" to create a translation map from\n   character-to-character mappings in different formats.\n\n   Note: An even more flexible approach is to create a custom\n     character mapping codec using the "codecs" module (see\n     "encodings.cp1251" for an example).\n\nstr.upper()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to uppercase.  Note that "str.upper().isupper()" might be\n   "False" if "s" contains uncased characters or if the Unicode\n   category of the resulting character(s) is not "Lu" (Letter,\n   uppercase), but e.g. "Lt" (Letter, titlecase).\n\n   The uppercasing algorithm used is described in section 3.13 of the\n   Unicode Standard.\n\nstr.zfill(width)\n\n   Return the numeric string left filled with zeros in a string of\n   length *width*.  A sign prefix is handled correctly.  The original\n   string is returned if *width* is less than or equal to "len(s)".\n',
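As a brief, illustrative sketch of the "str.maketrans()", "str.translate()" and "str.partition()" behaviour described above (the sample strings are arbitrary):

   >>> table = str.maketrans({'a': '4', 'e': '3'})      # dictionary form
   >>> 'release'.translate(table)
   'r3l34s3'
   >>> 'release'.translate(str.maketrans('ae', '43'))   # two-string form
   'r3l34s3'
   >>> 'key=value=extra'.partition('=')
   ('key', '=', 'value=extra')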
+ 'strings': '\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n   stringliteral   ::= [stringprefix](shortstring | longstring)\n   stringprefix    ::= "r" | "u" | "R" | "U"\n   shortstring     ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n   longstring      ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n   shortstringitem ::= shortstringchar | stringescapeseq\n   longstringitem  ::= longstringchar | stringescapeseq\n   shortstringchar ::= <any source character except "\\" or newline or the quote>\n   longstringchar  ::= <any source character except "\\">\n   stringescapeseq ::= "\\" <any source character>\n\n   bytesliteral   ::= bytesprefix(shortbytes | longbytes)\n   bytesprefix    ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n   shortbytes     ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n   longbytes      ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n   shortbytesitem ::= shortbyteschar | bytesescapeseq\n   longbytesitem  ::= longbyteschar | bytesescapeseq\n   shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>\n   longbyteschar  ::= <any ASCII character except "\\">\n   bytesescapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes (""").  They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*).  The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type.  They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters.  As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\n   New in version 3.3: The "\'rb\'" prefix of raw bytes literals has\n   been added as a synonym of "\'br\'".\n\n   New in version 3.3: Support for the unicode legacy literal\n   ("u\'value\'") was reintroduced to simplify the maintenance of dual\n   Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string.  (A "quote" is the character used to open the\nstring, i.e. 
either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C.  The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence   | Meaning                           | Notes   |\n+===================+===================================+=========+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n| "\\ooo"            | Character with octal value *ooo*  | (1,3)   |\n+-------------------+-----------------------------------+---------+\n| "\\xhh"            | Character with hex value *hh*     | (2,3)   |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence   | Meaning                           | Notes   |\n+===================+===================================+=========+\n| "\\N{name}"        | Character named *name* in the     | (4)     |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx"          | Character with 16-bit hex value   | (5)     |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx"      | Character with 32-bit hex value   | (6)     |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the\n   byte with the given value. In a string literal, these escapes\n   denote a Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n   added.\n\n5. Individual code units which form parts of a surrogate pair can\n   be encoded using this escape sequence.  Exactly four hex digits are\n   required.\n\n6. Any Unicode character can be encoded this way.  Exactly eight\n   hex digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*.  (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.)  
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, "r"\\""" is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; "r"\\"" is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes).  Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character).  Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n',
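A few interactive checks illustrating the escape-sequence and raw-string rules above (output as produced by a Python 3 interpreter):

   >>> len('\n'), len(r'\n')      # recognized escape vs. raw string
   (1, 2)
   >>> '\x41' == '\u0041' == 'A'  # hex and 16-bit escapes for U+0041
   True
   >>> '\q'                       # unrecognized escape: backslash kept
   '\\q'
   >>> b'caf\xc3\xa9'.decode('utf-8')   # bytes >= 128 written as escapes
   'café'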
  'subscriptions': '\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n   subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription,\ne.g. a list or dictionary.  User-defined objects can support\nsubscription by defining a "__getitem__()" method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey.  (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a "__getitem__()"\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that "x[-1]" selects the last item of "x").\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero). Since the support for\nnegative indices and slicing occurs in the object\'s "__getitem__()"\nmethod, subclasses overriding this method will need to explicitly add\nthat support.\n\nA string\'s items are characters.  A character is not a separate data\ntype but a string of exactly one character.\n',
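A minimal sketch of a user-defined type that supports subscription; the "Squares" class is invented for the example and, as noted above, has to handle negative indices itself:

   >>> class Squares:
   ...     def __init__(self, n):
   ...         self.n = n
   ...     def __len__(self):
   ...         return self.n
   ...     def __getitem__(self, i):
   ...         if i < 0:                      # emulate built-in sequences
   ...             i += self.n
   ...         if not 0 <= i < self.n:
   ...             raise IndexError(i)        # also ends "for" loops
   ...         return i * i
   ...
   >>> s = Squares(5)
   >>> s[2], s[-1]
   (4, 16)
   >>> list(s)                                # iteration via __getitem__()
   [0, 1, 4, 9, 16]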
  'truth': '\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n  "__bool__()" or "__len__()" method, when that method returns the\n  integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n',
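A short interactive illustration of the rules above; the "Bag" class is a made-up example that defines "__len__()" but no "__bool__()":

   >>> [bool(x) for x in (None, False, 0, 0.0, 0j, '', (), [], {})]
   [False, False, False, False, False, False, False, False, False]
   >>> class Bag:
   ...     def __init__(self, items):
   ...         self.items = list(items)
   ...     def __len__(self):                 # used for truth testing here
   ...         return len(self.items)
   ...
   >>> bool(Bag([])), bool(Bag(['pear']))
   (False, True)
   >>> 0 or 'default'                         # "or" returns an operand
   'default'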
- 'try': '\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression ["as" target]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started.  This search inspects the except clauses\nin turn until one is found that matches the exception.  An expression-\nless except clause, if present, must be last; it matches any\nexception.  For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception.  An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed.  All except\nclauses must have an executable block.  When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause.  This is as if\n\n   except E as N:\n       foo\n\nwas translated to\n\n   except E as N:\n       try:\n           foo\n       finally:\n           del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause.  Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be access via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred.  "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. 
 The "try"\nclause is executed, including any "except" and "else" clauses.  If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed.  If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause.  If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n   >>> def f():\n   ...     try:\n   ...         1/0\n   ...     finally:\n   ...         return 42\n   ...\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed.  Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n   >>> def foo():\n   ...     try:\n   ...         return \'try\'\n   ...     finally:\n   ...         return \'finally\'\n   ...\n   >>> foo()\n   \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n',
+ 'try': '\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression ["as" target]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started.  This search inspects the except clauses\nin turn until one is found that matches the exception.  An expression-\nless except clause, if present, must be last; it matches any\nexception.  For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception.  An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed.  All except\nclauses must have an executable block.  When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause.  This is as if\n\n   except E as N:\n       foo\n\nwas translated to\n\n   except E as N:\n       try:\n           foo\n       finally:\n           del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause.  Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred.  "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. 
 The "try"\nclause is executed, including any "except" and "else" clauses.  If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed.  If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause.  If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n   def f():\n       try:\n           1/0\n       finally:\n           return 42\n\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n',
  'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python.  Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types.  Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\'  These are attributes that provide access to the\nimplementation and are not intended for general use.  Their definition\nmay change in the future.\n\nNone\n   This type has a single value.  There is a single object with this\n   value. This object is accessed through the built-in name "None". It\n   is used to signify the absence of a value in many situations, e.g.,\n   it is returned from functions that don\'t explicitly return\n   anything. Its truth value is false.\n\nNotImplemented\n   This type has a single value.  There is a single object with this\n   value. This object is accessed through the built-in name\n   "NotImplemented". Numeric methods and rich comparison methods may\n   return this value if they do not implement the operation for the\n   operands provided.  (The interpreter will then try the reflected\n   operation, or some other fallback, depending on the operator.)  Its\n   truth value is true.\n\nEllipsis\n   This type has a single value.  There is a single object with this\n   value. This object is accessed through the literal "..." or the\n   built-in name "Ellipsis".  Its truth value is true.\n\n"numbers.Number"\n   These are created by numeric literals and returned as results by\n   arithmetic operators and arithmetic built-in functions.  Numeric\n   objects are immutable; once created their value never changes.\n   Python numbers are of course strongly related to mathematical\n   numbers, but subject to the limitations of numerical representation\n   in computers.\n\n   Python distinguishes between integers, floating point numbers, and\n   complex numbers:\n\n   "numbers.Integral"\n      These represent elements from the mathematical set of integers\n      (positive and negative).\n\n      There are two types of integers:\n\n      Integers ("int")\n\n         These represent numbers in an unlimited range, subject to\n         available (virtual) memory only.  For the purpose of shift\n         and mask operations, a binary representation is assumed, and\n         negative numbers are represented in a variant of 2\'s\n         complement which gives the illusion of an infinite string of\n         sign bits extending to the left.\n\n      Booleans ("bool")\n         These represent the truth values False and True.  The two\n         objects representing the values "False" and "True" are the\n         only Boolean objects. The Boolean type is a subtype of the\n         integer type, and Boolean values behave like the values 0 and\n         1, respectively, in almost all contexts, the exception being\n         that when converted to a string, the strings ""False"" or\n         ""True"" are returned, respectively.\n\n      The rules for integer representation are intended to give the\n      most meaningful interpretation of shift and mask operations\n      involving negative integers.\n\n   "numbers.Real" ("float")\n      These represent machine-level double precision floating point\n      numbers. 
You are at the mercy of the underlying machine\n      architecture (and C or Java implementation) for the accepted\n      range and handling of overflow. Python does not support single-\n      precision floating point numbers; the savings in processor and\n      memory usage that are usually the reason for using these is\n      dwarfed by the overhead of using objects in Python, so there is\n      no reason to complicate the language with two kinds of floating\n      point numbers.\n\n   "numbers.Complex" ("complex")\n      These represent complex numbers as a pair of machine-level\n      double precision floating point numbers.  The same caveats apply\n      as for floating point numbers. The real and imaginary parts of a\n      complex number "z" can be retrieved through the read-only\n      attributes "z.real" and "z.imag".\n\nSequences\n   These represent finite ordered sets indexed by non-negative\n   numbers. The built-in function "len()" returns the number of items\n   of a sequence. When the length of a sequence is *n*, the index set\n   contains the numbers 0, 1, ..., *n*-1.  Item *i* of sequence *a* is\n   selected by "a[i]".\n\n   Sequences also support slicing: "a[i:j]" selects all items with\n   index *k* such that *i* "<=" *k* "<" *j*.  When used as an\n   expression, a slice is a sequence of the same type.  This implies\n   that the index set is renumbered so that it starts at 0.\n\n   Some sequences also support "extended slicing" with a third "step"\n   parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n   "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n   Sequences are distinguished according to their mutability:\n\n   Immutable sequences\n      An object of an immutable sequence type cannot change once it is\n      created.  (If the object contains references to other objects,\n      these other objects may be mutable and may be changed; however,\n      the collection of objects directly referenced by an immutable\n      object cannot change.)\n\n      The following types are immutable sequences:\n\n      Strings\n         A string is a sequence of values that represent Unicode\n         codepoints. All the codepoints in range "U+0000 - U+10FFFF"\n         can be represented in a string.  Python doesn\'t have a "chr"\n         type, and every character in the string is represented as a\n         string object with length "1".  The built-in function "ord()"\n         converts a character to its codepoint (as an integer);\n         "chr()" converts an integer in range "0 - 10FFFF" to the\n         corresponding character. "str.encode()" can be used to\n         convert a "str" to "bytes" using the given encoding, and\n         "bytes.decode()" can be used to achieve the opposite.\n\n      Tuples\n         The items of a tuple are arbitrary Python objects. Tuples of\n         two or more items are formed by comma-separated lists of\n         expressions.  A tuple of one item (a \'singleton\') can be\n         formed by affixing a comma to an expression (an expression by\n         itself does not create a tuple, since parentheses must be\n         usable for grouping of expressions).  An empty tuple can be\n         formed by an empty pair of parentheses.\n\n      Bytes\n         A bytes object is an immutable array.  The items are 8-bit\n         bytes, represented by integers in the range 0 <= x < 256.\n         Bytes literals (like "b\'abc\'") and the built-in function\n         "bytes()" can be used to construct bytes objects.  
Also,\n         bytes objects can be decoded to strings via the "decode()"\n         method.\n\n   Mutable sequences\n      Mutable sequences can be changed after they are created.  The\n      subscription and slicing notations can be used as the target of\n      assignment and "del" (delete) statements.\n\n      There are currently two intrinsic mutable sequence types:\n\n      Lists\n         The items of a list are arbitrary Python objects.  Lists are\n         formed by placing a comma-separated list of expressions in\n         square brackets. (Note that there are no special cases needed\n         to form lists of length 0 or 1.)\n\n      Byte Arrays\n         A bytearray object is a mutable array. They are created by\n         the built-in "bytearray()" constructor.  Aside from being\n         mutable (and hence unhashable), byte arrays otherwise provide\n         the same interface and functionality as immutable bytes\n         objects.\n\n      The extension module "array" provides an additional example of a\n      mutable sequence type, as does the "collections" module.\n\nSet types\n   These represent unordered, finite sets of unique, immutable\n   objects. As such, they cannot be indexed by any subscript. However,\n   they can be iterated over, and the built-in function "len()"\n   returns the number of items in a set. Common uses for sets are fast\n   membership testing, removing duplicates from a sequence, and\n   computing mathematical operations such as intersection, union,\n   difference, and symmetric difference.\n\n   For set elements, the same immutability rules apply as for\n   dictionary keys. Note that numeric types obey the normal rules for\n   numeric comparison: if two numbers compare equal (e.g., "1" and\n   "1.0"), only one of them can be contained in a set.\n\n   There are currently two intrinsic set types:\n\n   Sets\n      These represent a mutable set. They are created by the built-in\n      "set()" constructor and can be modified afterwards by several\n      methods, such as "add()".\n\n   Frozen sets\n      These represent an immutable set.  They are created by the\n      built-in "frozenset()" constructor.  As a frozenset is immutable\n      and *hashable*, it can be used again as an element of another\n      set, or as a dictionary key.\n\nMappings\n   These represent finite sets of objects indexed by arbitrary index\n   sets. The subscript notation "a[k]" selects the item indexed by "k"\n   from the mapping "a"; this can be used in expressions and as the\n   target of assignments or "del" statements. The built-in function\n   "len()" returns the number of items in a mapping.\n\n   There is currently a single intrinsic mapping type:\n\n   Dictionaries\n      These represent finite sets of objects indexed by nearly\n      arbitrary values.  
The only types of values not acceptable as\n      keys are values containing lists or dictionaries or other\n      mutable types that are compared by value rather than by object\n      identity, the reason being that the efficient implementation of\n      dictionaries requires a key\'s hash value to remain constant.\n      Numeric types used for keys obey the normal rules for numeric\n      comparison: if two numbers compare equal (e.g., "1" and "1.0")\n      then they can be used interchangeably to index the same\n      dictionary entry.\n\n      Dictionaries are mutable; they can be created by the "{...}"\n      notation (see section *Dictionary displays*).\n\n      The extension modules "dbm.ndbm" and "dbm.gnu" provide\n      additional examples of mapping types, as does the "collections"\n      module.\n\nCallable types\n   These are the types to which the function call operation (see\n   section *Calls*) can be applied:\n\n   User-defined functions\n      A user-defined function object is created by a function\n      definition (see section *Function definitions*).  It should be\n      called with an argument list containing the same number of items\n      as the function\'s formal parameter list.\n\n      Special attributes:\n\n      +---------------------------+---------------------------------+-------------+\n      +===========================+=================================+=============+\n      | "__doc__"                 | The function\'s documentation    | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__name__"                | The function\'s name             | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__qualname__"            | The function\'s *qualified name* | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__module__"              | The name of the module the      | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__defaults__"            | A tuple containing default      | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__code__"                | The code object representing    | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__globals__"             | A reference to the dictionary   | Read-only   |\n      +---------------------------+---------------------------------+-------------+\n      | "__dict__"                | The namespace supporting        | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__closure__"             | "None" or a tuple of cells that | Read-only   |\n      +---------------------------+---------------------------------+-------------+\n      | "__annotations__"         | A dict containing annotations   | Writable    |\n      +---------------------------+---------------------------------+-------------+\n      | "__kwdefaults__"          | A dict containing defaults for  | Writable    |\n      +---------------------------+---------------------------------+-------------+\n\n      Most of the attributes labelled "Writable" check the type of the\n      assigned value.\n\n      Function objects also support getting and setting arbitrary\n      attributes, which can be used, for example, to attach metadata\n      to functions.  
Regular attribute dot-notation is used to get and\n      set such attributes. *Note that the current implementation only\n      supports function attributes on user-defined functions. Function\n      attributes on built-in functions may be supported in the\n      future.*\n\n      Additional information about a function\'s definition can be\n      retrieved from its code object; see the description of internal\n      types below.\n\n   Instance methods\n      An instance method object combines a class, a class instance and\n      any callable object (normally a user-defined function).\n\n      Special read-only attributes: "__self__" is the class instance\n      object, "__func__" is the function object; "__doc__" is the\n      method\'s documentation (same as "__func__.__doc__"); "__name__"\n      is the method name (same as "__func__.__name__"); "__module__"\n      is the name of the module the method was defined in, or "None"\n      if unavailable.\n\n      Methods also support accessing (but not setting) the arbitrary\n      function attributes on the underlying function object.\n\n      User-defined method objects may be created when getting an\n      attribute of a class (perhaps via an instance of that class), if\n      that attribute is a user-defined function object or a class\n      method object.\n\n      When an instance method object is created by retrieving a user-\n      defined function object from a class via one of its instances,\n      its "__self__" attribute is the instance, and the method object\n      is said to be bound.  The new method\'s "__func__" attribute is\n      the original function object.\n\n      When a user-defined method object is created by retrieving\n      another method object from a class or instance, the behaviour is\n      the same as for a function object, except that the "__func__"\n      attribute of the new instance is not the original method object\n      but its "__func__" attribute.\n\n      When an instance method object is created by retrieving a class\n      method object from a class or instance, its "__self__" attribute\n      is the class itself, and its "__func__" attribute is the\n      function object underlying the class method.\n\n      When an instance method object is called, the underlying\n      function ("__func__") is called, inserting the class instance\n      ("__self__") in front of the argument list.  For instance, when\n      "C" is a class which contains a definition for a function "f()",\n      and "x" is an instance of "C", calling "x.f(1)" is equivalent to\n      calling "C.f(x, 1)".\n\n      When an instance method object is derived from a class method\n      object, the "class instance" stored in "__self__" will actually\n      be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n      is equivalent to calling "f(C,1)" where "f" is the underlying\n      function.\n\n      Note that the transformation from function object to instance\n      method object happens each time the attribute is retrieved from\n      the instance.  In some cases, a fruitful optimization is to\n      assign the attribute to a local variable and call that local\n      variable. Also notice that this transformation only happens for\n      user-defined functions; other callable objects (and all non-\n      callable objects) are retrieved without transformation.  
It is\n      also important to note that user-defined functions which are\n      attributes of a class instance are not converted to bound\n      methods; this *only* happens when the function is an attribute\n      of the class.\n\n   Generator functions\n      A function or method which uses the "yield" statement (see\n      section *The yield statement*) is called a *generator function*.\n      Such a function, when called, always returns an iterator object\n      which can be used to execute the body of the function:  calling\n      the iterator\'s "iterator.__next__()" method will cause the\n      function to execute until it provides a value using the "yield"\n      statement.  When the function executes a "return" statement or\n      falls off the end, a "StopIteration" exception is raised and the\n      iterator will have reached the end of the set of values to be\n      returned.\n\n   Built-in functions\n      A built-in function object is a wrapper around a C function.\n      Examples of built-in functions are "len()" and "math.sin()"\n      ("math" is a standard built-in module). The number and type of\n      the arguments are determined by the C function. Special read-\n      only attributes: "__doc__" is the function\'s documentation\n      string, or "None" if unavailable; "__name__" is the function\'s\n      name; "__self__" is set to "None" (but see the next item);\n      "__module__" is the name of the module the function was defined\n      in or "None" if unavailable.\n\n   Built-in methods\n      This is really a different disguise of a built-in function, this\n      time containing an object passed to the C function as an\n      implicit extra argument.  An example of a built-in method is\n      "alist.append()", assuming *alist* is a list object. In this\n      case, the special read-only attribute "__self__" is set to the\n      object denoted by *alist*.\n\n   Classes\n      Classes are callable.  These objects normally act as factories\n      for new instances of themselves, but variations are possible for\n      class types that override "__new__()".  The arguments of the\n      call are passed to "__new__()" and, in the typical case, to\n      "__init__()" to initialize the new instance.\n\n   Class Instances\n      Instances of arbitrary classes can be made callable by defining\n      a "__call__()" method in their class.\n\nModules\n   Modules are a basic organizational unit of Python code, and are\n   created by the *import system* as invoked either by the "import"\n   statement (see "import"), or by calling functions such as\n   "importlib.import_module()" and built-in "__import__()".  A module\n   object has a namespace implemented by a dictionary object (this is\n   the dictionary referenced by the "__globals__" attribute of\n   functions defined in the module).  Attribute references are\n   translated to lookups in this dictionary, e.g., "m.x" is equivalent\n   to "m.__dict__["x"]". 
A module object does not contain the code\n   object used to initialize the module (since it isn\'t needed once\n   the initialization is done).\n\n   Attribute assignment updates the module\'s namespace dictionary,\n   e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n   Special read-only attribute: "__dict__" is the module\'s namespace\n   as a dictionary object.\n\n   **CPython implementation detail:** Because of the way CPython\n   clears module dictionaries, the module dictionary will be cleared\n   when the module falls out of scope even if the dictionary still has\n   live references.  To avoid this, copy the dictionary or keep the\n   module around while using its dictionary directly.\n\n   Predefined (writable) attributes: "__name__" is the module\'s name;\n   "__doc__" is the module\'s documentation string, or "None" if\n   unavailable; "__file__" is the pathname of the file from which the\n   module was loaded, if it was loaded from a file. The "__file__"\n   attribute may be missing for certain types of modules, such as C\n   modules that are statically linked into the interpreter; for\n   extension modules loaded dynamically from a shared library, it is\n   the pathname of the shared library file.\n\nCustom classes\n   Custom class types are typically created by class definitions (see\n   section *Class definitions*).  A class has a namespace implemented\n   by a dictionary object. Class attribute references are translated\n   to lookups in this dictionary, e.g., "C.x" is translated to\n   "C.__dict__["x"]" (although there are a number of hooks which allow\n   for other means of locating attributes). When the attribute name is\n   not found there, the attribute search continues in the base\n   classes. This search of the base classes uses the C3 method\n   resolution order which behaves correctly even in the presence of\n   \'diamond\' inheritance structures where there are multiple\n   inheritance paths leading back to a common ancestor. Additional\n   details on the C3 MRO used by Python can be found in the\n   documentation accompanying the 2.3 release at\n   http://www.python.org/download/releases/2.3/mro/.\n\n   When a class attribute reference (for class "C", say) would yield a\n   class method object, it is transformed into an instance method\n   object whose "__self__" attributes is "C".  When it would yield a\n   static method object, it is transformed into the object wrapped by\n   the static method object. 
See section *Implementing Descriptors*\n   for another way in which attributes retrieved from a class may\n   differ from those actually contained in its "__dict__".\n\n   Class attribute assignments update the class\'s dictionary, never\n   the dictionary of a base class.\n\n   A class object can be called (see above) to yield a class instance\n   (see below).\n\n   Special attributes: "__name__" is the class name; "__module__" is\n   the module name in which the class was defined; "__dict__" is the\n   dictionary containing the class\'s namespace; "__bases__" is a tuple\n   (possibly empty or a singleton) containing the base classes, in the\n   order of their occurrence in the base class list; "__doc__" is the\n   class\'s documentation string, or None if undefined.\n\nClass instances\n   A class instance is created by calling a class object (see above).\n   A class instance has a namespace implemented as a dictionary which\n   is the first place in which attribute references are searched.\n   When an attribute is not found there, and the instance\'s class has\n   an attribute by that name, the search continues with the class\n   attributes.  If a class attribute is found that is a user-defined\n   function object, it is transformed into an instance method object\n   whose "__self__" attribute is the instance.  Static method and\n   class method objects are also transformed; see above under\n   "Classes".  See section *Implementing Descriptors* for another way\n   in which attributes of a class retrieved via its instances may\n   differ from the objects actually stored in the class\'s "__dict__".\n   If no class attribute is found, and the object\'s class has a\n   "__getattr__()" method, that is called to satisfy the lookup.\n\n   Attribute assignments and deletions update the instance\'s\n   dictionary, never a class\'s dictionary.  If the class has a\n   "__setattr__()" or "__delattr__()" method, this is called instead\n   of updating the instance dictionary directly.\n\n   Class instances can pretend to be numbers, sequences, or mappings\n   if they have methods with certain special names.  See section\n   *Special method names*.\n\n   Special attributes: "__dict__" is the attribute dictionary;\n   "__class__" is the instance\'s class.\n\nI/O objects (also known as file objects)\n   A *file object* represents an open file.  Various shortcuts are\n   available to create file objects: the "open()" built-in function,\n   and also "os.popen()", "os.fdopen()", and the "makefile()" method\n   of socket objects (and perhaps by other functions or methods\n   provided by extension modules).\n\n   The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n   initialized to file objects corresponding to the interpreter\'s\n   standard input, output and error streams; they are all open in text\n   mode and therefore follow the interface defined by the\n   "io.TextIOBase" abstract class.\n\nInternal types\n   A few types used internally by the interpreter are exposed to the\n   user. Their definitions may change with future versions of the\n   interpreter, but they are mentioned here for completeness.\n\n   Code objects\n      Code objects represent *byte-compiled* executable Python code,\n      or *bytecode*. 
The difference between a code object and a\n      function object is that the function object contains an explicit\n      reference to the function\'s globals (the module in which it was\n      defined), while a code object contains no context; also the\n      default argument values are stored in the function object, not\n      in the code object (because they represent values calculated at\n      run-time).  Unlike function objects, code objects are immutable\n      and contain no references (directly or indirectly) to mutable\n      objects.\n\n      Special read-only attributes: "co_name" gives the function name;\n      "co_argcount" is the number of positional arguments (including\n      arguments with default values); "co_nlocals" is the number of\n      local variables used by the function (including arguments);\n      "co_varnames" is a tuple containing the names of the local\n      variables (starting with the argument names); "co_cellvars" is a\n      tuple containing the names of local variables that are\n      referenced by nested functions; "co_freevars" is a tuple\n      containing the names of free variables; "co_code" is a string\n      representing the sequence of bytecode instructions; "co_consts"\n      is a tuple containing the literals used by the bytecode;\n      "co_names" is a tuple containing the names used by the bytecode;\n      "co_filename" is the filename from which the code was compiled;\n      "co_firstlineno" is the first line number of the function;\n      "co_lnotab" is a string encoding the mapping from bytecode\n      offsets to line numbers (for details see the source code of the\n      interpreter); "co_stacksize" is the required stack size\n      (including local variables); "co_flags" is an integer encoding a\n      number of flags for the interpreter.\n\n      The following flag bits are defined for "co_flags": bit "0x04"\n      is set if the function uses the "*arguments" syntax to accept an\n      arbitrary number of positional arguments; bit "0x08" is set if\n      the function uses the "**keywords" syntax to accept arbitrary\n      keyword arguments; bit "0x20" is set if the function is a\n      generator.\n\n      Future feature declarations ("from __future__ import division")\n      also use bits in "co_flags" to indicate whether a code object\n      was compiled with a particular feature enabled: bit "0x2000" is\n      set if the function was compiled with future division enabled;\n      bits "0x10" and "0x1000" were used in earlier versions of\n      Python.\n\n      Other bits in "co_flags" are reserved for internal use.\n\n      If a code object represents a function, the first item in\n      "co_consts" is the documentation string of the function, or\n      "None" if undefined.\n\n   Frame objects\n      Frame objects represent execution frames.  
They may occur in\n      traceback objects (see below).\n\n      Special read-only attributes: "f_back" is to the previous stack\n      frame (towards the caller), or "None" if this is the bottom\n      stack frame; "f_code" is the code object being executed in this\n      frame; "f_locals" is the dictionary used to look up local\n      variables; "f_globals" is used for global variables;\n      "f_builtins" is used for built-in (intrinsic) names; "f_lasti"\n      gives the precise instruction (this is an index into the\n      bytecode string of the code object).\n\n      Special writable attributes: "f_trace", if not "None", is a\n      function called at the start of each source code line (this is\n      used by the debugger); "f_lineno" is the current line number of\n      the frame --- writing to this from within a trace function jumps\n      to the given line (only for the bottom-most frame).  A debugger\n      can implement a Jump command (aka Set Next Statement) by writing\n      to f_lineno.\n\n      Frame objects support one method:\n\n      frame.clear()\n\n         This method clears all references to local variables held by\n         the frame.  Also, if the frame belonged to a generator, the\n         generator is finalized.  This helps break reference cycles\n         involving frame objects (for example when catching an\n         exception and storing its traceback for later use).\n\n         "RuntimeError" is raised if the frame is currently executing.\n\n         New in version 3.4.\n\n   Traceback objects\n      Traceback objects represent a stack trace of an exception.  A\n      traceback object is created when an exception occurs.  When the\n      search for an exception handler unwinds the execution stack, at\n      each unwound level a traceback object is inserted in front of\n      the current traceback.  When an exception handler is entered,\n      the stack trace is made available to the program. (See section\n      *The try statement*.) It is accessible as the third item of the\n      tuple returned by "sys.exc_info()". When the program contains no\n      suitable handler, the stack trace is written (nicely formatted)\n      to the standard error stream; if the interpreter is interactive,\n      it is also made available to the user as "sys.last_traceback".\n\n      Special read-only attributes: "tb_next" is the next level in the\n      stack trace (towards the frame where the exception occurred), or\n      "None" if there is no next level; "tb_frame" points to the\n      execution frame of the current level; "tb_lineno" gives the line\n      number where the exception occurred; "tb_lasti" indicates the\n      precise instruction.  The line number and last instruction in\n      the traceback may differ from the line number of its frame\n      object if the exception occurred in a "try" statement with no\n      matching except clause or with a finally clause.\n\n   Slice objects\n      Slice objects are used to represent slices for "__getitem__()"\n      methods.  They are also created by the built-in "slice()"\n      function.\n\n      Special read-only attributes: "start" is the lower bound; "stop"\n      is the upper bound; "step" is the step value; each is "None" if\n      omitted.  
These attributes can have any type.\n\n      Slice objects support one method:\n\n      slice.indices(self, length)\n\n         This method takes a single integer argument *length* and\n         computes information about the slice that the slice object\n         would describe if applied to a sequence of *length* items.\n         It returns a tuple of three integers; respectively these are\n         the *start* and *stop* indices and the *step* or stride\n         length of the slice. Missing or out-of-bounds indices are\n         handled in a manner consistent with regular slices.\n\n   Static method objects\n      Static method objects provide a way of defeating the\n      transformation of function objects to method objects described\n      above. A static method object is a wrapper around any other\n      object, usually a user-defined method object. When a static\n      method object is retrieved from a class or a class instance, the\n      object actually returned is the wrapped object, which is not\n      subject to any further transformation. Static method objects are\n      not themselves callable, although the objects they wrap usually\n      are. Static method objects are created by the built-in\n      "staticmethod()" constructor.\n\n   Class method objects\n      A class method object, like a static method object, is a wrapper\n      around another object that alters the way in which that object\n      is retrieved from classes and class instances. The behaviour of\n      class method objects upon such retrieval is described above,\n      under "User-defined methods". Class method objects are created\n      by the built-in "classmethod()" constructor.\n',
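The paragraphs above on slice objects and on static and class method objects lend themselves to a short demonstration. The following doctest-style sketch is an editorial illustration (the class name "C" is invented, not taken from the patch) of how "slice.indices()" clamps out-of-range bounds and how static and class method objects behave on retrieval:

   >>> s = slice(2, 50, 3)
   >>> s.indices(10)          # stop is clamped to the sequence length
   (2, 10, 3)
   >>> class C:
   ...     @staticmethod
   ...     def s():
   ...         return 'wrapped object, no self inserted'
   ...     @classmethod
   ...     def c(cls):
   ...         return cls.__name__
   ...
   >>> C().s()                # static method: the wrapped function is returned as-is
   'wrapped object, no self inserted'
   >>> C.c() == C().c() == 'C'   # class method: __self__ is the class itself
   True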
  'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions.  The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions.  Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n',
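As a quick illustration of the two flavors described in this topic, the following sketch (added for clarity; "square" is a made-up example function) shows that built-in and user-defined functions are called identically but are distinct object types:

   >>> import math, types
   >>> def square(x):
   ...     return x * x
   ...
   >>> square(4), math.sin(0.0)      # both flavors use the same call syntax
   (16, 0.0)
   >>> isinstance(square, types.FunctionType)
   True
   >>> isinstance(math.sin, types.BuiltinFunctionType)
   True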
- 'typesmapping': '\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects.  There is currently only one standard\nmapping type, the *dictionary*.  (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values.  Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys.  Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry.  (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n   Return a new dictionary initialized from an optional positional\n   argument and a possibly empty set of keyword arguments.\n\n   If no positional argument is given, an empty dictionary is created.\n   If a positional argument is given and it is a mapping object, a\n   dictionary is created with the same key-value pairs as the mapping\n   object.  Otherwise, the positional argument must be an *iterator*\n   object.  Each item in the iterable must itself be an iterator with\n   exactly two objects.  The first object of each item becomes a key\n   in the new dictionary, and the second object the corresponding\n   value.  If a key occurs more than once, the last value for that key\n   becomes the corresponding value in the new dictionary.\n\n   If keyword arguments are given, the keyword arguments and their\n   values are added to the dictionary created from the positional\n   argument.  If a key being added is already present, the value from\n   the keyword argument replaces the value from the positional\n   argument.\n\n   To illustrate, the following examples all return a dictionary equal\n   to "{"one": 1, "two": 2, "three": 3}":\n\n      >>> a = dict(one=1, two=2, three=3)\n      >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n      >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n      >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n      >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n      >>> a == b == c == d == e\n      True\n\n   Providing keyword arguments as in the first example only works for\n   keys that are valid Python identifiers.  Otherwise, any valid keys\n   can be used.\n\n   These are the operations that dictionaries support (and therefore,\n   custom mapping types should support too):\n\n   len(d)\n\n      Return the number of items in the dictionary *d*.\n\n   d[key]\n\n      Return the item of *d* with key *key*.  Raises a "KeyError" if\n      *key* is not in the map.\n\n      If a subclass of dict defines a method "__missing__()", if the\n      key *key* is not present, the "d[key]" operation calls that\n      method with the key *key* as argument.  
The "d[key]" operation\n      then returns or raises whatever is returned or raised by the\n      "__missing__(key)" call if the key is not present. No other\n      operations or methods invoke "__missing__()". If "__missing__()"\n      is not defined, "KeyError" is raised. "__missing__()" must be a\n      method; it cannot be an instance variable:\n\n         >>> class Counter(dict):\n         ...     def __missing__(self, key):\n         ...         return 0\n         >>> c = Counter()\n         >>> c[\'red\']\n         0\n         >>> c[\'red\'] += 1\n         >>> c[\'red\']\n         1\n\n      See "collections.Counter" for a complete implementation\n      including other methods helpful for accumulating and managing\n      tallies.\n\n   d[key] = value\n\n      Set "d[key]" to *value*.\n\n   del d[key]\n\n      Remove "d[key]" from *d*.  Raises a "KeyError" if *key* is not\n      in the map.\n\n   key in d\n\n      Return "True" if *d* has a key *key*, else "False".\n\n   key not in d\n\n      Equivalent to "not key in d".\n\n   iter(d)\n\n      Return an iterator over the keys of the dictionary.  This is a\n      shortcut for "iter(d.keys())".\n\n   clear()\n\n      Remove all items from the dictionary.\n\n   copy()\n\n      Return a shallow copy of the dictionary.\n\n   classmethod fromkeys(seq[, value])\n\n      Create a new dictionary with keys from *seq* and values set to\n      *value*.\n\n      "fromkeys()" is a class method that returns a new dictionary.\n      *value* defaults to "None".\n\n   get(key[, default])\n\n      Return the value for *key* if *key* is in the dictionary, else\n      *default*. If *default* is not given, it defaults to "None", so\n      that this method never raises a "KeyError".\n\n   items()\n\n      Return a new view of the dictionary\'s items ("(key, value)"\n      pairs). See the *documentation of view objects*.\n\n   keys()\n\n      Return a new view of the dictionary\'s keys.  See the\n      *documentation of view objects*.\n\n   pop(key[, default])\n\n      If *key* is in the dictionary, remove it and return its value,\n      else return *default*.  If *default* is not given and *key* is\n      not in the dictionary, a "KeyError" is raised.\n\n   popitem()\n\n      Remove and return an arbitrary "(key, value)" pair from the\n      dictionary.\n\n      "popitem()" is useful to destructively iterate over a\n      dictionary, as often used in set algorithms.  If the dictionary\n      is empty, calling "popitem()" raises a "KeyError".\n\n   setdefault(key[, default])\n\n      If *key* is in the dictionary, return its value.  If not, insert\n      *key* with a value of *default* and return *default*.  *default*\n      defaults to "None".\n\n   update([other])\n\n      Update the dictionary with the key/value pairs from *other*,\n      overwriting existing keys.  Return "None".\n\n      "update()" accepts either another dictionary object or an\n      iterable of key/value pairs (as tuples or other iterables of\n      length two).  If keyword arguments are specified, the dictionary\n      is then updated with those key/value pairs: "d.update(red=1,\n      blue=2)".\n\n   values()\n\n      Return a new view of the dictionary\'s values.  See the\n      *documentation of view objects*.\n\nSee also:\n\n   "types.MappingProxyType" can be used to create a read-only view of\n   a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*.  
They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n   Return the number of entries in the dictionary.\n\niter(dictview)\n\n   Return an iterator over the keys, values or items (represented as\n   tuples of "(key, value)") in the dictionary.\n\n   Keys and values are iterated over in an arbitrary order which is\n   non-random, varies across Python implementations, and depends on\n   the dictionary\'s history of insertions and deletions. If keys,\n   values and items views are iterated over with no intervening\n   modifications to the dictionary, the order of items will directly\n   correspond.  This allows the creation of "(value, key)" pairs using\n   "zip()": "pairs = zip(d.values(), d.keys())".  Another way to\n   create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n   Iterating views while adding or deleting entries in the dictionary\n   may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n   Return "True" if *x* is in the underlying dictionary\'s keys, values\n   or items (in the latter case, *x* should be a "(key, value)"\n   tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like.  (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n   >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n   >>> keys = dishes.keys()\n   >>> values = dishes.values()\n\n   >>> # iteration\n   >>> n = 0\n   >>> for val in values:\n   ...     n += val\n   >>> print(n)\n   504\n\n   >>> # keys and values are iterated over in the same order\n   >>> list(keys)\n   [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n   >>> list(values)\n   [2, 1, 1, 500]\n\n   >>> # view objects are dynamic and reflect dict changes\n   >>> del dishes[\'eggs\']\n   >>> del dishes[\'sausage\']\n   >>> list(keys)\n   [\'spam\', \'bacon\']\n\n   >>> # set operations\n   >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n   {\'bacon\'}\n   >>> keys ^ {\'sausage\', \'juice\'}\n   {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n',
+ 'typesmapping': '\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects.  There is currently only one standard\nmapping type, the *dictionary*.  (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values.  Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys.  Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry.  (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n   Return a new dictionary initialized from an optional positional\n   argument and a possibly empty set of keyword arguments.\n\n   If no positional argument is given, an empty dictionary is created.\n   If a positional argument is given and it is a mapping object, a\n   dictionary is created with the same key-value pairs as the mapping\n   object.  Otherwise, the positional argument must be an *iterator*\n   object.  Each item in the iterable must itself be an iterator with\n   exactly two objects.  The first object of each item becomes a key\n   in the new dictionary, and the second object the corresponding\n   value.  If a key occurs more than once, the last value for that key\n   becomes the corresponding value in the new dictionary.\n\n   If keyword arguments are given, the keyword arguments and their\n   values are added to the dictionary created from the positional\n   argument.  If a key being added is already present, the value from\n   the keyword argument replaces the value from the positional\n   argument.\n\n   To illustrate, the following examples all return a dictionary equal\n   to "{"one": 1, "two": 2, "three": 3}":\n\n      >>> a = dict(one=1, two=2, three=3)\n      >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n      >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n      >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n      >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n      >>> a == b == c == d == e\n      True\n\n   Providing keyword arguments as in the first example only works for\n   keys that are valid Python identifiers.  Otherwise, any valid keys\n   can be used.\n\n   These are the operations that dictionaries support (and therefore,\n   custom mapping types should support too):\n\n   len(d)\n\n      Return the number of items in the dictionary *d*.\n\n   d[key]\n\n      Return the item of *d* with key *key*.  Raises a "KeyError" if\n      *key* is not in the map.\n\n      If a subclass of dict defines a method "__missing__()", if the\n      key *key* is not present, the "d[key]" operation calls that\n      method with the key *key* as argument.  
The "d[key]" operation\n      then returns or raises whatever is returned or raised by the\n      "__missing__(key)" call if the key is not present. No other\n      operations or methods invoke "__missing__()". If "__missing__()"\n      is not defined, "KeyError" is raised. "__missing__()" must be a\n      method; it cannot be an instance variable:\n\n         >>> class Counter(dict):\n         ...     def __missing__(self, key):\n         ...         return 0\n         >>> c = Counter()\n         >>> c[\'red\']\n         0\n         >>> c[\'red\'] += 1\n         >>> c[\'red\']\n         1\n\n      See "collections.Counter" for a complete implementation\n      including other methods helpful for accumulating and managing\n      tallies.\n\n   d[key] = value\n\n      Set "d[key]" to *value*.\n\n   del d[key]\n\n      Remove "d[key]" from *d*.  Raises a "KeyError" if *key* is not\n      in the map.\n\n   key in d\n\n      Return "True" if *d* has a key *key*, else "False".\n\n   key not in d\n\n      Equivalent to "not key in d".\n\n   iter(d)\n\n      Return an iterator over the keys of the dictionary.  This is a\n      shortcut for "iter(d.keys())".\n\n   clear()\n\n      Remove all items from the dictionary.\n\n   copy()\n\n      Return a shallow copy of the dictionary.\n\n   classmethod fromkeys(seq[, value])\n\n      Create a new dictionary with keys from *seq* and values set to\n      *value*.\n\n      "fromkeys()" is a class method that returns a new dictionary.\n      *value* defaults to "None".\n\n   get(key[, default])\n\n      Return the value for *key* if *key* is in the dictionary, else\n      *default*. If *default* is not given, it defaults to "None", so\n      that this method never raises a "KeyError".\n\n   items()\n\n      Return a new view of the dictionary\'s items ("(key, value)"\n      pairs). See the *documentation of view objects*.\n\n   keys()\n\n      Return a new view of the dictionary\'s keys.  See the\n      *documentation of view objects*.\n\n   pop(key[, default])\n\n      If *key* is in the dictionary, remove it and return its value,\n      else return *default*.  If *default* is not given and *key* is\n      not in the dictionary, a "KeyError" is raised.\n\n   popitem()\n\n      Remove and return an arbitrary "(key, value)" pair from the\n      dictionary.\n\n      "popitem()" is useful to destructively iterate over a\n      dictionary, as often used in set algorithms.  If the dictionary\n      is empty, calling "popitem()" raises a "KeyError".\n\n   setdefault(key[, default])\n\n      If *key* is in the dictionary, return its value.  If not, insert\n      *key* with a value of *default* and return *default*.  *default*\n      defaults to "None".\n\n   update([other])\n\n      Update the dictionary with the key/value pairs from *other*,\n      overwriting existing keys.  Return "None".\n\n      "update()" accepts either another dictionary object or an\n      iterable of key/value pairs (as tuples or other iterables of\n      length two).  If keyword arguments are specified, the dictionary\n      is then updated with those key/value pairs: "d.update(red=1,\n      blue=2)".\n\n   values()\n\n      Return a new view of the dictionary\'s values.  See the\n      *documentation of view objects*.\n\nSee also: "types.MappingProxyType" can be used to create a read-only\n  view of a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*.  
They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n   Return the number of entries in the dictionary.\n\niter(dictview)\n\n   Return an iterator over the keys, values or items (represented as\n   tuples of "(key, value)") in the dictionary.\n\n   Keys and values are iterated over in an arbitrary order which is\n   non-random, varies across Python implementations, and depends on\n   the dictionary\'s history of insertions and deletions. If keys,\n   values and items views are iterated over with no intervening\n   modifications to the dictionary, the order of items will directly\n   correspond.  This allows the creation of "(value, key)" pairs using\n   "zip()": "pairs = zip(d.values(), d.keys())".  Another way to\n   create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n   Iterating views while adding or deleting entries in the dictionary\n   may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n   Return "True" if *x* is in the underlying dictionary\'s keys, values\n   or items (in the latter case, *x* should be a "(key, value)"\n   tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like.  (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n   >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n   >>> keys = dishes.keys()\n   >>> values = dishes.values()\n\n   >>> # iteration\n   >>> n = 0\n   >>> for val in values:\n   ...     n += val\n   >>> print(n)\n   504\n\n   >>> # keys and values are iterated over in the same order\n   >>> list(keys)\n   [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n   >>> list(values)\n   [2, 1, 1, 500]\n\n   >>> # view objects are dynamic and reflect dict changes\n   >>> del dishes[\'eggs\']\n   >>> del dishes[\'sausage\']\n   >>> list(keys)\n   [\'spam\', \'bacon\']\n\n   >>> # set operations\n   >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n   {\'bacon\'}\n   >>> keys ^ {\'sausage\', \'juice\'}\n   {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n',
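To complement the view-object example already present in the topic text, here is a small doctest-style sketch (an editorial addition using a throw-away dictionary "d") of the "fromkeys()", "setdefault()", "get()" and "update()" operations described above; "sorted()" is used so the output does not depend on dictionary ordering:

   >>> d = dict.fromkeys(['a', 'b'], 0)
   >>> sorted(d.items())
   [('a', 0), ('b', 0)]
   >>> d.setdefault('c', 3)          # inserts the key and returns the default
   3
   >>> d.get('missing', 'n/a')       # get() never raises KeyError
   'n/a'
   >>> d.update(b=2)                 # keyword form of update(); returns None
   >>> sorted(d.items())
   [('a', 0), ('b', 2), ('c', 3)]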
  'typesmethods': '\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods.  Built-in methods are described with the\ntypes that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the "self"\nargument to the argument list.  Bound methods have two special read-\nonly attributes: "m.__self__" is the object on which the method\noperates, and "m.__func__" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n\nLike function objects, bound method objects support getting arbitrary\nattributes.  However, since method attributes are actually stored on\nthe underlying function object ("meth.__func__"), setting method\nattributes on bound methods is disallowed.  Attempting to set an\nattribute on a method results in an "AttributeError" being raised.  In\norder to set a method attribute, you need to explicitly set it on the\nunderlying function object:\n\n   >>> class C:\n   ...     def method(self):\n   ...         pass\n   ...\n   >>> c = C()\n   >>> c.method.whoami = \'my name is method\'  # can\'t set on the method\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   AttributeError: \'method\' object has no attribute \'whoami\'\n   >>> c.method.__func__.whoami = \'my name is method\'\n   >>> c.method.whoami\n   \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n',
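The equivalence stated above between calling a bound method and calling its underlying function can be checked directly. The class "Greeter" below is a made-up example, not part of the patch:

   >>> class Greeter:
   ...     def greet(self, name):
   ...         return 'hello ' + name
   ...
   >>> g = Greeter()
   >>> m = g.greet                                   # a bound method object
   >>> m.__self__ is g and m.__func__ is Greeter.greet
   True
   >>> m('world') == m.__func__(m.__self__, 'world')
   True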
  'typesmodules': '\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to.  (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}").  Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "<module\n\'sys\' (built-in)>".  If loaded from a file, they are written as\n"<module \'os\' from \'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
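A brief sketch of the relationship between module attributes and "__dict__" described in this topic; it uses "types.ModuleType" to build a throw-away module named "demo" (an invented example) rather than importing a real one:

   >>> import types
   >>> m = types.ModuleType('demo')
   >>> m.__dict__['answer'] = 42       # writing the symbol table directly ...
   >>> m.answer                        # ... is visible as an ordinary attribute
   42
   >>> m.question = 'unknown'          # attribute assignment updates __dict__
   >>> m.__dict__['question']
   'unknown'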
- 'typesseq': '\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority).  In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation                  | Result                           | Notes      |\n+============================+==================================+============+\n| "x in s"                   | "True" if an item of *s* is      | (1)        |\n+----------------------------+----------------------------------+------------+\n| "x not in s"               | "False" if an item of *s* is     | (1)        |\n+----------------------------+----------------------------------+------------+\n| "s + t"                    | the concatenation of *s* and *t* | (6)(7)     |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s"         | *n* shallow copies of *s*        | (2)(7)     |\n+----------------------------+----------------------------------+------------+\n| "s[i]"                     | *i*th item of *s*, origin 0      | (3)        |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]"                   | slice of *s* from *i* to *j*     | (3)(4)     |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]"                 | slice of *s* from *i* to *j*     | (3)(5)     |\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])"     | index of the first occurrence of | (8)        |\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons.  In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length.  (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. 
While the "in" and "not in" operations are used only for simple\n   containment testing in the general case, some specialised sequences\n   (such as "str", "bytes" and "bytearray") also use them for\n   subsequence testing:\n\n      >>> "gg" in "eggs"\n      True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n   empty sequence of the same type as *s*).  Note also that the copies\n   are shallow; nested structures are not copied.  This often haunts\n   new Python programmers; consider:\n\n      >>> lists = [[]] * 3\n      >>> lists\n      [[], [], []]\n      >>> lists[0].append(3)\n      >>> lists\n      [[3], [3], [3]]\n\n   What has happened is that "[[]]" is a one-element list containing\n   an empty list, so all three elements of "[[]] * 3" are (pointers\n   to) this single empty list.  Modifying any of the elements of\n   "lists" modifies this single list. You can create a list of\n   different lists this way:\n\n      >>> lists = [[] for i in range(3)]\n      >>> lists[0].append(3)\n      >>> lists[1].append(5)\n      >>> lists[2].append(7)\n      >>> lists\n      [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n   string: "len(s) + i" or "len(s) + j" is substituted.  But note that\n   "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n   items with index *k* such that "i <= k < j".  If *i* or *j* is\n   greater than "len(s)", use "len(s)".  If *i* is omitted or "None",\n   use "0".  If *j* is omitted or "None", use "len(s)".  If *i* is\n   greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n   sequence of items with index  "x = i + n*k" such that "0 <= n <\n   (j-i)/k".  In other words, the indices are "i", "i+k", "i+2*k",\n   "i+3*k" and so on, stopping when *j* is reached (but never\n   including *j*).  If *i* or *j* is greater than "len(s)", use\n   "len(s)".  If *i* or *j* are omitted or "None", they become "end"\n   values (which end depends on the sign of *k*).  Note, *k* cannot be\n   zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new object.\n   This means that building up a sequence by repeated concatenation\n   will have a quadratic runtime cost in the total sequence length.\n   To get a linear runtime cost, you must switch to one of the\n   alternatives below:\n\n   * if concatenating "str" objects, you can build a list and use\n     "str.join()" at the end or else write to a "io.StringIO" instance\n     and retrieve its value when complete\n\n   * if concatenating "bytes" objects, you can similarly use\n     "bytes.join()" or "io.BytesIO", or you can do in-place\n     concatenation with a "bytearray" object.  "bytearray" objects are\n     mutable and have an efficient overallocation mechanism\n\n   * if concatenating "tuple" objects, extend a "list" instead\n\n   * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item sequences\n   that follow specific patterns, and hence don\'t support sequence\n   concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n   supported, the additional arguments to the index method allow\n   efficient searching of subsections of the sequence. 
Passing the\n   extra arguments is roughly equivalent to using "s[i:j].index(x)",\n   only without copying any data and with the returned index being\n   relative to the start of the sequence rather than the start of the\n   slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()"                    | removes all items from "s" (same | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()"                     | creates a shallow copy of "s"    | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"                   | retrieves the item at *i* and    | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | remove the first item from *s*   | (3)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default the\n   last item is removed and returned.\n\n3. 
"remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for economy\n   of space when reversing a large sequence.  To remind users that it\n   operates by side effect, it does not return the reversed sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n   interfaces of mutable containers that don\'t support slicing\n   operations (such as "dict" and "set")\n\n   New in version 3.3: "clear()" and "copy()" methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n   Lists may be constructed in several ways:\n\n   * Using a pair of square brackets to denote the empty list: "[]"\n\n   * Using square brackets, separating items with commas: "[a]", "[a,\n     b, c]"\n\n   * Using a list comprehension: "[x for x in iterable]"\n\n   * Using the type constructor: "list()" or "list(iterable)"\n\n   The constructor builds a list whose items are the same and in the\n   same order as *iterable*\'s items.  *iterable* may be either a\n   sequence, a container that supports iteration, or an iterator\n   object.  If *iterable* is already a list, a copy is made and\n   returned, similar to "iterable[:]". For example, "list(\'abc\')"\n   returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n   3]". If no argument is given, the constructor creates a new empty\n   list, "[]".\n\n   Many other operations also produce lists, including the "sorted()"\n   built-in.\n\n   Lists implement all of the *common* and *mutable* sequence\n   operations. Lists also provide the following additional method:\n\n   sort(*, key=None, reverse=None)\n\n      This method sorts the list in place, using only "<" comparisons\n      between items. Exceptions are not suppressed - if any comparison\n      operations fail, the entire sort operation will fail (and the\n      list will likely be left in a partially modified state).\n\n      "sort()" accepts two arguments that can only be passed by\n      keyword (*keyword-only arguments*):\n\n      *key* specifies a function of one argument that is used to\n      extract a comparison key from each list element (for example,\n      "key=str.lower"). The key corresponding to each item in the list\n      is calculated once and then used for the entire sorting process.\n      The default value of "None" means that list items are sorted\n      directly without calculating a separate key value.\n\n      The "functools.cmp_to_key()" utility is available to convert a\n      2.x style *cmp* function to a *key* function.\n\n      *reverse* is a boolean value.  If set to "True", then the list\n      elements are sorted as if each comparison were reversed.\n\n      This method modifies the sequence in place for economy of space\n      when sorting a large sequence.  To remind users that it operates\n      by side effect, it does not return the sorted sequence (use\n      "sorted()" to explicitly request a new sorted list instance).\n\n      The "sort()" method is guaranteed to be stable.  
A sort is\n      stable if it guarantees not to change the relative order of\n      elements that compare equal --- this is helpful for sorting in\n      multiple passes (for example, sort by department, then by salary\n      grade).\n\n      **CPython implementation detail:** While a list is being sorted,\n      the effect of attempting to mutate, or even inspect, the list is\n      undefined.  The C implementation of Python makes the list appear\n      empty for the duration, and raises "ValueError" if it can detect\n      that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n   Tuples may be constructed in a number of ways:\n\n   * Using a pair of parentheses to denote the empty tuple: "()"\n\n   * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n   * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n   * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n   The constructor builds a tuple whose items are the same and in the\n   same order as *iterable*\'s items.  *iterable* may be either a\n   sequence, a container that supports iteration, or an iterator\n   object.  If *iterable* is already a tuple, it is returned\n   unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n   and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n   given, the constructor creates a new empty tuple, "()".\n\n   Note that it is actually the comma which makes a tuple, not the\n   parentheses. The parentheses are optional, except in the empty\n   tuple case, or when they are needed to avoid syntactic ambiguity.\n   For example, "f(a, b, c)" is a function call with three arguments,\n   while "f((a, b, c))" is a function call with a 3-tuple as the sole\n   argument.\n\n   Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n   The arguments to the range constructor must be integers (either\n   built-in "int" or any object that implements the "__index__"\n   special method).  If the *step* argument is omitted, it defaults to\n   "1". If the *start* argument is omitted, it defaults to "0". If\n   *step* is zero, "ValueError" is raised.\n\n   For a positive *step*, the contents of a range "r" are determined\n   by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n   stop".\n\n   For a negative *step*, the contents of the range are still\n   determined by the formula "r[i] = start + step*i", but the\n   constraints are "i >= 0" and "r[i] > stop".\n\n   A range object will be empty if "r[0]" does not meet the value\n   constraint. 
Ranges do support negative indices, but these are\n   interpreted as indexing from the end of the sequence determined by\n   the positive indices.\n\n   Ranges containing absolute values larger than "sys.maxsize" are\n   permitted but some features (such as "len()") may raise\n   "OverflowError".\n\n   Range examples:\n\n      >>> list(range(10))\n      [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n      >>> list(range(1, 11))\n      [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n      >>> list(range(0, 30, 5))\n      [0, 5, 10, 15, 20, 25]\n      >>> list(range(0, 10, 3))\n      [0, 3, 6, 9]\n      >>> list(range(0, -10, -1))\n      [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n      >>> list(range(0))\n      []\n      >>> list(range(1, 0))\n      []\n\n   Ranges implement all of the *common* sequence operations except\n   concatenation and repetition (due to the fact that range objects\n   can only represent sequences that follow a strict pattern and\n   repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences.  That is, two range objects are considered equal if they\nrepresent the same sequence of values.  (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n',
- 'typesseq-mutable': '\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()"                    | removes all items from "s" (same | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()"                     | creates a shallow copy of "s"    | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"                   | retrieves the item at *i* and    | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | remove the first item from *s*   | (3)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default the\n   last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for economy\n   of space when reversing a large sequence.  To remind users that it\n   operates by side effect, it does not return the reversed sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n   interfaces of mutable containers that don\'t support slicing\n   operations (such as "dict" and "set")\n\n   New in version 3.3: "clear()" and "copy()" methods.\n',
+ 'typesseq': '\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority).  In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation                  | Result                           | Notes      |\n+============================+==================================+============+\n| "x in s"                   | "True" if an item of *s* is      | (1)        |\n+----------------------------+----------------------------------+------------+\n| "x not in s"               | "False" if an item of *s* is     | (1)        |\n+----------------------------+----------------------------------+------------+\n| "s + t"                    | the concatenation of *s* and *t* | (6)(7)     |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s"         | *n* shallow copies of *s*        | (2)(7)     |\n+----------------------------+----------------------------------+------------+\n| "s[i]"                     | *i*th item of *s*, origin 0      | (3)        |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]"                   | slice of *s* from *i* to *j*     | (3)(4)     |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]"                 | slice of *s* from *i* to *j*     | (3)(5)     |\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])"     | index of the first occurrence of | (8)        |\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons.  In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length.  (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. 
While the "in" and "not in" operations are used only for simple\n   containment testing in the general case, some specialised sequences\n   (such as "str", "bytes" and "bytearray") also use them for\n   subsequence testing:\n\n      >>> "gg" in "eggs"\n      True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n   empty sequence of the same type as *s*).  Note also that the copies\n   are shallow; nested structures are not copied.  This often haunts\n   new Python programmers; consider:\n\n      >>> lists = [[]] * 3\n      >>> lists\n      [[], [], []]\n      >>> lists[0].append(3)\n      >>> lists\n      [[3], [3], [3]]\n\n   What has happened is that "[[]]" is a one-element list containing\n   an empty list, so all three elements of "[[]] * 3" are (pointers\n   to) this single empty list.  Modifying any of the elements of\n   "lists" modifies this single list. You can create a list of\n   different lists this way:\n\n      >>> lists = [[] for i in range(3)]\n      >>> lists[0].append(3)\n      >>> lists[1].append(5)\n      >>> lists[2].append(7)\n      >>> lists\n      [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of\n   the string: "len(s) + i" or "len(s) + j" is substituted.  But note\n   that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n   items with index *k* such that "i <= k < j".  If *i* or *j* is\n   greater than "len(s)", use "len(s)".  If *i* is omitted or "None",\n   use "0".  If *j* is omitted or "None", use "len(s)".  If *i* is\n   greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n   sequence of items with index  "x = i + n*k" such that "0 <= n <\n   (j-i)/k".  In other words, the indices are "i", "i+k", "i+2*k",\n   "i+3*k" and so on, stopping when *j* is reached (but never\n   including *j*).  If *i* or *j* is greater than "len(s)", use\n   "len(s)".  If *i* or *j* are omitted or "None", they become "end"\n   values (which end depends on the sign of *k*).  Note, *k* cannot be\n   zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new\n   object. This means that building up a sequence by repeated\n   concatenation will have a quadratic runtime cost in the total\n   sequence length. To get a linear runtime cost, you must switch to\n   one of the alternatives below:\n\n   * if concatenating "str" objects, you can build a list and use\n     "str.join()" at the end or else write to a "io.StringIO" instance\n     and retrieve its value when complete\n\n   * if concatenating "bytes" objects, you can similarly use\n     "bytes.join()" or "io.BytesIO", or you can do in-place\n     concatenation with a "bytearray" object.  "bytearray" objects are\n     mutable and have an efficient overallocation mechanism\n\n   * if concatenating "tuple" objects, extend a "list" instead\n\n   * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item\n   sequences that follow specific patterns, and hence don\'t support\n   sequence concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n   supported, the additional arguments to the index method allow\n   efficient searching of subsections of the sequence. 
Passing the\n   extra arguments is roughly equivalent to using "s[i:j].index(x)",\n   only without copying any data and with the returned index being\n   relative to the start of the sequence rather than the start of the\n   slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()"                    | removes all items from "s" (same | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()"                     | creates a shallow copy of "s"    | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"                   | retrieves the item at *i* and    | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | remove the first item from *s*   | (3)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n   the last item is removed and returned.\n\n3. 
"remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n   economy of space when reversing a large sequence.  To remind users\n   that it operates by side effect, it does not return the reversed\n   sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n   interfaces of mutable containers that don\'t support slicing\n   operations (such as "dict" and "set")\n\n   New in version 3.3: "clear()" and "copy()" methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n   Lists may be constructed in several ways:\n\n   * Using a pair of square brackets to denote the empty list: "[]"\n\n   * Using square brackets, separating items with commas: "[a]",\n     "[a, b, c]"\n\n   * Using a list comprehension: "[x for x in iterable]"\n\n   * Using the type constructor: "list()" or "list(iterable)"\n\n   The constructor builds a list whose items are the same and in the\n   same order as *iterable*\'s items.  *iterable* may be either a\n   sequence, a container that supports iteration, or an iterator\n   object.  If *iterable* is already a list, a copy is made and\n   returned, similar to "iterable[:]". For example, "list(\'abc\')"\n   returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n   3]". If no argument is given, the constructor creates a new empty\n   list, "[]".\n\n   Many other operations also produce lists, including the "sorted()"\n   built-in.\n\n   Lists implement all of the *common* and *mutable* sequence\n   operations. Lists also provide the following additional method:\n\n   sort(*, key=None, reverse=None)\n\n      This method sorts the list in place, using only "<" comparisons\n      between items. Exceptions are not suppressed - if any comparison\n      operations fail, the entire sort operation will fail (and the\n      list will likely be left in a partially modified state).\n\n      "sort()" accepts two arguments that can only be passed by\n      keyword (*keyword-only arguments*):\n\n      *key* specifies a function of one argument that is used to\n      extract a comparison key from each list element (for example,\n      "key=str.lower"). The key corresponding to each item in the list\n      is calculated once and then used for the entire sorting process.\n      The default value of "None" means that list items are sorted\n      directly without calculating a separate key value.\n\n      The "functools.cmp_to_key()" utility is available to convert a\n      2.x style *cmp* function to a *key* function.\n\n      *reverse* is a boolean value.  If set to "True", then the list\n      elements are sorted as if each comparison were reversed.\n\n      This method modifies the sequence in place for economy of space\n      when sorting a large sequence.  To remind users that it operates\n      by side effect, it does not return the sorted sequence (use\n      "sorted()" to explicitly request a new sorted list instance).\n\n      The "sort()" method is guaranteed to be stable.  
A sort is\n      stable if it guarantees not to change the relative order of\n      elements that compare equal --- this is helpful for sorting in\n      multiple passes (for example, sort by department, then by salary\n      grade).\n\n      **CPython implementation detail:** While a list is being sorted,\n      the effect of attempting to mutate, or even inspect, the list is\n      undefined.  The C implementation of Python makes the list appear\n      empty for the duration, and raises "ValueError" if it can detect\n      that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n   Tuples may be constructed in a number of ways:\n\n   * Using a pair of parentheses to denote the empty tuple: "()"\n\n   * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n   * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n   * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n   The constructor builds a tuple whose items are the same and in the\n   same order as *iterable*\'s items.  *iterable* may be either a\n   sequence, a container that supports iteration, or an iterator\n   object.  If *iterable* is already a tuple, it is returned\n   unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n   and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n   given, the constructor creates a new empty tuple, "()".\n\n   Note that it is actually the comma which makes a tuple, not the\n   parentheses. The parentheses are optional, except in the empty\n   tuple case, or when they are needed to avoid syntactic ambiguity.\n   For example, "f(a, b, c)" is a function call with three arguments,\n   while "f((a, b, c))" is a function call with a 3-tuple as the sole\n   argument.\n\n   Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n   The arguments to the range constructor must be integers (either\n   built-in "int" or any object that implements the "__index__"\n   special method).  If the *step* argument is omitted, it defaults to\n   "1". If the *start* argument is omitted, it defaults to "0". If\n   *step* is zero, "ValueError" is raised.\n\n   For a positive *step*, the contents of a range "r" are determined\n   by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n   stop".\n\n   For a negative *step*, the contents of the range are still\n   determined by the formula "r[i] = start + step*i", but the\n   constraints are "i >= 0" and "r[i] > stop".\n\n   A range object will be empty if "r[0]" does not meet the value\n   constraint. 
Ranges do support negative indices, but these are\n   interpreted as indexing from the end of the sequence determined by\n   the positive indices.\n\n   Ranges containing absolute values larger than "sys.maxsize" are\n   permitted but some features (such as "len()") may raise\n   "OverflowError".\n\n   Range examples:\n\n      >>> list(range(10))\n      [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n      >>> list(range(1, 11))\n      [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n      >>> list(range(0, 30, 5))\n      [0, 5, 10, 15, 20, 25]\n      >>> list(range(0, 10, 3))\n      [0, 3, 6, 9]\n      >>> list(range(0, -10, -1))\n      [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n      >>> list(range(0))\n      []\n      >>> list(range(1, 0))\n      []\n\n   Ranges implement all of the *common* sequence operations except\n   concatenation and repetition (due to the fact that range objects\n   can only represent sequences that follow a strict pattern and\n   repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences.  That is, two range objects are considered equal if they\nrepresent the same sequence of values.  (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n',
+ 'typesseq-mutable': '\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()"                    | removes all items from "s" (same | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()"                     | creates a shallow copy of "s"    | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"                   | retrieves the item at *i* and    | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | remove the first item from *s*   | (3)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n   the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n   economy of space when reversing a large sequence.  To remind users\n   that it operates by side effect, it does not return the reversed\n   sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n   interfaces of mutable containers that don\'t support slicing\n   operations (such as "dict" and "set")\n\n   New in version 3.3: "clear()" and "copy()" methods.\n',
  'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n   u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\ninteger argument.  The bitwise inversion of "x" is defined as\n"-(x+1)".  It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n',
  'while': '\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n   while_stmt ::= "while" expression ":" suite\n                  ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n',
- 'with': '\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item") is\n   evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return value\n   from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()" method\n     returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked.  If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n   **PEP 0343** - The "with" statement\n      The specification, background, and examples for the Python\n      "with" statement.\n',
+ 'with': '\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n   is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n   value from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()"\n     method returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked.  If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n',
  'yield': '\nThe "yield" statement\n*********************\n\n   yield_stmt ::= yield_expression\n\nA "yield" statement is semantically equivalent to a *yield\nexpression*. The yield statement can be used to omit the parentheses\nthat would otherwise be required in the equivalent yield expression\nstatement. For example, the yield statements\n\n   yield <expr>\n   yield from <expr>\n\nare equivalent to the yield expression statements\n\n   (yield <expr>)\n   (yield from <expr>)\n\nYield expressions and statements are only used when defining a\n*generator* function, and are only used in the body of the generator\nfunction.  Using yield in a function definition is sufficient to cause\nthat definition to create a generator function instead of a normal\nfunction.\n\nFor full details of "yield" semantics, refer to the *Yield\nexpressions* section.\n'}
diff --git a/Lib/random.py b/Lib/random.py
index 174e755..b21dee8 100644
--- a/Lib/random.py
+++ b/Lib/random.py
@@ -684,7 +684,7 @@
     print(round(t1-t0, 3), 'sec,', end=' ')
     avg = total/n
     stddev = _sqrt(sqsum/n - avg*avg)
-    print('avg %g, stddev %g, min %g, max %g' % \
+    print('avg %g, stddev %g, min %g, max %g\n' % \
               (avg, stddev, smallest, largest))
 
 
diff --git a/Lib/selectors.py b/Lib/selectors.py
index 9be9225..4e9ae6e 100644
--- a/Lib/selectors.py
+++ b/Lib/selectors.py
@@ -441,6 +441,64 @@
             super().close()
 
 
+if hasattr(select, 'devpoll'):
+
+    class DevpollSelector(_BaseSelectorImpl):
+        """Solaris /dev/poll selector."""
+
+        def __init__(self):
+            super().__init__()
+            self._devpoll = select.devpoll()
+
+        def fileno(self):
+            return self._devpoll.fileno()
+
+        def register(self, fileobj, events, data=None):
+            key = super().register(fileobj, events, data)
+            poll_events = 0
+            if events & EVENT_READ:
+                poll_events |= select.POLLIN
+            if events & EVENT_WRITE:
+                poll_events |= select.POLLOUT
+            self._devpoll.register(key.fd, poll_events)
+            return key
+
+        def unregister(self, fileobj):
+            key = super().unregister(fileobj)
+            self._devpoll.unregister(key.fd)
+            return key
+
+        def select(self, timeout=None):
+            if timeout is None:
+                timeout = None
+            elif timeout <= 0:
+                timeout = 0
+            else:
+                # devpoll() has a resolution of 1 millisecond, round away from
+                # zero to wait *at least* timeout seconds.
+                timeout = math.ceil(timeout * 1e3)
+            ready = []
+            try:
+                fd_event_list = self._devpoll.poll(timeout)
+            except InterruptedError:
+                return ready
+            for fd, event in fd_event_list:
+                events = 0
+                if event & ~select.POLLIN:
+                    events |= EVENT_WRITE
+                if event & ~select.POLLOUT:
+                    events |= EVENT_READ
+
+                key = self._key_from_fd(fd)
+                if key:
+                    ready.append((key, events & key.events))
+            return ready
+
+        def close(self):
+            self._devpoll.close()
+            super().close()
+
+
 if hasattr(select, 'kqueue'):
 
     class KqueueSelector(_BaseSelectorImpl):
@@ -513,12 +571,14 @@
             super().close()
 
 
-# Choose the best implementation: roughly, epoll|kqueue > poll > select.
+# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select.
 # select() also can't accept a FD > FD_SETSIZE (usually around 1024)
 if 'KqueueSelector' in globals():
     DefaultSelector = KqueueSelector
 elif 'EpollSelector' in globals():
     DefaultSelector = EpollSelector
+elif 'DevpollSelector' in globals():
+    DefaultSelector = DevpollSelector
 elif 'PollSelector' in globals():
     DefaultSelector = PollSelector
 else:
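
For context, callers never name DevpollSelector directly; they reach it through selectors.DefaultSelector(), which now resolves to it on Solaris. A minimal usage sketch follows (the throwaway server socket and the 'accept' tag are illustrative, not part of the patch):

    import selectors
    import socket

    # DefaultSelector picks the best backend available: kqueue, epoll,
    # /dev/poll (with this change), poll, or select.
    sel = selectors.DefaultSelector()

    server = socket.socket()
    server.bind(('127.0.0.1', 0))      # any free port
    server.listen(5)
    server.setblocking(False)
    sel.register(server, selectors.EVENT_READ, data='accept')

    # One pass of an event loop: wait up to one second for readiness.
    for key, events in sel.select(timeout=1.0):
        if key.data == 'accept':
            conn, addr = key.fileobj.accept()
            conn.close()

    sel.close()

The millisecond rounding in DevpollSelector.select() matters because devpoll(), like poll(), takes its timeout in milliseconds; rounding up keeps fractional-second timeouts from returning early.
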
diff --git a/Lib/shutil.py b/Lib/shutil.py
index 0cd6ec4..508a368 100644
--- a/Lib/shutil.py
+++ b/Lib/shutil.py
@@ -630,23 +630,6 @@
 
     return archive_name
 
-def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
-    # XXX see if we want to keep an external call here
-    if verbose:
-        zipoptions = "-r"
-    else:
-        zipoptions = "-rq"
-    from distutils.errors import DistutilsExecError
-    from distutils.spawn import spawn
-    try:
-        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
-    except DistutilsExecError:
-        # XXX really should distinguish between "couldn't find
-        # external 'zip' command" and "zip failed".
-        raise ExecError("unable to create zip file '%s': "
-            "could neither import the 'zipfile' module nor "
-            "find a standalone zip utility") % zip_filename
-
 def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
     """Create a zip file from all the files under 'base_dir'.
 
@@ -656,6 +639,8 @@
     available, raises ExecError.  Returns the name of the output zip
     file.
     """
+    import zipfile
+
     zip_filename = base_name + ".zip"
     archive_dir = os.path.dirname(base_name)
 
@@ -665,30 +650,20 @@
         if not dry_run:
             os.makedirs(archive_dir)
 
-    # If zipfile module is not available, try spawning an external 'zip'
-    # command.
-    try:
-        import zipfile
-    except ImportError:
-        zipfile = None
+    if logger is not None:
+        logger.info("creating '%s' and adding '%s' to it",
+                    zip_filename, base_dir)
 
-    if zipfile is None:
-        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
-    else:
-        if logger is not None:
-            logger.info("creating '%s' and adding '%s' to it",
-                        zip_filename, base_dir)
-
-        if not dry_run:
-            with zipfile.ZipFile(zip_filename, "w",
-                                 compression=zipfile.ZIP_DEFLATED) as zf:
-                for dirpath, dirnames, filenames in os.walk(base_dir):
-                    for name in filenames:
-                        path = os.path.normpath(os.path.join(dirpath, name))
-                        if os.path.isfile(path):
-                            zf.write(path, path)
-                            if logger is not None:
-                                logger.info("adding '%s'", path)
+    if not dry_run:
+        with zipfile.ZipFile(zip_filename, "w",
+                             compression=zipfile.ZIP_DEFLATED) as zf:
+            for dirpath, dirnames, filenames in os.walk(base_dir):
+                for name in filenames:
+                    path = os.path.normpath(os.path.join(dirpath, name))
+                    if os.path.isfile(path):
+                        zf.write(path, path)
+                        if logger is not None:
+                            logger.info("adding '%s'", path)
 
     return zip_filename
 
diff --git a/Lib/signal.py b/Lib/signal.py
new file mode 100644
index 0000000..0db3df8
--- /dev/null
+++ b/Lib/signal.py
@@ -0,0 +1,85 @@
+import _signal
+from _signal import *
+from functools import wraps as _wraps
+from enum import IntEnum as _IntEnum
+
+_globals = globals()
+
+Signals = _IntEnum(
+    'Signals',
+    {name: value for name, value in _globals.items()
+     if name.isupper()
+        and (name.startswith('SIG') and not name.startswith('SIG_'))
+        or name.startswith('CTRL_')})
+
+class Handlers(_IntEnum):
+    SIG_DFL = _signal.SIG_DFL
+    SIG_IGN = _signal.SIG_IGN
+
+_globals.update(Signals.__members__)
+_globals.update(Handlers.__members__)
+
+if 'pthread_sigmask' in _globals:
+    class Sigmasks(_IntEnum):
+        SIG_BLOCK = _signal.SIG_BLOCK
+        SIG_UNBLOCK = _signal.SIG_UNBLOCK
+        SIG_SETMASK = _signal.SIG_SETMASK
+
+    _globals.update(Sigmasks.__members__)
+
+
+def _int_to_enum(value, enum_klass):
+    """Convert a numeric value to an IntEnum member.
+    If it's not a known member, return the numeric value itself.
+    """
+    try:
+        return enum_klass(value)
+    except ValueError:
+        return value
+
+
+def _enum_to_int(value):
+    """Convert an IntEnum member to a numeric value.
+    If it's not an IntEnum member, return the value itself.
+    """
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        return value
+
+
+@_wraps(_signal.signal)
+def signal(signalnum, handler):
+    handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))
+    return _int_to_enum(handler, Handlers)
+
+
+@_wraps(_signal.getsignal)
+def getsignal(signalnum):
+    handler = _signal.getsignal(signalnum)
+    return _int_to_enum(handler, Handlers)
+
+
+if 'pthread_sigmask' in _globals:
+    @_wraps(_signal.pthread_sigmask)
+    def pthread_sigmask(how, mask):
+        sigs_set = _signal.pthread_sigmask(how, mask)
+        return set(_int_to_enum(x, Signals) for x in sigs_set)
+    pthread_sigmask.__doc__ = _signal.pthread_sigmask.__doc__
+
+
+if 'sigpending' in _globals:
+    @_wraps(_signal.sigpending)
+    def sigpending():
+        sigs = _signal.sigpending()
+        return set(_int_to_enum(x, Signals) for x in sigs)
+
+
+if 'sigwait' in _globals:
+    @_wraps(_signal.sigwait)
+    def sigwait(sigset):
+        retsig = _signal.sigwait(sigset)
+        return _int_to_enum(retsig, Signals)
+    sigwait.__doc__ = _signal.sigwait.__doc__
+
+del _globals, _wraps
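
The wrapper's visible effect is that constants and handlers come back as IntEnum members while remaining plain ints for code that compares or stores them numerically. A hedged interactive sketch (exact numeric values and reprs vary by platform):

    >>> import signal
    >>> signal.SIGTERM                      # module-level names are Signals members
    <Signals.SIGTERM: 15>
    >>> signal.getsignal(signal.SIGTERM)    # handlers are reported as Handlers members
    <Handlers.SIG_DFL: 0>
    >>> old = signal.signal(signal.SIGTERM, signal.SIG_IGN)
    >>> signal.signal(signal.SIGTERM, old)  # restore; returns the handler just replaced
    <Handlers.SIG_IGN: 1>
    >>> int(signal.SIGTERM) == 15           # still usable as a plain integer
    True
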
diff --git a/Lib/socketserver.py b/Lib/socketserver.py
index 46ee7c5..7c85fbc 100644
--- a/Lib/socketserver.py
+++ b/Lib/socketserver.py
@@ -94,7 +94,7 @@
 Another approach to handling multiple simultaneous requests in an
 environment that supports neither threads nor fork (or where these are
 too expensive or inappropriate for the service) is to maintain an
-explicit table of partially finished requests and to use select() to
+explicit table of partially finished requests and to use a selector to
 decide which request to work on next (or whether to handle a new
 incoming request).  This is particularly important for stream services
 where each client can potentially be connected for a long time (if
@@ -104,7 +104,6 @@
 - Standard classes for Sun RPC (which uses either UDP or TCP)
 - Standard mix-in classes to implement various authentication
   and encryption schemes
-- Standard framework for select-based multiplexing
 
 XXX Open problems:
 - What to do with out-of-band data?
@@ -130,13 +129,17 @@
 
 
 import socket
-import select
+import selectors
 import os
 import errno
 try:
     import threading
 except ImportError:
     import dummy_threading as threading
+try:
+    from time import monotonic as time
+except ImportError:
+    from time import time as time
 
 __all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
            "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
@@ -147,14 +150,13 @@
                     "ThreadingUnixStreamServer",
                     "ThreadingUnixDatagramServer"])
 
-def _eintr_retry(func, *args):
-    """restart a system call interrupted by EINTR"""
-    while True:
-        try:
-            return func(*args)
-        except OSError as e:
-            if e.errno != errno.EINTR:
-                raise
+# poll/select have the advantage of not requiring any extra file descriptor,
+# unlike epoll/kqueue (also, they require only a single syscall).
+if hasattr(selectors, 'PollSelector'):
+    _ServerSelector = selectors.PollSelector
+else:
+    _ServerSelector = selectors.SelectSelector
+
 
 class BaseServer:
 
@@ -166,7 +168,7 @@
     - serve_forever(poll_interval=0.5)
     - shutdown()
     - handle_request()  # if you do not use serve_forever()
-    - fileno() -> int   # for select()
+    - fileno() -> int   # for selector
 
     Methods that may be overridden:
 
@@ -227,17 +229,19 @@
         """
         self.__is_shut_down.clear()
         try:
-            while not self.__shutdown_request:
-                # XXX: Consider using another file descriptor or
-                # connecting to the socket to wake this up instead of
-                # polling. Polling reduces our responsiveness to a
-                # shutdown request and wastes cpu at all other times.
-                r, w, e = _eintr_retry(select.select, [self], [], [],
-                                       poll_interval)
-                if self in r:
-                    self._handle_request_noblock()
+            # XXX: Consider using another file descriptor or connecting to the
+            # socket to wake this up instead of polling. Polling reduces our
+            # responsiveness to a shutdown request and wastes cpu at all other
+            # times.
+            with _ServerSelector() as selector:
+                selector.register(self, selectors.EVENT_READ)
 
-                self.service_actions()
+                while not self.__shutdown_request:
+                    ready = selector.select(poll_interval)
+                    if ready:
+                        self._handle_request_noblock()
+
+                    self.service_actions()
         finally:
             self.__shutdown_request = False
             self.__is_shut_down.set()
@@ -260,16 +264,16 @@
         """
         pass
 
-    # The distinction between handling, getting, processing and
-    # finishing a request is fairly arbitrary.  Remember:
+    # The distinction between handling, getting, processing and finishing a
+    # request is fairly arbitrary.  Remember:
     #
-    # - handle_request() is the top-level call.  It calls
-    #   select, get_request(), verify_request() and process_request()
+    # - handle_request() is the top-level call.  It calls selector.select(),
+    #   get_request(), verify_request() and process_request()
     # - get_request() is different for stream or datagram sockets
-    # - process_request() is the place that may fork a new process
-    #   or create a new thread to finish the request
-    # - finish_request() instantiates the request handler class;
-    #   this constructor will handle the request all by itself
+    # - process_request() is the place that may fork a new process or create a
+    #   new thread to finish the request
+    # - finish_request() instantiates the request handler class; this
+    #   constructor will handle the request all by itself
 
     def handle_request(self):
         """Handle one request, possibly blocking.
@@ -283,18 +287,30 @@
             timeout = self.timeout
         elif self.timeout is not None:
             timeout = min(timeout, self.timeout)
-        fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
-        if not fd_sets[0]:
-            self.handle_timeout()
-            return
-        self._handle_request_noblock()
+        if timeout is not None:
+            deadline = time() + timeout
+
+        # Wait until a request arrives or the timeout expires - the loop is
+        # necessary to accommodate early wakeups due to EINTR.
+        with _ServerSelector() as selector:
+            selector.register(self, selectors.EVENT_READ)
+
+            while True:
+                ready = selector.select(timeout)
+                if ready:
+                    return self._handle_request_noblock()
+                else:
+                    if timeout is not None:
+                        timeout = deadline - time()
+                        if timeout < 0:
+                            return self.handle_timeout()
 
     def _handle_request_noblock(self):
         """Handle one request, without blocking.
 
-        I assume that select.select has returned that the socket is
-        readable before this function was called, so there should be
-        no risk of blocking in get_request().
+        I assume that selector.select() has returned that the socket is
+        readable before this function was called, so there should be no risk of
+        blocking in get_request().
         """
         try:
             request, client_address = self.get_request()
@@ -377,7 +393,7 @@
     - serve_forever(poll_interval=0.5)
     - shutdown()
     - handle_request()  # if you don't use serve_forever()
-    - fileno() -> int   # for select()
+    - fileno() -> int   # for selector
 
     Methods that may be overridden:
 
@@ -459,7 +475,7 @@
     def fileno(self):
         """Return socket file number.
 
-        Interface required by select().
+        Interface required by selector.
 
         """
         return self.socket.fileno()
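
The handle_request() rewrite keeps a monotonic deadline and re-arms the selector after early wakeups, so an EINTR no longer cuts the wait short. A standalone sketch of the same pattern (wait_readable is a hypothetical helper, not part of the patch):

    import selectors
    from time import monotonic as time

    def wait_readable(sock, timeout):
        """Block until *sock* is readable or *timeout* seconds elapse.

        Mirrors the deadline loop in BaseServer.handle_request(): the
        remaining time is recomputed after every wakeup, so interrupted
        or spurious returns do not shorten the overall wait.
        """
        deadline = None if timeout is None else time() + timeout
        with selectors.SelectSelector() as selector:
            selector.register(sock, selectors.EVENT_READ)
            while True:
                if selector.select(timeout):
                    return True                 # ready to accept/read
                if deadline is not None:
                    timeout = deadline - time()
                    if timeout < 0:
                        return False            # timed out
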
diff --git a/Lib/ssl.py b/Lib/ssl.py
index d3c18ed..8f12513 100644
--- a/Lib/ssl.py
+++ b/Lib/ssl.py
@@ -92,7 +92,7 @@
 import sys
 import os
 from collections import namedtuple
-from enum import Enum as _Enum
+from enum import Enum as _Enum, IntEnum as _IntEnum
 
 import _ssl             # if we can't import it, let the error propagate
 
@@ -119,30 +119,19 @@
 
 from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN
 
-from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
 from _ssl import _OPENSSL_API_VERSION
 
+_SSLMethod = _IntEnum('_SSLMethod',
+                      {name: value for name, value in vars(_ssl).items()
+                       if name.startswith('PROTOCOL_')})
+globals().update(_SSLMethod.__members__)
 
-_PROTOCOL_NAMES = {
-    PROTOCOL_TLSv1: "TLSv1",
-    PROTOCOL_SSLv23: "SSLv23",
-    PROTOCOL_SSLv3: "SSLv3",
-}
+_PROTOCOL_NAMES = {value: name for name, value in _SSLMethod.__members__.items()}
+
 try:
-    from _ssl import PROTOCOL_SSLv2
     _SSLv2_IF_EXISTS = PROTOCOL_SSLv2
-except ImportError:
+except NameError:
     _SSLv2_IF_EXISTS = None
-else:
-    _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
-
-try:
-    from _ssl import PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
-except ImportError:
-    pass
-else:
-    _PROTOCOL_NAMES[PROTOCOL_TLSv1_1] = "TLSv1.1"
-    _PROTOCOL_NAMES[PROTOCOL_TLSv1_2] = "TLSv1.2"
 
 if sys.platform == "win32":
     from _ssl import enum_certificates, enum_crls
@@ -675,17 +664,7 @@
                 raise ValueError(
                     "non-zero flags not allowed in calls to send() on %s" %
                     self.__class__)
-            try:
-                v = self._sslobj.write(data)
-            except SSLError as x:
-                if x.args[0] == SSL_ERROR_WANT_READ:
-                    return 0
-                elif x.args[0] == SSL_ERROR_WANT_WRITE:
-                    return 0
-                else:
-                    raise
-            else:
-                return v
+            return self._sslobj.write(data)
         else:
             return socket.send(self, data, flags)
 
@@ -890,12 +869,34 @@
 # some utility functions
 
 def cert_time_to_seconds(cert_time):
-    """Takes a date-time string in standard ASN1_print form
-    ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
-    a Python time value in seconds past the epoch."""
+    """Return the time in seconds since the Epoch, given the timestring
+    representing the "notBefore" or "notAfter" date from a certificate
+    in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C locale).
 
-    import time
-    return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
+    "notBefore" or "notAfter" dates must use UTC (RFC 5280).
+
+    Month is one of: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
+    UTC should be specified as GMT (see ASN1_TIME_print())
+    """
+    from time import strptime
+    from calendar import timegm
+
+    months = (
+        "Jan","Feb","Mar","Apr","May","Jun",
+        "Jul","Aug","Sep","Oct","Nov","Dec"
+    )
+    time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT
+    try:
+        month_number = months.index(cert_time[:3].title()) + 1
+    except ValueError:
+        raise ValueError('time data %r does not match '
+                         'format "%%b%s"' % (cert_time, time_format))
+    else:
+        # found valid month
+        tt = strptime(cert_time[3:], time_format)
+        # return an integer, the previous mktime()-based implementation
+        # returned a float (fractional seconds are always zero here).
+        return timegm((tt[0], month_number) + tt[2:6])
 
 PEM_HEADER = "-----BEGIN CERTIFICATE-----"
 PEM_FOOTER = "-----END CERTIFICATE-----"
@@ -922,7 +923,7 @@
     d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
     return base64.decodebytes(d.encode('ASCII', 'strict'))
 
-def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
+def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
     """Retrieve the certificate from the server at the specified address,
     and return it as a PEM-encoded string.
     If 'ca_certs' is specified, validate the server cert against it.
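
A small usage sketch (not part of the patch) of the rewritten helper: parsing
the month by hand and converting with calendar.timegm() makes the result
independent of the local timezone, which the old time.mktime() version was not.

    import ssl

    # "notBefore"/"notAfter"-style timestamp in the fixed C-locale/GMT format
    print(ssl.cert_time_to_seconds("May  9 00:00:00 2007 GMT"))   # 1178668800
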
diff --git a/Lib/subprocess.py b/Lib/subprocess.py
index ddc033a..daa3e25 100644
--- a/Lib/subprocess.py
+++ b/Lib/subprocess.py
@@ -104,17 +104,21 @@
 If env is not None, it defines the environment variables for the new
 process.
 
-If universal_newlines is false, the file objects stdin, stdout and stderr
+If universal_newlines is False, the file objects stdin, stdout and stderr
 are opened as binary files, and no line ending conversion is done.
 
-If universal_newlines is true, the file objects stdout and stderr are
-opened as a text files, but lines may be terminated by any of '\n',
+If universal_newlines is True, the file objects stdout and stderr are
+opened as a text file, but lines may be terminated by any of '\n',
 the Unix end-of-line convention, '\r', the old Macintosh convention or
 '\r\n', the Windows convention.  All of these external representations
 are seen as '\n' by the Python program.  Also, the newlines attribute
 of the file objects stdout, stdin and stderr are not updated by the
 communicate() method.
 
+In either case, the process being communicated with should start up
+expecting to receive bytes on its standard input and decode them with
+the same encoding they are sent in.
+
 The startupinfo and creationflags, if given, will be passed to the
 underlying CreateProcess() function.  They can specify things such as
 appearance of the main window and priority for the new process.
@@ -184,6 +188,9 @@
     pass a string to the subprocess's stdin.  If you use this argument
     you may not also use the Popen constructor's "stdin" argument.
 
+    If universal_newlines is set to True, the "input" argument must
+    be a string rather than bytes, and the return value will be a string.
+
 Exceptions
 ----------
 Exceptions raised in the child process, before the new program has
@@ -225,9 +232,13 @@
 communicate(input=None)
     Interact with process: Send data to stdin.  Read data from stdout
     and stderr, until end-of-file is reached.  Wait for process to
-    terminate.  The optional input argument should be a string to be
+    terminate.  The optional input argument should be data to be
     sent to the child process, or None, if no data should be sent to
-    the child.
+    the child. If the Popen instance was constructed with universal_newlines
+    set to True, the input argument should be a string and will be encoded
+    using the preferred system encoding (see locale.getpreferredencoding);
+    if universal_newlines is False, the input argument should be a
+    byte string.
 
     communicate() returns a tuple (stdout, stderr).
 
@@ -591,8 +602,8 @@
     ...              input=b"when in the course of fooman events\n")
     b'when in the course of barman events\n'
 
-    If universal_newlines=True is passed, the return value will be a
-    string rather than bytes.
+    If universal_newlines=True is passed, the "input" argument must be a
+    string and the return value will be a string rather than bytes.
     """
     if 'stdout' in kwargs:
         raise ValueError('stdout argument not allowed, it will be overridden.')
@@ -918,11 +929,16 @@
     def communicate(self, input=None, timeout=None):
         """Interact with process: Send data to stdin.  Read data from
         stdout and stderr, until end-of-file is reached.  Wait for
-        process to terminate.  The optional input argument should be
-        bytes to be sent to the child process, or None, if no data
-        should be sent to the child.
+        process to terminate.
 
-        communicate() returns a tuple (stdout, stderr)."""
+        The optional "input" argument should be data to be sent to the
+        child process (if self.universal_newlines is True, this should
+        be a string; if it is False, "input" should be bytes), or
+        None, if no data should be sent to the child.
+
+        communicate() returns a tuple (stdout, stderr).  These will be
+        bytes or, if self.universal_newlines was True, a string.
+        """
 
         if self._communication_started and input:
             raise ValueError("Cannot send input after starting communication")
diff --git a/Lib/test/datetimetester.py b/Lib/test/datetimetester.py
index 3f3c60a..b8c6138 100644
--- a/Lib/test/datetimetester.py
+++ b/Lib/test/datetimetester.py
@@ -2270,13 +2270,14 @@
             self.assertEqual(orig, derived)
 
     def test_bool(self):
+        # time is always True.
         cls = self.theclass
         self.assertTrue(cls(1))
         self.assertTrue(cls(0, 1))
         self.assertTrue(cls(0, 0, 1))
         self.assertTrue(cls(0, 0, 0, 1))
-        self.assertFalse(cls(0))
-        self.assertFalse(cls())
+        self.assertTrue(cls(0))
+        self.assertTrue(cls())
 
     def test_replace(self):
         cls = self.theclass
@@ -2629,7 +2630,7 @@
             self.assertEqual(derived.tzname(), 'cookie')
 
     def test_more_bool(self):
-        # Test cases with non-None tzinfo.
+        # time is always True.
         cls = self.theclass
 
         t = cls(0, tzinfo=FixedOffset(-300, ""))
@@ -2639,22 +2640,10 @@
         self.assertTrue(t)
 
         t = cls(5, tzinfo=FixedOffset(300, ""))
-        self.assertFalse(t)
-
-        t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
-        self.assertFalse(t)
-
-        # Mostly ensuring this doesn't overflow internally.
-        t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
         self.assertTrue(t)
 
-        # But this should yield a value error -- the utcoffset is bogus.
-        t = cls(0, tzinfo=FixedOffset(24*60, ""))
-        self.assertRaises(ValueError, lambda: bool(t))
-
-        # Likewise.
-        t = cls(0, tzinfo=FixedOffset(-24*60, ""))
-        self.assertRaises(ValueError, lambda: bool(t))
+        t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
+        self.assertTrue(t)
 
     def test_replace(self):
         cls = self.theclass
diff --git a/Lib/test/fork_wait.py b/Lib/test/fork_wait.py
index 19b54ec..8c7c3aa 100644
--- a/Lib/test/fork_wait.py
+++ b/Lib/test/fork_wait.py
@@ -48,7 +48,12 @@
         for i in range(NUM_THREADS):
             _thread.start_new(self.f, (i,))
 
-        time.sleep(LONGSLEEP)
+        # busy-loop to wait for threads
+        deadline = time.monotonic() + 10.0
+        while len(self.alive) < NUM_THREADS:
+            time.sleep(0.1)
+            if time.monotonic() > deadline:
+                break
 
         a = sorted(self.alive.keys())
         self.assertEqual(a, list(range(NUM_THREADS)))
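
The busy-loop above (and the similar deadline loops added to other tests in
this patch) follows one recurring pattern; a generic, hypothetical helper
illustrating it might look like this:

    import time

    def wait_for(predicate, timeout=10.0, interval=0.1):
        """Poll predicate() until it returns true or the deadline passes."""
        deadline = time.monotonic() + timeout
        while not predicate():
            if time.monotonic() > deadline:
                return False
            time.sleep(interval)
        return True

    print(wait_for(lambda: True))   # True, returns immediately
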
diff --git a/Lib/test/imghdrdata/python.webp b/Lib/test/imghdrdata/python.webp
new file mode 100644
index 0000000..e824ec7
--- /dev/null
+++ b/Lib/test/imghdrdata/python.webp
Binary files differ
diff --git a/Lib/test/ssl_servers.py b/Lib/test/ssl_servers.py
index 759b3f4..f9d30cf 100644
--- a/Lib/test/ssl_servers.py
+++ b/Lib/test/ssl_servers.py
@@ -150,7 +150,7 @@
 def make_https_server(case, *, context=None, certfile=CERTFILE,
                       host=HOST, handler_class=None):
     if context is None:
-        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
     # We assume the certfile contains both private key and certificate
     context.load_cert_chain(certfile)
     server = HTTPSServerThread(context, host, handler_class)
@@ -182,6 +182,8 @@
     parser.add_argument('--curve-name', dest='curve_name', type=str,
                         action='store',
                         help='curve name for EC-based Diffie-Hellman')
+    parser.add_argument('--ciphers', dest='ciphers', type=str,
+                        help='allowed cipher list')
     parser.add_argument('--dh', dest='dh_file', type=str, action='store',
                         help='PEM file containing DH parameters')
     args = parser.parse_args()
@@ -192,12 +194,14 @@
     else:
         handler_class = RootedHTTPRequestHandler
         handler_class.root = os.getcwd()
-    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
+    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
     context.load_cert_chain(CERTFILE)
     if args.curve_name:
         context.set_ecdh_curve(args.curve_name)
     if args.dh_file:
         context.load_dh_params(args.dh_file)
+    if args.ciphers:
+        context.set_ciphers(args.ciphers)
 
     server = HTTPSServer(("", args.port), handler_class, context)
     if args.verbose:
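
Sketch of the replacement API used above (illustrative; "server_cert.pem" is a
hypothetical path): ssl.create_default_context() picks a sensible protocol and
option set for the given purpose instead of hard-coding a protocol version.

    import ssl

    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    # context.load_cert_chain("server_cert.pem")   # hypothetical certificate file
    print(context.protocol)   # a negotiating method, not a fixed SSL/TLS version
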
diff --git a/Lib/test/string_tests.py b/Lib/test/string_tests.py
index 5ed01f2..569bae1 100644
--- a/Lib/test/string_tests.py
+++ b/Lib/test/string_tests.py
@@ -1178,8 +1178,7 @@
         self.checkraises(TypeError, 'abc', '__mod__')
         self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
         self.checkraises(TypeError, '%s%s', '__mod__', (42,))
-        with self.assertWarns(DeprecationWarning):
-            self.checkraises(TypeError, '%c', '__mod__', (None,))
+        self.checkraises(TypeError, '%c', '__mod__', (None,))
         self.checkraises(ValueError, '%(foo', '__mod__', {})
         self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
         self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
diff --git a/Lib/test/test_asdl_parser.py b/Lib/test/test_asdl_parser.py
new file mode 100644
index 0000000..7a6426a
--- /dev/null
+++ b/Lib/test/test_asdl_parser.py
@@ -0,0 +1,122 @@
+"""Tests for the asdl parser in Parser/asdl.py"""
+
+import importlib.machinery
+import os
+from os.path import dirname
+import sys
+import sysconfig
+import unittest
+
+
+# This test is only relevant for from-source builds of Python.
+if not sysconfig.is_python_build():
+    raise unittest.SkipTest('test irrelevant for an installed Python')
+
+src_base = dirname(dirname(dirname(__file__)))
+parser_dir = os.path.join(src_base, 'Parser')
+
+
+class TestAsdlParser(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        # Loads the asdl module dynamically, since it's not in a real importable
+        # package.
+        # Parses Python.asdl into an ast.Module and runs the check on it.
+        # There's no need to do this for each test method, hence setUpClass.
+        sys.path.insert(0, parser_dir)
+        loader = importlib.machinery.SourceFileLoader(
+                'asdl', os.path.join(parser_dir, 'asdl.py'))
+        cls.asdl = loader.load_module()
+        cls.mod = cls.asdl.parse(os.path.join(parser_dir, 'Python.asdl'))
+        cls.assertTrue(cls.asdl.check(cls.mod), 'Module validation failed')
+
+    @classmethod
+    def tearDownClass(cls):
+        del sys.path[0]
+
+    def setUp(self):
+        # alias stuff from the class, for convenience
+        self.asdl = TestAsdlParser.asdl
+        self.mod = TestAsdlParser.mod
+        self.types = self.mod.types
+
+    def test_module(self):
+        self.assertEqual(self.mod.name, 'Python')
+        self.assertIn('stmt', self.types)
+        self.assertIn('expr', self.types)
+        self.assertIn('mod', self.types)
+
+    def test_definitions(self):
+        defs = self.mod.dfns
+        self.assertIsInstance(defs[0], self.asdl.Type)
+        self.assertIsInstance(defs[0].value, self.asdl.Sum)
+
+        self.assertIsInstance(self.types['withitem'], self.asdl.Product)
+        self.assertIsInstance(self.types['alias'], self.asdl.Product)
+
+    def test_product(self):
+        alias = self.types['alias']
+        self.assertEqual(
+            str(alias),
+            'Product([Field(identifier, name), Field(identifier, asname, opt=True)])')
+
+    def test_attributes(self):
+        stmt = self.types['stmt']
+        self.assertEqual(len(stmt.attributes), 2)
+        self.assertEqual(str(stmt.attributes[0]), 'Field(int, lineno)')
+        self.assertEqual(str(stmt.attributes[1]), 'Field(int, col_offset)')
+
+    def test_constructor_fields(self):
+        ehandler = self.types['excepthandler']
+        self.assertEqual(len(ehandler.types), 1)
+        self.assertEqual(len(ehandler.attributes), 2)
+
+        cons = ehandler.types[0]
+        self.assertIsInstance(cons, self.asdl.Constructor)
+        self.assertEqual(len(cons.fields), 3)
+
+        f0 = cons.fields[0]
+        self.assertEqual(f0.type, 'expr')
+        self.assertEqual(f0.name, 'type')
+        self.assertTrue(f0.opt)
+
+        f1 = cons.fields[1]
+        self.assertEqual(f1.type, 'identifier')
+        self.assertEqual(f1.name, 'name')
+        self.assertTrue(f1.opt)
+
+        f2 = cons.fields[2]
+        self.assertEqual(f2.type, 'stmt')
+        self.assertEqual(f2.name, 'body')
+        self.assertFalse(f2.opt)
+        self.assertTrue(f2.seq)
+
+    def test_visitor(self):
+        class CustomVisitor(self.asdl.VisitorBase):
+            def __init__(self):
+                super().__init__()
+                self.names_with_seq = []
+
+            def visitModule(self, mod):
+                for dfn in mod.dfns:
+                    self.visit(dfn)
+
+            def visitType(self, type):
+                self.visit(type.value)
+
+            def visitSum(self, sum):
+                for t in sum.types:
+                    self.visit(t)
+
+            def visitConstructor(self, cons):
+                for f in cons.fields:
+                    if f.seq:
+                        self.names_with_seq.append(cons.name)
+
+        v = CustomVisitor()
+        v.visit(self.types['mod'])
+        self.assertEqual(v.names_with_seq, ['Module', 'Interactive', 'Suite'])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Lib/test/test_asyncore.py b/Lib/test/test_asyncore.py
index 084d247..b04aa1d 100644
--- a/Lib/test/test_asyncore.py
+++ b/Lib/test/test_asyncore.py
@@ -316,23 +316,6 @@
                     'warning: unhandled connect event']
         self.assertEqual(lines, expected)
 
-    def test_issue_8594(self):
-        # XXX - this test is supposed to be removed in next major Python
-        # version
-        d = asyncore.dispatcher(socket.socket())
-        # make sure the error message no longer refers to the socket
-        # object but the dispatcher instance instead
-        self.assertRaisesRegex(AttributeError, 'dispatcher instance',
-                               getattr, d, 'foo')
-        # cheap inheritance with the underlying socket is supposed
-        # to still work but a DeprecationWarning is expected
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            family = d.family
-            self.assertEqual(family, socket.AF_INET)
-            self.assertEqual(len(w), 1)
-            self.assertTrue(issubclass(w[0].category, DeprecationWarning))
-
     def test_strerror(self):
         # refers to bug #8573
         err = asyncore._strerror(errno.EPERM)
@@ -349,9 +332,8 @@
     def handle_connect(self):
         pass
 
-class DispatcherWithSendTests(unittest.TestCase):
-    usepoll = False
 
+class DispatcherWithSendTests(unittest.TestCase):
     def setUp(self):
         pass
 
@@ -401,10 +383,6 @@
                 self.fail("join() timed out")
 
 
-
-class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
-    usepoll = True
-
 @unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
                      'asyncore.file_wrapper required')
 class FileWrapperTest(unittest.TestCase):
diff --git a/Lib/test/test_augassign.py b/Lib/test/test_augassign.py
index 9a59c58..19b7687 100644
--- a/Lib/test/test_augassign.py
+++ b/Lib/test/test_augassign.py
@@ -136,6 +136,14 @@
                 output.append("__imul__ called")
                 return self
 
+            def __matmul__(self, val):
+                output.append("__matmul__ called")
+            def __rmatmul__(self, val):
+                output.append("__rmatmul__ called")
+            def __imatmul__(self, val):
+                output.append("__imatmul__ called")
+                return self
+
             def __div__(self, val):
                 output.append("__div__ called")
             def __rdiv__(self, val):
@@ -233,6 +241,10 @@
         1 * x
         x *= 1
 
+        x @ 1
+        1 @ x
+        x @= 1
+
         x / 1
         1 / x
         x /= 1
@@ -279,6 +291,9 @@
 __mul__ called
 __rmul__ called
 __imul__ called
+__matmul__ called
+__rmatmul__ called
+__imatmul__ called
 __truediv__ called
 __rtruediv__ called
 __itruediv__ called
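
For context, a minimal sketch (not from the patch) of the PEP 465
matrix-multiplication protocol these new assertions exercise:

    class Vec:
        def __init__(self, *xs):
            self.xs = xs
        def __matmul__(self, other):          # v @ w -> dot product
            return sum(a * b for a, b in zip(self.xs, other.xs))
        def __imatmul__(self, other):         # v @= w, element-wise in place
            self.xs = tuple(a * b for a, b in zip(self.xs, other.xs))
            return self

    print(Vec(1, 2, 3) @ Vec(4, 5, 6))        # 32
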
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py
index b561a6f..018ac8d 100644
--- a/Lib/test/test_builtin.py
+++ b/Lib/test/test_builtin.py
@@ -1092,7 +1092,7 @@
         self.assertAlmostEqual(pow(-1, 0.5), 1j)
         self.assertAlmostEqual(pow(-1, 1/3), 0.5 + 0.8660254037844386j)
 
-        self.assertRaises(TypeError, pow, -1, -2, 3)
+        self.assertRaises(ValueError, pow, -1, -2, 3)
         self.assertRaises(ValueError, pow, 1, 2, 0)
 
         self.assertRaises(TypeError, pow)
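
The changed assertion reflects the new error type for three-argument pow()
with a negative exponent; a quick illustration (the exact message text is an
assumption):

    try:
        pow(-1, -2, 3)
    except ValueError as exc:
        # e.g. "pow() 2nd argument cannot be negative when 3rd argument specified"
        print(exc)
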
diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py
index ba7c38d..ba7f2c4 100644
--- a/Lib/test/test_capi.py
+++ b/Lib/test/test_capi.py
@@ -150,6 +150,23 @@
         self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
             "($module, /, parameter)")
 
+    def test_c_type_with_matrix_multiplication(self):
+        M = _testcapi.matmulType
+        m1 = M()
+        m2 = M()
+        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
+        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
+        self.assertEqual(42 @ m1, ("matmul", 42, m1))
+        o = m1
+        o @= m2
+        self.assertEqual(o, ("imatmul", m1, m2))
+        o = m1
+        o @= 42
+        self.assertEqual(o, ("imatmul", m1, 42))
+        o = 42
+        o @= m1
+        self.assertEqual(o, ("matmul", 42, m1))
+
 
 @unittest.skipUnless(threading, 'Threading required for this test.')
 class TestPendingCalls(unittest.TestCase):
@@ -319,34 +336,38 @@
             print()
             print(out)
             print(err)
+        expected_errors = sys.__stdout__.errors
         expected_stdin_encoding = sys.__stdin__.encoding
         expected_pipe_encoding = self._get_default_pipe_encoding()
         expected_output = os.linesep.join([
         "--- Use defaults ---",
         "Expected encoding: default",
         "Expected errors: default",
-        "stdin: {0}:strict",
-        "stdout: {1}:strict",
-        "stderr: {1}:backslashreplace",
+        "stdin: {in_encoding}:{errors}",
+        "stdout: {out_encoding}:{errors}",
+        "stderr: {out_encoding}:backslashreplace",
         "--- Set errors only ---",
         "Expected encoding: default",
-        "Expected errors: surrogateescape",
-        "stdin: {0}:surrogateescape",
-        "stdout: {1}:surrogateescape",
-        "stderr: {1}:backslashreplace",
+        "Expected errors: ignore",
+        "stdin: {in_encoding}:ignore",
+        "stdout: {out_encoding}:ignore",
+        "stderr: {out_encoding}:backslashreplace",
         "--- Set encoding only ---",
         "Expected encoding: latin-1",
         "Expected errors: default",
-        "stdin: latin-1:strict",
-        "stdout: latin-1:strict",
+        "stdin: latin-1:{errors}",
+        "stdout: latin-1:{errors}",
         "stderr: latin-1:backslashreplace",
         "--- Set encoding and errors ---",
         "Expected encoding: latin-1",
-        "Expected errors: surrogateescape",
-        "stdin: latin-1:surrogateescape",
-        "stdout: latin-1:surrogateescape",
-        "stderr: latin-1:backslashreplace"]).format(expected_stdin_encoding,
-                                                    expected_pipe_encoding)
+        "Expected errors: replace",
+        "stdin: latin-1:replace",
+        "stdout: latin-1:replace",
+        "stderr: latin-1:backslashreplace"])
+        expected_output = expected_output.format(
+                                in_encoding=expected_stdin_encoding,
+                                out_encoding=expected_pipe_encoding,
+                                errors=expected_errors)
         # This is useful if we ever trip over odd platform behaviour
         self.maxDiff = None
         self.assertEqual(out.strip(), expected_output)
diff --git a/Lib/test/test_codeccallbacks.py b/Lib/test/test_codeccallbacks.py
index 84804bb..a1ce9cf 100644
--- a/Lib/test/test_codeccallbacks.py
+++ b/Lib/test/test_codeccallbacks.py
@@ -819,7 +819,7 @@
             def __getitem__(self, key):
                 raise ValueError
         #self.assertRaises(ValueError, "\xff".translate, D())
-        self.assertRaises(TypeError, "\xff".translate, {0xff: sys.maxunicode+1})
+        self.assertRaises(ValueError, "\xff".translate, {0xff: sys.maxunicode+1})
         self.assertRaises(TypeError, "\xff".translate, {0xff: ()})
 
     def test_bug828737(self):
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index 9b62d5b..f59c37d 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -890,10 +890,6 @@
                          "\U00010fff\uD800")
         self.assertTrue(codecs.lookup_error("surrogatepass"))
 
-    def test_readline(self):
-        self.skipTest("issue #20571: code page 65001 codec does not "
-                      "support partial decoder yet")
-
 
 class UTF7Test(ReadTest, unittest.TestCase):
     encoding = "utf-7"
@@ -1600,6 +1596,12 @@
         self.assertEqual(codecs.decode(b'abc'), 'abc')
         self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
 
+        # test keywords
+        self.assertEqual(codecs.decode(obj=b'\xe4\xf6\xfc', encoding='latin-1'),
+                         '\xe4\xf6\xfc')
+        self.assertEqual(codecs.decode(b'[\xff]', 'ascii', errors='ignore'),
+                         '[]')
+
     def test_encode(self):
         self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                          b'\xe4\xf6\xfc')
@@ -1608,6 +1610,12 @@
         self.assertEqual(codecs.encode('abc'), b'abc')
         self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
 
+        # test keywords
+        self.assertEqual(codecs.encode(obj='\xe4\xf6\xfc', encoding='latin-1'),
+                         b'\xe4\xf6\xfc')
+        self.assertEqual(codecs.encode('[\xff]', 'ascii', errors='ignore'),
+                         b'[]')
+
     def test_register(self):
         self.assertRaises(TypeError, codecs.register)
         self.assertRaises(TypeError, codecs.register, 42)
@@ -2750,15 +2758,15 @@
         self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
             codecs.code_page_encode, 932, '\xff')
         self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
-            codecs.code_page_decode, 932, b'\x81\x00')
+            codecs.code_page_decode, 932, b'\x81\x00', 'strict', True)
         self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
-            codecs.code_page_decode, self.CP_UTF8, b'\xff')
+            codecs.code_page_decode, self.CP_UTF8, b'\xff', 'strict', True)
 
     def check_decode(self, cp, tests):
         for raw, errors, expected in tests:
             if expected is not None:
                 try:
-                    decoded = codecs.code_page_decode(cp, raw, errors)
+                    decoded = codecs.code_page_decode(cp, raw, errors, True)
                 except UnicodeDecodeError as err:
                     self.fail('Unable to decode %a from "cp%s" with '
                               'errors=%r: %s' % (raw, cp, errors, err))
@@ -2770,7 +2778,7 @@
                 self.assertLessEqual(decoded[1], len(raw))
             else:
                 self.assertRaises(UnicodeDecodeError,
-                    codecs.code_page_decode, cp, raw, errors)
+                    codecs.code_page_decode, cp, raw, errors, True)
 
     def check_encode(self, cp, tests):
         for text, errors, expected in tests:
@@ -2799,6 +2807,9 @@
             ('[\u20ac]', 'replace', b'[?]'),
             ('[\xff]', 'backslashreplace', b'[\\xff]'),
             ('[\xff]', 'xmlcharrefreplace', b'[&#255;]'),
+            ('\udcff', 'strict', None),
+            ('[\udcff]', 'surrogateescape', b'[\xff]'),
+            ('[\udcff]', 'surrogatepass', None),
         ))
         self.check_decode(932, (
             (b'abc', 'strict', 'abc'),
@@ -2808,6 +2819,7 @@
             (b'[\xff]', 'ignore', '[]'),
             (b'[\xff]', 'replace', '[\ufffd]'),
             (b'[\xff]', 'surrogateescape', '[\udcff]'),
+            (b'[\xff]', 'surrogatepass', None),
             (b'\x81\x00abc', 'strict', None),
             (b'\x81\x00abc', 'ignore', '\x00abc'),
             (b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
@@ -2818,9 +2830,12 @@
             ('abc', 'strict', b'abc'),
             ('\xe9\u20ac', 'strict',  b'\xe9\x80'),
             ('\xff', 'strict', b'\xff'),
+            # test error handlers
             ('\u0141', 'strict', None),
             ('\u0141', 'ignore', b''),
             ('\u0141', 'replace', b'L'),
+            ('\udc98', 'surrogateescape', b'\x98'),
+            ('\udc98', 'surrogatepass', None),
         ))
         self.check_decode(1252, (
             (b'abc', 'strict', 'abc'),
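
A short sketch of the keyword-argument support the new assertions cover:

    import codecs

    print(codecs.decode(obj=b'\xe4\xf6\xfc', encoding='latin-1'))   # 'äöü'
    print(codecs.encode('[\xff]', 'ascii', errors='ignore'))        # b'[]'
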
diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py
index ee28a6c..7e14980 100644
--- a/Lib/test/test_collections.py
+++ b/Lib/test/test_collections.py
@@ -1187,6 +1187,21 @@
         self.assertEqual(list(od.items()), pairs)
         self.assertEqual(list(reversed(od)),
                          [t[0] for t in reversed(pairs)])
+        self.assertEqual(list(reversed(od.keys())),
+                         [t[0] for t in reversed(pairs)])
+        self.assertEqual(list(reversed(od.values())),
+                         [t[1] for t in reversed(pairs)])
+        self.assertEqual(list(reversed(od.items())), list(reversed(pairs)))
+
+    def test_detect_deletion_during_iteration(self):
+        od = OrderedDict.fromkeys('abc')
+        it = iter(od)
+        key = next(it)
+        del od[key]
+        with self.assertRaises(Exception):
+            # Note, the exact exception raised is not guaranteed
+            # The only guarantee is that the next() call will not succeed
+            next(it)
 
     def test_popitem(self):
         pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
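
Sketch of the two behaviours the new tests pin down: OrderedDict views are
reversible, and deleting a key during iteration is detected (the exact
exception type is not guaranteed):

    from collections import OrderedDict

    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    print(list(reversed(od.keys())))     # ['c', 'b', 'a']

    it = iter(od)
    next(it)
    del od['b']
    try:
        next(it)
    except Exception as exc:             # typically RuntimeError
        print(type(exc).__name__)
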
diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py
index c74b2ca..55254b5 100644
--- a/Lib/test/test_concurrent_futures.py
+++ b/Lib/test/test_concurrent_futures.py
@@ -425,6 +425,13 @@
         self.assertTrue(collected,
                         "Stale reference not collected within timeout.")
 
+    def test_max_workers_negative(self):
+        for number in (0, -1):
+            with self.assertRaisesRegex(ValueError,
+                                        "max_workers must be greater "
+                                        "than 0"):
+                self.executor_type(max_workers=number)
+
 
 class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
     def test_map_submits_without_iteration(self):
diff --git a/Lib/test/test_csv.py b/Lib/test/test_csv.py
index 7e2485f..7c31ac7 100644
--- a/Lib/test/test_csv.py
+++ b/Lib/test/test_csv.py
@@ -575,6 +575,16 @@
             fileobj.readline() # header
             self.assertEqual(fileobj.read(), "10,,abc\r\n")
 
+    def test_write_multiple_dict_rows(self):
+        fileobj = StringIO()
+        writer = csv.DictWriter(fileobj, fieldnames=["f1", "f2", "f3"])
+        writer.writeheader()
+        self.assertEqual(fileobj.getvalue(), "f1,f2,f3\r\n")
+        writer.writerows([{"f1": 1, "f2": "abc", "f3": "f"},
+                          {"f1": 2, "f2": 5, "f3": "xyz"}])
+        self.assertEqual(fileobj.getvalue(),
+                         "f1,f2,f3\r\n1,abc,f\r\n2,5,xyz\r\n")
+
     def test_write_no_fields(self):
         fileobj = StringIO()
         self.assertRaises(TypeError, csv.DictWriter, fileobj)
diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py
index 4b27907..8072899 100644
--- a/Lib/test/test_decimal.py
+++ b/Lib/test/test_decimal.py
@@ -33,12 +33,13 @@
 import numbers
 import locale
 from test.support import (run_unittest, run_doctest, is_resource_enabled,
-                          requires_IEEE_754)
+                          requires_IEEE_754, requires_docstrings)
 from test.support import (check_warnings, import_fresh_module, TestFailed,
                           run_with_locale, cpython_only)
 import random
 import time
 import warnings
+import inspect
 try:
     import threading
 except ImportError:
@@ -4448,18 +4449,6 @@
 class PyFunctionality(unittest.TestCase):
     """Extra functionality in decimal.py"""
 
-    def test_py_quantize_watchexp(self):
-        # watchexp functionality
-        Decimal = P.Decimal
-        localcontext = P.localcontext
-
-        with localcontext() as c:
-            c.prec = 1
-            c.Emax = 1
-            c.Emin = -1
-            x = Decimal(99999).quantize(Decimal("1e3"), watchexp=False)
-            self.assertEqual(x, Decimal('1.00E+5'))
-
     def test_py_alternate_formatting(self):
         # triples giving a format, a Decimal, and the expected result
         Decimal = P.Decimal
@@ -5402,6 +5391,143 @@
             y = Decimal(10**(9*25)).__sizeof__()
             self.assertEqual(y, x+4)
 
+@requires_docstrings
+@unittest.skipUnless(C, "test requires C version")
+class SignatureTest(unittest.TestCase):
+    """Function signatures"""
+
+    def test_inspect_module(self):
+        for attr in dir(P):
+            if attr.startswith('_'):
+                continue
+            p_func = getattr(P, attr)
+            c_func = getattr(C, attr)
+            if (attr == 'Decimal' or attr == 'Context' or
+                inspect.isfunction(p_func)):
+                p_sig = inspect.signature(p_func)
+                c_sig = inspect.signature(c_func)
+
+                # parameter names:
+                c_names = list(c_sig.parameters.keys())
+                p_names = [x for x in p_sig.parameters.keys() if not
+                           x.startswith('_')]
+
+                self.assertEqual(c_names, p_names,
+                                 msg="parameter name mismatch in %s" % p_func)
+
+                c_kind = [x.kind for x in c_sig.parameters.values()]
+                p_kind = [x[1].kind for x in p_sig.parameters.items() if not
+                          x[0].startswith('_')]
+
+                # parameters:
+                if attr != 'setcontext':
+                    self.assertEqual(c_kind, p_kind,
+                                     msg="parameter kind mismatch in %s" % p_func)
+
+    def test_inspect_types(self):
+
+        POS = inspect._ParameterKind.POSITIONAL_ONLY
+        POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
+
+        # Type heuristic (type annotations would help!):
+        pdict = {C: {'other': C.Decimal(1),
+                     'third': C.Decimal(1),
+                     'x': C.Decimal(1),
+                     'y': C.Decimal(1),
+                     'z': C.Decimal(1),
+                     'a': C.Decimal(1),
+                     'b': C.Decimal(1),
+                     'c': C.Decimal(1),
+                     'exp': C.Decimal(1),
+                     'modulo': C.Decimal(1),
+                     'num': "1",
+                     'f': 1.0,
+                     'rounding': C.ROUND_HALF_UP,
+                     'context': C.getcontext()},
+                 P: {'other': P.Decimal(1),
+                     'third': P.Decimal(1),
+                     'a': P.Decimal(1),
+                     'b': P.Decimal(1),
+                     'c': P.Decimal(1),
+                     'exp': P.Decimal(1),
+                     'modulo': P.Decimal(1),
+                     'num': "1",
+                     'f': 1.0,
+                     'rounding': P.ROUND_HALF_UP,
+                     'context': P.getcontext()}}
+
+        def mkargs(module, sig):
+            args = []
+            kwargs = {}
+            for name, param in sig.parameters.items():
+                if name == 'self': continue
+                if param.kind == POS:
+                    args.append(pdict[module][name])
+                elif param.kind == POS_KWD:
+                    kwargs[name] = pdict[module][name]
+                else:
+                    raise TestFailed("unexpected parameter kind")
+            return args, kwargs
+
+        def tr(s):
+            """The C Context docstrings use 'x' in order to prevent confusion
+               with the article 'a' in the descriptions."""
+            if s == 'x': return 'a'
+            if s == 'y': return 'b'
+            if s == 'z': return 'c'
+            return s
+
+        def doit(ty):
+            p_type = getattr(P, ty)
+            c_type = getattr(C, ty)
+            for attr in dir(p_type):
+                if attr.startswith('_'):
+                    continue
+                p_func = getattr(p_type, attr)
+                c_func = getattr(c_type, attr)
+                if inspect.isfunction(p_func):
+                    p_sig = inspect.signature(p_func)
+                    c_sig = inspect.signature(c_func)
+
+                    # parameter names:
+                    p_names = list(p_sig.parameters.keys())
+                    c_names = [tr(x) for x in c_sig.parameters.keys()]
+
+                    self.assertEqual(c_names, p_names,
+                                     msg="parameter name mismatch in %s" % p_func)
+
+                    p_kind = [x.kind for x in p_sig.parameters.values()]
+                    c_kind = [x.kind for x in c_sig.parameters.values()]
+
+                    # 'self' parameter:
+                    self.assertIs(p_kind[0], POS_KWD)
+                    self.assertIs(c_kind[0], POS)
+
+                    # remaining parameters:
+                    if ty == 'Decimal':
+                        self.assertEqual(c_kind[1:], p_kind[1:],
+                                         msg="parameter kind mismatch in %s" % p_func)
+                    else: # Context methods are positional only in the C version.
+                        self.assertEqual(len(c_kind), len(p_kind),
+                                         msg="parameter kind mismatch in %s" % p_func)
+
+                    # Run the function:
+                    args, kwds = mkargs(C, c_sig)
+                    try:
+                        getattr(c_type(9), attr)(*args, **kwds)
+                    except Exception as err:
+                        raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
+
+                    args, kwds = mkargs(P, p_sig)
+                    try:
+                        getattr(p_type(9), attr)(*args, **kwds)
+                    except Exception as err:
+                        raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
+
+        doit('Decimal')
+        doit('Context')
+
+
 all_tests = [
   CExplicitConstructionTest, PyExplicitConstructionTest,
   CImplicitConstructionTest, PyImplicitConstructionTest,
@@ -5427,6 +5553,7 @@
     all_tests = all_tests[1::2]
 else:
     all_tests.insert(0, CheckAttributes)
+    all_tests.insert(1, SignatureTest)
 
 
 def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
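
The SignatureTest added above compares the pure-Python and C signatures via
inspect.signature(); a tiny illustration of the mechanism (the example function
is hypothetical):

    import inspect

    def quantize(self, exp, rounding=None, context=None):
        pass

    sig = inspect.signature(quantize)
    print(list(sig.parameters))    # ['self', 'exp', 'rounding', 'context']
    print(sig.parameters['rounding'].kind is
          inspect.Parameter.POSITIONAL_OR_KEYWORD)   # True
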
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index 8bb7d6a..e65edb2 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -4160,6 +4160,7 @@
                 ('__add__',      'x + y',                   'x += y'),
                 ('__sub__',      'x - y',                   'x -= y'),
                 ('__mul__',      'x * y',                   'x *= y'),
+                ('__matmul__',   'x @ y',                   'x @= y'),
                 ('__truediv__',  'operator.truediv(x, y)',  None),
                 ('__floordiv__', 'operator.floordiv(x, y)', None),
                 ('__div__',      'x / y',                   'x /= y'),
diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py
index 56193e8..c62e7ca 100644
--- a/Lib/test/test_doctest.py
+++ b/Lib/test/test_doctest.py
@@ -2096,22 +2096,9 @@
          >>> suite.run(unittest.TestResult())
          <unittest.result.TestResult run=0 errors=0 failures=0>
 
-       However, if DocTestSuite finds no docstrings, it raises an error:
+       The module need not contain any docstrings either:
 
-         >>> try:
-         ...     doctest.DocTestSuite('test.sample_doctest_no_docstrings')
-         ... except ValueError as e:
-         ...     error = e
-
-         >>> print(error.args[1])
-         has no docstrings
-
-       You can prevent this error by passing a DocTestFinder instance with
-       the `exclude_empty` keyword argument set to False:
-
-         >>> finder = doctest.DocTestFinder(exclude_empty=False)
-         >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings',
-         ...                              test_finder=finder)
+         >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings')
          >>> suite.run(unittest.TestResult())
          <unittest.result.TestResult run=0 errors=0 failures=0>
 
@@ -2121,6 +2108,22 @@
          >>> suite.run(unittest.TestResult())
          <unittest.result.TestResult run=9 errors=0 failures=4>
 
+       We can also provide a DocTestFinder:
+
+         >>> finder = doctest.DocTestFinder()
+         >>> suite = doctest.DocTestSuite('test.sample_doctest',
+         ...                          test_finder=finder)
+         >>> suite.run(unittest.TestResult())
+         <unittest.result.TestResult run=9 errors=0 failures=4>
+
+       The DocTestFinder need not return any tests:
+
+         >>> finder = doctest.DocTestFinder()
+         >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings',
+         ...                          test_finder=finder)
+         >>> suite.run(unittest.TestResult())
+         <unittest.result.TestResult run=0 errors=0 failures=0>
+
        We can supply global variables.  If we pass globs, they will be
        used instead of the module globals.  Here we'll pass an empty
        globals, triggering an extra error:
@@ -2168,7 +2171,7 @@
          >>> test.test_doctest.sillySetup
          Traceback (most recent call last):
          ...
-         AttributeError: 'module' object has no attribute 'sillySetup'
+         AttributeError: module 'test.test_doctest' has no attribute 'sillySetup'
 
        The setUp and tearDown funtions are passed test objects. Here
        we'll use the setUp function to supply the missing variable y:
@@ -2314,7 +2317,7 @@
          >>> test.test_doctest.sillySetup
          Traceback (most recent call last):
          ...
-         AttributeError: 'module' object has no attribute 'sillySetup'
+         AttributeError: module 'test.test_doctest' has no attribute 'sillySetup'
 
        The setUp and tearDown funtions are passed test objects.
        Here, we'll use a setUp function to set the favorite color in
@@ -2897,7 +2900,7 @@
 
 def test_main():
     # Check the doctest cases in doctest itself:
-    support.run_doctest(doctest, verbosity=True)
+    ret = support.run_doctest(doctest, verbosity=True)
     # Check the doctest cases defined here:
     from test import test_doctest
     support.run_doctest(test_doctest, verbosity=True)
diff --git a/Lib/test/test_docxmlrpc.py b/Lib/test/test_docxmlrpc.py
index cb6366c..eb97516 100644
--- a/Lib/test/test_docxmlrpc.py
+++ b/Lib/test/test_docxmlrpc.py
@@ -87,10 +87,11 @@
         threading.Thread(target=server, args=(self.evt, 1)).start()
 
         # wait for port to be assigned
-        n = 1000
-        while n > 0 and PORT is None:
-            time.sleep(0.001)
-            n -= 1
+        deadline = time.monotonic() + 10.0
+        while PORT is None:
+            time.sleep(0.010)
+            if time.monotonic() > deadline:
+                break
 
         self.client = http.client.HTTPConnection("localhost:%d" % PORT)
 
diff --git a/Lib/test/test_fork1.py b/Lib/test/test_fork1.py
index e0626df..8bcbd46 100644
--- a/Lib/test/test_fork1.py
+++ b/Lib/test/test_fork1.py
@@ -18,13 +18,14 @@
 
 class ForkTest(ForkWait):
     def wait_impl(self, cpid):
-        for i in range(10):
+        deadline = time.monotonic() + 10.0
+        while time.monotonic() <= deadline:
             # waitpid() shouldn't hang, but some of the buildbots seem to hang
             # in the forking tests.  This is an attempt to fix the problem.
             spid, status = os.waitpid(cpid, os.WNOHANG)
             if spid == cpid:
                 break
-            time.sleep(1.0)
+            time.sleep(0.1)
 
         self.assertEqual(spid, cpid)
         self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
diff --git a/Lib/test/test_format.py b/Lib/test/test_format.py
index fc71e48..631bf35 100644
--- a/Lib/test/test_format.py
+++ b/Lib/test/test_format.py
@@ -142,8 +142,6 @@
         testformat("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
         # same, except no 0 flag
         testformat("%#+27.23X", big, " +0X001234567890ABCDEF12345")
-        with self.assertWarns(DeprecationWarning):
-            testformat("%x", float(big), "123456_______________", 6)
         big = 0o12345670123456701234567012345670  # 32 octal digits
         testformat("%o", big, "12345670123456701234567012345670")
         testformat("%o", -big, "-12345670123456701234567012345670")
@@ -183,8 +181,6 @@
         testformat("%034.33o", big, "0012345670123456701234567012345670")
         # base marker shouldn't change that
         testformat("%0#34.33o", big, "0o012345670123456701234567012345670")
-        with self.assertWarns(DeprecationWarning):
-            testformat("%o", float(big), "123456__________________________", 6)
         # Some small ints, in both Python int and flavors).
         testformat("%d", 42, "42")
         testformat("%d", -42, "-42")
@@ -195,8 +191,6 @@
         testformat("%#x", 1, "0x1")
         testformat("%#X", 1, "0X1")
         testformat("%#X", 1, "0X1")
-        with self.assertWarns(DeprecationWarning):
-            testformat("%#x", 1.0, "0x1")
         testformat("%#o", 1, "0o1")
         testformat("%#o", 1, "0o1")
         testformat("%#o", 0, "0o0")
@@ -213,14 +207,10 @@
         testformat("%x", -0x42, "-42")
         testformat("%x", 0x42, "42")
         testformat("%x", -0x42, "-42")
-        with self.assertWarns(DeprecationWarning):
-            testformat("%x", float(0x42), "42")
         testformat("%o", 0o42, "42")
         testformat("%o", -0o42, "-42")
         testformat("%o", 0o42, "42")
         testformat("%o", -0o42, "-42")
-        with self.assertWarns(DeprecationWarning):
-            testformat("%o", float(0o42), "42")
         testformat("%r", "\u0378", "'\\u0378'")  # non printable
         testformat("%a", "\u0378", "'\\u0378'")  # non printable
         testformat("%r", "\u0374", "'\u0374'")   # printable
diff --git a/Lib/test/test_fractions.py b/Lib/test/test_fractions.py
index 3336532..e86d5ce 100644
--- a/Lib/test/test_fractions.py
+++ b/Lib/test/test_fractions.py
@@ -330,7 +330,6 @@
         self.assertTypedEquals(F(-2, 10), round(F(-15, 100), 1))
         self.assertTypedEquals(F(-2, 10), round(F(-25, 100), 1))
 
-
     def testArithmetic(self):
         self.assertEqual(F(1, 2), F(1, 10) + F(2, 5))
         self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5))
@@ -402,6 +401,8 @@
         self.assertTypedEquals(2.0 , 4 ** F(1, 2))
         self.assertTypedEquals(0.25, 2.0 ** F(-2, 1))
         self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10))
+        self.assertRaises(ZeroDivisionError, operator.pow,
+                          F(0, 1), -2)
 
     def testMixingWithDecimal(self):
         # Decimal refuses mixed arithmetic (but not mixed comparisons)
diff --git a/Lib/test/test_grammar.py b/Lib/test/test_grammar.py
index bba8820..a7bad2d 100644
--- a/Lib/test/test_grammar.py
+++ b/Lib/test/test_grammar.py
@@ -985,6 +985,20 @@
         self.assertFalse((False is 2) is 3)
         self.assertFalse(False is 2 is 3)
 
+    def test_matrix_mul(self):
+        # This is not intended to be a comprehensive test, rather just to be few
+        # samples of the @ operator in test_grammar.py.
+        class M:
+            def __matmul__(self, o):
+                return 4
+            def __imatmul__(self, o):
+                self.other = o
+                return self
+        m = M()
+        self.assertEqual(m @ m, 4)
+        m @= 42
+        self.assertEqual(m.other, 42)
+
 
 def test_main():
     run_unittest(TokenTests, GrammarTests)
diff --git a/Lib/test/test_heapq.py b/Lib/test/test_heapq.py
index b5a2fd8..59c7029 100644
--- a/Lib/test/test_heapq.py
+++ b/Lib/test/test_heapq.py
@@ -13,7 +13,7 @@
 # _heapq.nlargest/nsmallest are saved in heapq._nlargest/_smallest when
 # _heapq is imported, so check them there
 func_names = ['heapify', 'heappop', 'heappush', 'heappushpop',
-              'heapreplace', '_nlargest', '_nsmallest']
+              'heapreplace', '_heapreplace_max']
 
 class TestModules(TestCase):
     def test_py_functions(self):
diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
index 22f7329..1a6d8d0 100644
--- a/Lib/test/test_httplib.py
+++ b/Lib/test/test_httplib.py
@@ -18,6 +18,26 @@
 # Root cert file (CA) for svn.python.org's cert
 CACERT_svn_python_org = os.path.join(here, 'https_svn_python_org_root.pem')
 
+# constants for testing chunked encoding
+chunked_start = (
+    'HTTP/1.1 200 OK\r\n'
+    'Transfer-Encoding: chunked\r\n\r\n'
+    'a\r\n'
+    'hello worl\r\n'
+    '3\r\n'
+    'd! \r\n'
+    '8\r\n'
+    'and now \r\n'
+    '22\r\n'
+    'for something completely different\r\n'
+)
+chunked_expected = b'hello world! and now for something completely different'
+chunk_extension = ";foo=bar"
+last_chunk = "0\r\n"
+last_chunk_extended = "0" + chunk_extension + "\r\n"
+trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
+chunked_end = "\r\n"
+
 HOST = support.HOST
 
 class FakeSocket:
@@ -38,7 +58,10 @@
     def makefile(self, mode, bufsize=None):
         if mode != 'r' and mode != 'rb':
             raise client.UnimplementedFileMode()
-        return self.fileclass(self.text)
+        # keep the file around so we can check how much was read from it
+        self.file = self.fileclass(self.text)
+        self.file.close = lambda: None  # nerf close()
+        return self.file
 
     def close(self):
         pass
@@ -435,20 +458,8 @@
             conn.request('POST', 'test', conn)
 
     def test_chunked(self):
-        chunked_start = (
-            'HTTP/1.1 200 OK\r\n'
-            'Transfer-Encoding: chunked\r\n\r\n'
-            'a\r\n'
-            'hello worl\r\n'
-            '3\r\n'
-            'd! \r\n'
-            '8\r\n'
-            'and now \r\n'
-            '22\r\n'
-            'for something completely different\r\n'
-        )
-        expected = b'hello world! and now for something completely different'
-        sock = FakeSocket(chunked_start + '0\r\n')
+        expected = chunked_expected
+        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
         resp = client.HTTPResponse(sock, method="GET")
         resp.begin()
         self.assertEqual(resp.read(), expected)
@@ -456,7 +467,7 @@
 
         # Various read sizes
         for n in range(1, 12):
-            sock = FakeSocket(chunked_start + '0\r\n')
+            sock = FakeSocket(chunked_start + last_chunk + chunked_end)
             resp = client.HTTPResponse(sock, method="GET")
             resp.begin()
             self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
@@ -479,23 +490,12 @@
                 resp.close()
 
     def test_readinto_chunked(self):
-        chunked_start = (
-            'HTTP/1.1 200 OK\r\n'
-            'Transfer-Encoding: chunked\r\n\r\n'
-            'a\r\n'
-            'hello worl\r\n'
-            '3\r\n'
-            'd! \r\n'
-            '8\r\n'
-            'and now \r\n'
-            '22\r\n'
-            'for something completely different\r\n'
-        )
-        expected = b'hello world! and now for something completely different'
+
+        expected = chunked_expected
         nexpected = len(expected)
         b = bytearray(128)
 
-        sock = FakeSocket(chunked_start + '0\r\n')
+        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
         resp = client.HTTPResponse(sock, method="GET")
         resp.begin()
         n = resp.readinto(b)
@@ -505,7 +505,7 @@
 
         # Various read sizes
         for n in range(1, 12):
-            sock = FakeSocket(chunked_start + '0\r\n')
+            sock = FakeSocket(chunked_start + last_chunk + chunked_end)
             resp = client.HTTPResponse(sock, method="GET")
             resp.begin()
             m = memoryview(b)
@@ -541,7 +541,7 @@
             '1\r\n'
             'd\r\n'
         )
-        sock = FakeSocket(chunked_start + '0\r\n')
+        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
         resp = client.HTTPResponse(sock, method="HEAD")
         resp.begin()
         self.assertEqual(resp.read(), b'')
@@ -561,7 +561,7 @@
             '1\r\n'
             'd\r\n'
         )
-        sock = FakeSocket(chunked_start + '0\r\n')
+        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
         resp = client.HTTPResponse(sock, method="HEAD")
         resp.begin()
         b = bytearray(5)
@@ -636,6 +636,7 @@
             + '0' * 65536 + 'a\r\n'
             'hello world\r\n'
             '0\r\n'
+            '\r\n'
         )
         resp = client.HTTPResponse(FakeSocket(body))
         resp.begin()
@@ -675,6 +676,239 @@
         conn.request('POST', '/', body)
         self.assertGreater(sock.sendall_calls, 1)
 
+    def test_chunked_extension(self):
+        extra = '3;foo=bar\r\n' + 'abc\r\n'
+        expected = chunked_expected + b'abc'
+
+        sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
+        resp = client.HTTPResponse(sock, method="GET")
+        resp.begin()
+        self.assertEqual(resp.read(), expected)
+        resp.close()
+
+    def test_chunked_missing_end(self):
+        """some servers may serve up a short chunked encoding stream"""
+        expected = chunked_expected
+        sock = FakeSocket(chunked_start + last_chunk)  #no terminating crlf
+        resp = client.HTTPResponse(sock, method="GET")
+        resp.begin()
+        self.assertEqual(resp.read(), expected)
+        resp.close()
+
+    def test_chunked_trailers(self):
+        """See that trailers are read and ignored"""
+        expected = chunked_expected
+        sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
+        resp = client.HTTPResponse(sock, method="GET")
+        resp.begin()
+        self.assertEqual(resp.read(), expected)
+        # we should have reached the end of the file
+        self.assertEqual(sock.file.read(100), b"") #we read to the end
+        resp.close()
+
+    def test_chunked_sync(self):
+        """Check that we don't read past the end of the chunked-encoding stream"""
+        expected = chunked_expected
+        extradata = "extradata"
+        sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
+        resp = client.HTTPResponse(sock, method="GET")
+        resp.begin()
+        self.assertEqual(resp.read(), expected)
+        # the file should now have our extradata ready to be read
+        self.assertEqual(sock.file.read(100), extradata.encode("ascii")) #we read to the end
+        resp.close()
+
+    def test_content_length_sync(self):
+        """Check that we don't read past the end of the Content-Length stream"""
+        extradata = "extradata"
+        expected = b"Hello123\r\n"
+        sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello123\r\n' + extradata)
+        resp = client.HTTPResponse(sock, method="GET")
+        resp.begin()
+        self.assertEqual(resp.read(), expected)
+        # the file should now have our extradata ready to be read
+        self.assertEqual(sock.file.read(100), extradata.encode("ascii")) #we read to the end
+        resp.close()
+
+class ExtendedReadTest(TestCase):
+    """
+    Test peek(), read1(), readline()
+    """
+    lines = (
+        'HTTP/1.1 200 OK\r\n'
+        '\r\n'
+        'hello world!\n'
+        'and now \n'
+        'for something completely different\n'
+        'foo'
+        )
+    lines_expected = lines[lines.find('hello'):].encode("ascii")
+    lines_chunked = (
+        'HTTP/1.1 200 OK\r\n'
+        'Transfer-Encoding: chunked\r\n\r\n'
+        'a\r\n'
+        'hello worl\r\n'
+        '3\r\n'
+        'd!\n\r\n'
+        '9\r\n'
+        'and now \n\r\n'
+        '23\r\n'
+        'for something completely different\n\r\n'
+        '3\r\n'
+        'foo\r\n'
+        '0\r\n' # terminating chunk
+        '\r\n'  # end of trailers
+    )
+
+    def setUp(self):
+        sock = FakeSocket(self.lines)
+        resp = client.HTTPResponse(sock, method="GET")
+        resp.begin()
+        resp.fp = io.BufferedReader(resp.fp)
+        self.resp = resp
+
+
+
+    def test_peek(self):
+        resp = self.resp
+        # patch up the buffered peek so that it returns not too much stuff
+        oldpeek = resp.fp.peek
+        def mypeek(n=-1):
+            p = oldpeek(n)
+            if n >= 0:
+                return p[:n]
+            return p[:10]
+        resp.fp.peek = mypeek
+
+        all = []
+        while True:
+            # try a short peek
+            p = resp.peek(3)
+            if p:
+                self.assertGreater(len(p), 0)
+                # then unbounded peek
+                p2 = resp.peek()
+                self.assertGreaterEqual(len(p2), len(p))
+                self.assertTrue(p2.startswith(p))
+                next = resp.read(len(p2))
+                self.assertEqual(next, p2)
+            else:
+                next = resp.read()
+                self.assertFalse(next)
+            all.append(next)
+            if not next:
+                break
+        self.assertEqual(b"".join(all), self.lines_expected)
+
+    def test_readline(self):
+        resp = self.resp
+        self._verify_readline(self.resp.readline, self.lines_expected)
+
+    def _verify_readline(self, readline, expected):
+        all = []
+        while True:
+            # short readlines
+            line = readline(5)
+            if line and line != b"foo":
+                if len(line) < 5:
+                    self.assertTrue(line.endswith(b"\n"))
+            all.append(line)
+            if not line:
+                break
+        self.assertEqual(b"".join(all), expected)
+
+    def test_read1(self):
+        resp = self.resp
+        def r():
+            res = resp.read1(4)
+            self.assertLessEqual(len(res), 4)
+            return res
+        readliner = Readliner(r)
+        self._verify_readline(readliner.readline, self.lines_expected)
+
+    def test_read1_unbounded(self):
+        resp = self.resp
+        all = []
+        while True:
+            data = resp.read1()
+            if not data:
+                break
+            all.append(data)
+        self.assertEqual(b"".join(all), self.lines_expected)
+
+    def test_read1_bounded(self):
+        resp = self.resp
+        all = []
+        while True:
+            data = resp.read1(10)
+            if not data:
+                break
+            self.assertLessEqual(len(data), 10)
+            all.append(data)
+        self.assertEqual(b"".join(all), self.lines_expected)
+
+    def test_read1_0(self):
+        self.assertEqual(self.resp.read1(0), b"")
+
+    def test_peek_0(self):
+        p = self.resp.peek(0)
+        self.assertLessEqual(0, len(p))
+
+class ExtendedReadTestChunked(ExtendedReadTest):
+    """
+    Test peek(), read1(), readline() in chunked mode
+    """
+    lines = ExtendedReadTest.lines_chunked
+
+
+class Readliner:
+    """
+    A simple readline class that uses an arbitrary read function and buffering.
+    """
+    def __init__(self, readfunc):
+        self.readfunc = readfunc
+        self.remainder = b""
+
+    def readline(self, limit):
+        data = []
+        datalen = 0
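+        # start with whatever was left over from the previous readline() call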
+        read = self.remainder
+        try:
+            while True:
+                idx = read.find(b'\n')
+                if idx != -1:
+                    break
+                if datalen + len(read) >= limit:
+                    idx = limit - datalen - 1
+                    break
+                # read more data
+                data.append(read)
+                read = self.readfunc()
+                if not read:
+                    idx = 0  # EOF condition
+                    break
+            idx += 1
+            data.append(read[:idx])
+            self.remainder = read[idx:]
+            return b"".join(data)
+        except:
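+            # keep the data read so far so that it is not lost on error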
+            self.remainder = b"".join(data)
+            raise
+
 class OfflineTest(TestCase):
     def test_responses(self):
         self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
@@ -1019,7 +1253,8 @@
 def test_main(verbose=None):
     support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
                          HTTPSTest, RequestBodyTest, SourceAddressTest,
-                         HTTPResponseTest, TunnelTests)
+                         HTTPResponseTest, ExtendedReadTest,
+                         ExtendedReadTestChunked, TunnelTests)
 
 if __name__ == '__main__':
     test_main()
diff --git a/Lib/test/test_imghdr.py b/Lib/test/test_imghdr.py
index 0ad4343..e2a1aca 100644
--- a/Lib/test/test_imghdr.py
+++ b/Lib/test/test_imghdr.py
@@ -16,7 +16,8 @@
     ('python.ras', 'rast'),
     ('python.sgi', 'rgb'),
     ('python.tiff', 'tiff'),
-    ('python.xbm', 'xbm')
+    ('python.xbm', 'xbm'),
+    ('python.webp', 'webp'),
 )
 
 class UnseekableIO(io.FileIO):
diff --git a/Lib/test/test_importlib/builtin/test_finder.py b/Lib/test/test_importlib/builtin/test_finder.py
index 934562f..a2e6e1e 100644
--- a/Lib/test/test_importlib/builtin/test_finder.py
+++ b/Lib/test/test_importlib/builtin/test_finder.py
@@ -1,21 +1,21 @@
 from .. import abc
 from .. import util
-from . import util as builtin_util
 
-frozen_machinery, source_machinery = util.import_importlib('importlib.machinery')
+machinery = util.import_importlib('importlib.machinery')
 
 import sys
 import unittest
 
 
+@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
 class FindSpecTests(abc.FinderTests):
 
     """Test find_spec() for built-in modules."""
 
     def test_module(self):
         # Common case.
-        with util.uncache(builtin_util.NAME):
-            found = self.machinery.BuiltinImporter.find_spec(builtin_util.NAME)
+        with util.uncache(util.BUILTINS.good_name):
+            found = self.machinery.BuiltinImporter.find_spec(util.BUILTINS.good_name)
             self.assertTrue(found)
             self.assertEqual(found.origin, 'built-in')
 
@@ -39,23 +39,26 @@
 
     def test_ignore_path(self):
         # The value for 'path' should always trigger a failed import.
-        with util.uncache(builtin_util.NAME):
-            spec = self.machinery.BuiltinImporter.find_spec(builtin_util.NAME,
+        with util.uncache(util.BUILTINS.good_name):
+            spec = self.machinery.BuiltinImporter.find_spec(util.BUILTINS.good_name,
                                                             ['pkg'])
             self.assertIsNone(spec)
 
-Frozen_FindSpecTests, Source_FindSpecTests = util.test_both(FindSpecTests,
-        machinery=[frozen_machinery, source_machinery])
+
+(Frozen_FindSpecTests,
+ Source_FindSpecTests
+ ) = util.test_both(FindSpecTests, machinery=machinery)
 
 
+@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
 class FinderTests(abc.FinderTests):
 
     """Test find_module() for built-in modules."""
 
     def test_module(self):
         # Common case.
-        with util.uncache(builtin_util.NAME):
-            found = self.machinery.BuiltinImporter.find_module(builtin_util.NAME)
+        with util.uncache(util.BUILTINS.good_name):
+            found = self.machinery.BuiltinImporter.find_module(util.BUILTINS.good_name)
             self.assertTrue(found)
             self.assertTrue(hasattr(found, 'load_module'))
 
@@ -72,13 +75,15 @@
 
     def test_ignore_path(self):
         # The value for 'path' should always trigger a failed import.
-        with util.uncache(builtin_util.NAME):
-            loader = self.machinery.BuiltinImporter.find_module(builtin_util.NAME,
+        with util.uncache(util.BUILTINS.good_name):
+            loader = self.machinery.BuiltinImporter.find_module(util.BUILTINS.good_name,
                                                             ['pkg'])
             self.assertIsNone(loader)
 
-Frozen_FinderTests, Source_FinderTests = util.test_both(FinderTests,
-        machinery=[frozen_machinery, source_machinery])
+
+(Frozen_FinderTests,
+ Source_FinderTests
+ ) = util.test_both(FinderTests, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/builtin/test_loader.py b/Lib/test/test_importlib/builtin/test_loader.py
index a636f77..eaee025 100644
--- a/Lib/test/test_importlib/builtin/test_loader.py
+++ b/Lib/test/test_importlib/builtin/test_loader.py
@@ -1,14 +1,13 @@
 from .. import abc
 from .. import util
-from . import util as builtin_util
 
-frozen_machinery, source_machinery = util.import_importlib('importlib.machinery')
+machinery = util.import_importlib('importlib.machinery')
 
 import sys
 import types
 import unittest
 
-
+@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
 class LoaderTests(abc.LoaderTests):
 
     """Test load_module() for built-in modules."""
@@ -29,8 +28,8 @@
 
     def test_module(self):
         # Common case.
-        with util.uncache(builtin_util.NAME):
-            module = self.load_module(builtin_util.NAME)
+        with util.uncache(util.BUILTINS.good_name):
+            module = self.load_module(util.BUILTINS.good_name)
             self.verify(module)
 
     # Built-in modules cannot be a package.
@@ -41,9 +40,9 @@
 
     def test_module_reuse(self):
         # Test that the same module is used in a reload.
-        with util.uncache(builtin_util.NAME):
-            module1 = self.load_module(builtin_util.NAME)
-            module2 = self.load_module(builtin_util.NAME)
+        with util.uncache(util.BUILTINS.good_name):
+            module1 = self.load_module(util.BUILTINS.good_name)
+            module2 = self.load_module(util.BUILTINS.good_name)
             self.assertIs(module1, module2)
 
     def test_unloadable(self):
@@ -66,40 +65,44 @@
         self.assertEqual(cm.exception.name, module_name)
 
 
-Frozen_LoaderTests, Source_LoaderTests = util.test_both(LoaderTests,
-        machinery=[frozen_machinery, source_machinery])
+(Frozen_LoaderTests,
+ Source_LoaderTests
+ ) = util.test_both(LoaderTests, machinery=machinery)
 
 
+@unittest.skipIf(util.BUILTINS.good_name is None, 'no reasonable builtin module')
 class InspectLoaderTests:
 
     """Tests for InspectLoader methods for BuiltinImporter."""
 
     def test_get_code(self):
         # There is no code object.
-        result = self.machinery.BuiltinImporter.get_code(builtin_util.NAME)
+        result = self.machinery.BuiltinImporter.get_code(util.BUILTINS.good_name)
         self.assertIsNone(result)
 
     def test_get_source(self):
         # There is no source.
-        result = self.machinery.BuiltinImporter.get_source(builtin_util.NAME)
+        result = self.machinery.BuiltinImporter.get_source(util.BUILTINS.good_name)
         self.assertIsNone(result)
 
     def test_is_package(self):
         # Cannot be a package.
-        result = self.machinery.BuiltinImporter.is_package(builtin_util.NAME)
+        result = self.machinery.BuiltinImporter.is_package(util.BUILTINS.good_name)
         self.assertTrue(not result)
 
+    @unittest.skipIf(util.BUILTINS.bad_name is None, 'all modules are built in')
     def test_not_builtin(self):
         # Modules not built-in should raise ImportError.
         for meth_name in ('get_code', 'get_source', 'is_package'):
             method = getattr(self.machinery.BuiltinImporter, meth_name)
         with self.assertRaises(ImportError) as cm:
-            method(builtin_util.BAD_NAME)
-        self.assertRaises(builtin_util.BAD_NAME)
+            method(util.BUILTINS.bad_name)
+        self.assertEqual(cm.exception.name, util.BUILTINS.bad_name)
 
-Frozen_InspectLoaderTests, Source_InspectLoaderTests = util.test_both(
-        InspectLoaderTests,
-        machinery=[frozen_machinery, source_machinery])
+
+(Frozen_InspectLoaderTests,
+ Source_InspectLoaderTests
+ ) = util.test_both(InspectLoaderTests, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/builtin/util.py b/Lib/test/test_importlib/builtin/util.py
deleted file mode 100644
index 5704699..0000000
--- a/Lib/test/test_importlib/builtin/util.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import sys
-
-assert 'errno' in sys.builtin_module_names
-NAME = 'errno'
-
-assert 'importlib' not in sys.builtin_module_names
-BAD_NAME = 'importlib'
diff --git a/Lib/test/test_importlib/extension/test_case_sensitivity.py b/Lib/test/test_importlib/extension/test_case_sensitivity.py
index bb2528e..c7d6ca6 100644
--- a/Lib/test/test_importlib/extension/test_case_sensitivity.py
+++ b/Lib/test/test_importlib/extension/test_case_sensitivity.py
@@ -4,22 +4,21 @@
 import unittest
 
 from .. import util
-from . import util as ext_util
 
-frozen_machinery, source_machinery = util.import_importlib('importlib.machinery')
+machinery = util.import_importlib('importlib.machinery')
 
 
 # XXX find_spec tests
 
-@unittest.skipIf(ext_util.FILENAME is None, '_testcapi not available')
+@unittest.skipIf(util.EXTENSIONS.filename is None, '_testcapi not available')
 @util.case_insensitive_tests
 class ExtensionModuleCaseSensitivityTest:
 
     def find_module(self):
-        good_name = ext_util.NAME
+        good_name = util.EXTENSIONS.name
         bad_name = good_name.upper()
         assert good_name != bad_name
-        finder = self.machinery.FileFinder(ext_util.PATH,
+        finder = self.machinery.FileFinder(util.EXTENSIONS.path,
                                           (self.machinery.ExtensionFileLoader,
                                            self.machinery.EXTENSION_SUFFIXES))
         return finder.find_module(bad_name)
@@ -42,9 +41,10 @@
             loader = self.find_module()
             self.assertTrue(hasattr(loader, 'load_module'))
 
-Frozen_ExtensionCaseSensitivity, Source_ExtensionCaseSensitivity = util.test_both(
-        ExtensionModuleCaseSensitivityTest,
-        machinery=[frozen_machinery, source_machinery])
+
+(Frozen_ExtensionCaseSensitivity,
+ Source_ExtensionCaseSensitivity
+ ) = util.test_both(ExtensionModuleCaseSensitivityTest, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/extension/test_finder.py b/Lib/test/test_importlib/extension/test_finder.py
index 990f29c..71bf67f 100644
--- a/Lib/test/test_importlib/extension/test_finder.py
+++ b/Lib/test/test_importlib/extension/test_finder.py
@@ -1,8 +1,7 @@
 from .. import abc
-from .. import util as test_util
-from . import util
+from .. import util
 
-machinery = test_util.import_importlib('importlib.machinery')
+machinery = util.import_importlib('importlib.machinery')
 
 import unittest
 import warnings
@@ -14,7 +13,7 @@
     """Test the finder for extension modules."""
 
     def find_module(self, fullname):
-        importer = self.machinery.FileFinder(util.PATH,
+        importer = self.machinery.FileFinder(util.EXTENSIONS.path,
                                             (self.machinery.ExtensionFileLoader,
                                              self.machinery.EXTENSION_SUFFIXES))
         with warnings.catch_warnings():
@@ -22,7 +21,7 @@
             return importer.find_module(fullname)
 
     def test_module(self):
-        self.assertTrue(self.find_module(util.NAME))
+        self.assertTrue(self.find_module(util.EXTENSIONS.name))
 
     # No extension module as an __init__ available for testing.
     test_package = test_package_in_package = None
@@ -36,8 +35,10 @@
     def test_failure(self):
         self.assertIsNone(self.find_module('asdfjkl;'))
 
-Frozen_FinderTests, Source_FinderTests = test_util.test_both(
-        FinderTests, machinery=machinery)
+
+(Frozen_FinderTests,
+ Source_FinderTests
+ ) = util.test_both(FinderTests, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/extension/test_loader.py b/Lib/test/test_importlib/extension/test_loader.py
index fd9abf2..aefd050 100644
--- a/Lib/test/test_importlib/extension/test_loader.py
+++ b/Lib/test/test_importlib/extension/test_loader.py
@@ -1,4 +1,3 @@
-from . import util as ext_util
 from .. import abc
 from .. import util
 
@@ -15,8 +14,8 @@
     """Test load_module() for extension modules."""
 
     def setUp(self):
-        self.loader = self.machinery.ExtensionFileLoader(ext_util.NAME,
-                                                         ext_util.FILEPATH)
+        self.loader = self.machinery.ExtensionFileLoader(util.EXTENSIONS.name,
+                                                         util.EXTENSIONS.file_path)
 
     def load_module(self, fullname):
         return self.loader.load_module(fullname)
@@ -29,23 +28,23 @@
             self.load_module('XXX')
 
     def test_equality(self):
-        other = self.machinery.ExtensionFileLoader(ext_util.NAME,
-                                                   ext_util.FILEPATH)
+        other = self.machinery.ExtensionFileLoader(util.EXTENSIONS.name,
+                                                   util.EXTENSIONS.file_path)
         self.assertEqual(self.loader, other)
 
     def test_inequality(self):
-        other = self.machinery.ExtensionFileLoader('_' + ext_util.NAME,
-                                                   ext_util.FILEPATH)
+        other = self.machinery.ExtensionFileLoader('_' + util.EXTENSIONS.name,
+                                                   util.EXTENSIONS.file_path)
         self.assertNotEqual(self.loader, other)
 
     def test_module(self):
-        with util.uncache(ext_util.NAME):
-            module = self.load_module(ext_util.NAME)
-            for attr, value in [('__name__', ext_util.NAME),
-                                ('__file__', ext_util.FILEPATH),
+        with util.uncache(util.EXTENSIONS.name):
+            module = self.load_module(util.EXTENSIONS.name)
+            for attr, value in [('__name__', util.EXTENSIONS.name),
+                                ('__file__', util.EXTENSIONS.file_path),
                                 ('__package__', '')]:
                 self.assertEqual(getattr(module, attr), value)
-            self.assertIn(ext_util.NAME, sys.modules)
+            self.assertIn(util.EXTENSIONS.name, sys.modules)
             self.assertIsInstance(module.__loader__,
                                   self.machinery.ExtensionFileLoader)
 
@@ -56,9 +55,9 @@
     test_lacking_parent = None
 
     def test_module_reuse(self):
-        with util.uncache(ext_util.NAME):
-            module1 = self.load_module(ext_util.NAME)
-            module2 = self.load_module(ext_util.NAME)
+        with util.uncache(util.EXTENSIONS.name):
+            module1 = self.load_module(util.EXTENSIONS.name)
+            module2 = self.load_module(util.EXTENSIONS.name)
             self.assertIs(module1, module2)
 
     # No easy way to trigger a failure after a successful import.
@@ -71,14 +70,15 @@
         self.assertEqual(cm.exception.name, name)
 
     def test_is_package(self):
-        self.assertFalse(self.loader.is_package(ext_util.NAME))
+        self.assertFalse(self.loader.is_package(util.EXTENSIONS.name))
         for suffix in self.machinery.EXTENSION_SUFFIXES:
             path = os.path.join('some', 'path', 'pkg', '__init__' + suffix)
             loader = self.machinery.ExtensionFileLoader('pkg', path)
             self.assertTrue(loader.is_package('pkg'))
 
-Frozen_LoaderTests, Source_LoaderTests = util.test_both(
-        LoaderTests, machinery=machinery)
+(Frozen_LoaderTests,
+ Source_LoaderTests
+ ) = util.test_both(LoaderTests, machinery=machinery)
 
 
 
diff --git a/Lib/test/test_importlib/extension/test_path_hook.py b/Lib/test/test_importlib/extension/test_path_hook.py
index 49d6734..8f4b8bb 100644
--- a/Lib/test/test_importlib/extension/test_path_hook.py
+++ b/Lib/test/test_importlib/extension/test_path_hook.py
@@ -1,7 +1,6 @@
-from .. import util as test_util
-from . import util
+from .. import util
 
-machinery = test_util.import_importlib('importlib.machinery')
+machinery = util.import_importlib('importlib.machinery')
 
 import collections
 import sys
@@ -22,10 +21,12 @@
     def test_success(self):
         # Path hook should handle a directory where a known extension module
         # exists.
-        self.assertTrue(hasattr(self.hook(util.PATH), 'find_module'))
+        self.assertTrue(hasattr(self.hook(util.EXTENSIONS.path), 'find_module'))
 
-Frozen_PathHooksTests, Source_PathHooksTests = test_util.test_both(
-        PathHookTests, machinery=machinery)
+
+(Frozen_PathHooksTests,
+ Source_PathHooksTests
+ ) = util.test_both(PathHookTests, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/extension/util.py b/Lib/test/test_importlib/extension/util.py
deleted file mode 100644
index 8d089f0..0000000
--- a/Lib/test/test_importlib/extension/util.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from importlib import machinery
-import os
-import sys
-
-PATH = None
-EXT = None
-FILENAME = None
-NAME = '_testcapi'
-try:
-    for PATH in sys.path:
-        for EXT in machinery.EXTENSION_SUFFIXES:
-            FILENAME = NAME + EXT
-            FILEPATH = os.path.join(PATH, FILENAME)
-            if os.path.exists(os.path.join(PATH, FILENAME)):
-                raise StopIteration
-    else:
-        PATH = EXT = FILENAME = FILEPATH = None
-except StopIteration:
-    pass
diff --git a/Lib/test/test_importlib/frozen/test_finder.py b/Lib/test/test_importlib/frozen/test_finder.py
index f9f97f3..519aa02 100644
--- a/Lib/test/test_importlib/frozen/test_finder.py
+++ b/Lib/test/test_importlib/frozen/test_finder.py
@@ -37,8 +37,10 @@
         spec = self.find('<not real>')
         self.assertIsNone(spec)
 
-Frozen_FindSpecTests, Source_FindSpecTests = util.test_both(FindSpecTests,
-                                                            machinery=machinery)
+
+(Frozen_FindSpecTests,
+ Source_FindSpecTests
+ ) = util.test_both(FindSpecTests, machinery=machinery)
 
 
 class FinderTests(abc.FinderTests):
@@ -72,8 +74,10 @@
         loader = self.find('<not real>')
         self.assertIsNone(loader)
 
-Frozen_FinderTests, Source_FinderTests = util.test_both(FinderTests,
-                                                        machinery=machinery)
+
+(Frozen_FinderTests,
+ Source_FinderTests
+ ) = util.test_both(FinderTests, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/frozen/test_loader.py b/Lib/test/test_importlib/frozen/test_loader.py
index 7c01464..603c7d7 100644
--- a/Lib/test/test_importlib/frozen/test_loader.py
+++ b/Lib/test/test_importlib/frozen/test_loader.py
@@ -85,8 +85,10 @@
             self.exec_module('_not_real')
         self.assertEqual(cm.exception.name, '_not_real')
 
-Frozen_ExecModuleTests, Source_ExecModuleTests = util.test_both(ExecModuleTests,
-                                                        machinery=machinery)
+
+(Frozen_ExecModuleTests,
+ Source_ExecModuleTests
+ ) = util.test_both(ExecModuleTests, machinery=machinery)
 
 
 class LoaderTests(abc.LoaderTests):
@@ -175,8 +177,10 @@
             self.machinery.FrozenImporter.load_module('_not_real')
         self.assertEqual(cm.exception.name, '_not_real')
 
-Frozen_LoaderTests, Source_LoaderTests = util.test_both(LoaderTests,
-                                                        machinery=machinery)
+
+(Frozen_LoaderTests,
+ Source_LoaderTests
+ ) = util.test_both(LoaderTests, machinery=machinery)
 
 
 class InspectLoaderTests:
@@ -214,8 +218,9 @@
                 method('importlib')
             self.assertEqual(cm.exception.name, 'importlib')
 
-Frozen_ILTests, Source_ILTests = util.test_both(InspectLoaderTests,
-                                                machinery=machinery)
+(Frozen_ILTests,
+ Source_ILTests
+ ) = util.test_both(InspectLoaderTests, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/test___loader__.py b/Lib/test/test_importlib/import_/test___loader__.py
index 6df8010..9998cd6 100644
--- a/Lib/test/test_importlib/import_/test___loader__.py
+++ b/Lib/test/test_importlib/import_/test___loader__.py
@@ -4,7 +4,6 @@
 import unittest
 
 from .. import util
-from . import util as import_util
 
 
 class SpecLoaderMock:
@@ -24,8 +23,10 @@
             module = self.__import__('blah')
         self.assertEqual(loader, module.__loader__)
 
-Frozen_SpecTests, Source_SpecTests = util.test_both(
-        SpecLoaderAttributeTests, __import__=import_util.__import__)
+
+(Frozen_SpecTests,
+ Source_SpecTests
+ ) = util.test_both(SpecLoaderAttributeTests, __import__=util.__import__)
 
 
 class LoaderMock:
@@ -62,8 +63,9 @@
         self.assertEqual(loader, module.__loader__)
 
 
-Frozen_Tests, Source_Tests = util.test_both(LoaderAttributeTests,
-                                            __import__=import_util.__import__)
+(Frozen_Tests,
+ Source_Tests
+ ) = util.test_both(LoaderAttributeTests, __import__=util.__import__)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/test___package__.py b/Lib/test/test_importlib/import_/test___package__.py
index 2e19725..c7d3a2a 100644
--- a/Lib/test/test_importlib/import_/test___package__.py
+++ b/Lib/test/test_importlib/import_/test___package__.py
@@ -6,7 +6,6 @@
 """
 import unittest
 from .. import util
-from . import util as import_util
 
 
 class Using__package__:
@@ -70,17 +69,23 @@
         with self.assertRaises(TypeError):
             self.__import__('', globals, {}, ['relimport'], 1)
 
+
 class Using__package__PEP302(Using__package__):
     mock_modules = util.mock_modules
 
-Frozen_UsingPackagePEP302, Source_UsingPackagePEP302 = util.test_both(
-        Using__package__PEP302, __import__=import_util.__import__)
 
-class Using__package__PEP302(Using__package__):
+(Frozen_UsingPackagePEP302,
+ Source_UsingPackagePEP302
+ ) = util.test_both(Using__package__PEP302, __import__=util.__import__)
+
+
+class Using__package__PEP451(Using__package__):
     mock_modules = util.mock_spec
 
-Frozen_UsingPackagePEP451, Source_UsingPackagePEP451 = util.test_both(
-        Using__package__PEP302, __import__=import_util.__import__)
+
+(Frozen_UsingPackagePEP451,
+ Source_UsingPackagePEP451
+ ) = util.test_both(Using__package__PEP451, __import__=util.__import__)
 
 
 class Setting__package__:
@@ -95,7 +100,7 @@
 
     """
 
-    __import__ = import_util.__import__[1]
+    __import__ = util.__import__['Source']
 
     # [top-level]
     def test_top_level(self):
diff --git a/Lib/test/test_importlib/import_/test_api.py b/Lib/test/test_importlib/import_/test_api.py
index 439c105..2c61b01 100644
--- a/Lib/test/test_importlib/import_/test_api.py
+++ b/Lib/test/test_importlib/import_/test_api.py
@@ -1,5 +1,4 @@
 from .. import util
-from . import util as import_util
 
 from importlib import machinery
 import sys
@@ -79,15 +78,19 @@
 class OldAPITests(APITest):
     bad_finder_loader = BadLoaderFinder
 
-Frozen_OldAPITests, Source_OldAPITests = util.test_both(
-        OldAPITests, __import__=import_util.__import__)
+
+(Frozen_OldAPITests,
+ Source_OldAPITests
+ ) = util.test_both(OldAPITests, __import__=util.__import__)
 
 
 class SpecAPITests(APITest):
     bad_finder_loader = BadSpecFinderLoader
 
-Frozen_SpecAPITests, Source_SpecAPITests = util.test_both(
-        SpecAPITests, __import__=import_util.__import__)
+
+(Frozen_SpecAPITests,
+ Source_SpecAPITests
+ ) = util.test_both(SpecAPITests, __import__=util.__import__)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/test_caching.py b/Lib/test/test_importlib/import_/test_caching.py
index c292ee4..8079add 100644
--- a/Lib/test/test_importlib/import_/test_caching.py
+++ b/Lib/test/test_importlib/import_/test_caching.py
@@ -1,6 +1,5 @@
 """Test that sys.modules is used properly by import."""
 from .. import util
-from . import util as import_util
 import sys
 from types import MethodType
 import unittest
@@ -39,15 +38,17 @@
                 self.__import__(name)
             self.assertEqual(cm.exception.name, name)
 
-Frozen_UseCache, Source_UseCache = util.test_both(
-        UseCache, __import__=import_util.__import__)
+
+(Frozen_UseCache,
+ Source_UseCache
+ ) = util.test_both(UseCache, __import__=util.__import__)
 
 
 class ImportlibUseCache(UseCache, unittest.TestCase):
 
     # Pertinent only to PEP 302; exec_module() doesn't return a module.
 
-    __import__ = import_util.__import__[1]
+    __import__ = util.__import__['Source']
 
     def create_mock(self, *names, return_=None):
         mock = util.mock_modules(*names)
diff --git a/Lib/test/test_importlib/import_/test_fromlist.py b/Lib/test/test_importlib/import_/test_fromlist.py
index 58f244b..8993226 100644
--- a/Lib/test/test_importlib/import_/test_fromlist.py
+++ b/Lib/test/test_importlib/import_/test_fromlist.py
@@ -1,6 +1,5 @@
 """Test that the semantics relating to the 'fromlist' argument are correct."""
 from .. import util
-from . import util as import_util
 import unittest
 
 
@@ -29,8 +28,10 @@
                 module = self.__import__('pkg.module', fromlist=['attr'])
                 self.assertEqual(module.__name__, 'pkg.module')
 
-Frozen_ReturnValue, Source_ReturnValue = util.test_both(
-        ReturnValue, __import__=import_util.__import__)
+
+(Frozen_ReturnValue,
+ Source_ReturnValue
+ ) = util.test_both(ReturnValue, __import__=util.__import__)
 
 
 class HandlingFromlist:
@@ -121,8 +122,10 @@
                 self.assertEqual(module.module1.__name__, 'pkg.module1')
                 self.assertEqual(module.module2.__name__, 'pkg.module2')
 
-Frozen_FromList, Source_FromList = util.test_both(
-        HandlingFromlist, __import__=import_util.__import__)
+
+(Frozen_FromList,
+ Source_FromList
+ ) = util.test_both(HandlingFromlist, __import__=util.__import__)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/test_meta_path.py b/Lib/test/test_importlib/import_/test_meta_path.py
index dc45420..47a603c 100644
--- a/Lib/test/test_importlib/import_/test_meta_path.py
+++ b/Lib/test/test_importlib/import_/test_meta_path.py
@@ -1,5 +1,4 @@
 from .. import util
-from . import util as import_util
 import importlib._bootstrap
 import sys
 from types import MethodType
@@ -46,8 +45,10 @@
                 self.assertEqual(len(w), 1)
                 self.assertTrue(issubclass(w[-1].category, ImportWarning))
 
-Frozen_CallingOrder, Source_CallingOrder = util.test_both(
-        CallingOrder, __import__=import_util.__import__)
+
+(Frozen_CallingOrder,
+ Source_CallingOrder
+ ) = util.test_both(CallingOrder, __import__=util.__import__)
 
 
 class CallSignature:
@@ -100,19 +101,25 @@
                 self.assertEqual(args[0], mod_name)
                 self.assertIs(args[1], path)
 
+
 class CallSignaturePEP302(CallSignature):
     mock_modules = util.mock_modules
     finder_name = 'find_module'
 
-Frozen_CallSignaturePEP302, Source_CallSignaturePEP302 = util.test_both(
-        CallSignaturePEP302, __import__=import_util.__import__)
+
+(Frozen_CallSignaturePEP302,
+ Source_CallSignaturePEP302
+ ) = util.test_both(CallSignaturePEP302, __import__=util.__import__)
+
 
 class CallSignaturePEP451(CallSignature):
     mock_modules = util.mock_spec
     finder_name = 'find_spec'
 
-Frozen_CallSignaturePEP451, Source_CallSignaturePEP451 = util.test_both(
-        CallSignaturePEP451, __import__=import_util.__import__)
+
+(Frozen_CallSignaturePEP451,
+ Source_CallSignaturePEP451
+ ) = util.test_both(CallSignaturePEP451, __import__=util.__import__)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/test_packages.py b/Lib/test/test_importlib/import_/test_packages.py
index 55a5d14..3755b84 100644
--- a/Lib/test/test_importlib/import_/test_packages.py
+++ b/Lib/test/test_importlib/import_/test_packages.py
@@ -1,5 +1,4 @@
 from .. import util
-from . import util as import_util
 import sys
 import unittest
 import importlib
@@ -102,8 +101,10 @@
                 finally:
                     support.unload(subname)
 
-Frozen_ParentTests, Source_ParentTests = util.test_both(
-        ParentModuleTests, __import__=import_util.__import__)
+
+(Frozen_ParentTests,
+ Source_ParentTests
+ ) = util.test_both(ParentModuleTests, __import__=util.__import__)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/test_path.py b/Lib/test/test_importlib/import_/test_path.py
index 1274f8c..e86c655 100644
--- a/Lib/test/test_importlib/import_/test_path.py
+++ b/Lib/test/test_importlib/import_/test_path.py
@@ -1,5 +1,4 @@
 from .. import util
-from . import util as import_util
 
 importlib = util.import_importlib('importlib')
 machinery = util.import_importlib('importlib.machinery')
@@ -58,7 +57,7 @@
         module = '<test module>'
         path = '<test path>'
         importer = util.mock_spec(module)
-        hook = import_util.mock_path_hook(path, importer=importer)
+        hook = util.mock_path_hook(path, importer=importer)
         with util.import_state(path_hooks=[hook]):
             loader = self.machinery.PathFinder.find_module(module, [path])
             self.assertIs(loader, importer)
@@ -83,7 +82,7 @@
         path = ''
         module = '<test module>'
         importer = util.mock_spec(module)
-        hook = import_util.mock_path_hook(os.getcwd(), importer=importer)
+        hook = util.mock_path_hook(os.getcwd(), importer=importer)
         with util.import_state(path=[path], path_hooks=[hook]):
             loader = self.machinery.PathFinder.find_module(module)
             self.assertIs(loader, importer)
@@ -112,8 +111,57 @@
             if email is not missing:
                 sys.modules['email'] = email
 
-Frozen_FinderTests, Source_FinderTests = util.test_both(
-        FinderTests, importlib=importlib, machinery=machinery)
+    def test_finder_with_find_module(self):
+        class TestFinder:
+            def find_module(self, fullname):
+                return self.to_return
+        failing_finder = TestFinder()
+        failing_finder.to_return = None
+        path = 'testing path'
+        with util.import_state(path_importer_cache={path: failing_finder}):
+            self.assertIsNone(
+                    self.machinery.PathFinder.find_spec('whatever', [path]))
+        success_finder = TestFinder()
+        success_finder.to_return = __loader__
+        with util.import_state(path_importer_cache={path: success_finder}):
+            spec = self.machinery.PathFinder.find_spec('whatever', [path])
+        self.assertEqual(spec.loader, __loader__)
+
+    def test_finder_with_find_loader(self):
+        class TestFinder:
+            loader = None
+            portions = []
+            def find_loader(self, fullname):
+                return self.loader, self.portions
+        path = 'testing path'
+        with util.import_state(path_importer_cache={path: TestFinder()}):
+            self.assertIsNone(
+                    self.machinery.PathFinder.find_spec('whatever', [path]))
+        success_finder = TestFinder()
+        success_finder.loader = __loader__
+        with util.import_state(path_importer_cache={path: success_finder}):
+            spec = self.machinery.PathFinder.find_spec('whatever', [path])
+        self.assertEqual(spec.loader, __loader__)
+
+    def test_finder_with_find_spec(self):
+        class TestFinder:
+            spec = None
+            def find_spec(self, fullname, target=None):
+                return self.spec
+        path = 'testing path'
+        with util.import_state(path_importer_cache={path: TestFinder()}):
+            self.assertIsNone(
+                    self.machinery.PathFinder.find_spec('whatever', [path]))
+        success_finder = TestFinder()
+        success_finder.spec = self.machinery.ModuleSpec('whatever', __loader__)
+        with util.import_state(path_importer_cache={path: success_finder}):
+            got = self.machinery.PathFinder.find_spec('whatever', [path])
+        self.assertEqual(got, success_finder.spec)
+
+
+(Frozen_FinderTests,
+ Source_FinderTests
+ ) = util.test_both(FinderTests, importlib=importlib, machinery=machinery)
 
 
 class PathEntryFinderTests:
@@ -136,8 +184,10 @@
                                path_hooks=[Finder]):
             self.machinery.PathFinder.find_spec('importlib')
 
-Frozen_PEFTests, Source_PEFTests = util.test_both(
-        PathEntryFinderTests, machinery=machinery)
+
+(Frozen_PEFTests,
+ Source_PEFTests
+ ) = util.test_both(PathEntryFinderTests, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/test_relative_imports.py b/Lib/test/test_importlib/import_/test_relative_imports.py
index b216e9c..28bb6f7 100644
--- a/Lib/test/test_importlib/import_/test_relative_imports.py
+++ b/Lib/test/test_importlib/import_/test_relative_imports.py
@@ -1,6 +1,5 @@
 """Test relative imports (PEP 328)."""
 from .. import util
-from . import util as import_util
 import sys
 import unittest
 
@@ -208,8 +207,10 @@
         with self.assertRaises(KeyError):
             self.__import__('sys', level=1)
 
-Frozen_RelativeImports, Source_RelativeImports = util.test_both(
-        RelativeImports, __import__=import_util.__import__)
+
+(Frozen_RelativeImports,
+ Source_RelativeImports
+ ) = util.test_both(RelativeImports, __import__=util.__import__)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/import_/util.py b/Lib/test/test_importlib/import_/util.py
deleted file mode 100644
index dcb490f..0000000
--- a/Lib/test/test_importlib/import_/util.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from .. import util
-
-frozen_importlib, source_importlib = util.import_importlib('importlib')
-
-import builtins
-import functools
-import importlib
-import unittest
-
-
-__import__ = staticmethod(builtins.__import__), staticmethod(source_importlib.__import__)
-
-
-def mock_path_hook(*entries, importer):
-    """A mock sys.path_hooks entry."""
-    def hook(entry):
-        if entry not in entries:
-            raise ImportError
-        return importer
-    return hook
diff --git a/Lib/test/test_importlib/source/test_case_sensitivity.py b/Lib/test/test_importlib/source/test_case_sensitivity.py
index efd3146..29e95b2 100644
--- a/Lib/test/test_importlib/source/test_case_sensitivity.py
+++ b/Lib/test/test_importlib/source/test_case_sensitivity.py
@@ -1,6 +1,5 @@
 """Test case-sensitivity (PEP 235)."""
 from .. import util
-from . import util as source_util
 
 importlib = util.import_importlib('importlib')
 machinery = util.import_importlib('importlib.machinery')
@@ -32,7 +31,7 @@
         """Look for a module with matching and non-matching sensitivity."""
         sensitive_pkg = 'sensitive.{0}'.format(self.name)
         insensitive_pkg = 'insensitive.{0}'.format(self.name.lower())
-        context = source_util.create_modules(insensitive_pkg, sensitive_pkg)
+        context = util.create_modules(insensitive_pkg, sensitive_pkg)
         with context as mapping:
             sensitive_path = os.path.join(mapping['.root'], 'sensitive')
             insensitive_path = os.path.join(mapping['.root'], 'insensitive')
@@ -63,20 +62,28 @@
             self.assertIsNotNone(insensitive)
             self.assertIn(self.name, insensitive.get_filename(self.name))
 
+
 class CaseSensitivityTestPEP302(CaseSensitivityTest):
     def find(self, finder):
         return finder.find_module(self.name)
 
-Frozen_CaseSensitivityTestPEP302, Source_CaseSensitivityTestPEP302 = util.test_both(
-    CaseSensitivityTestPEP302, importlib=importlib, machinery=machinery)
+
+(Frozen_CaseSensitivityTestPEP302,
+ Source_CaseSensitivityTestPEP302
+ ) = util.test_both(CaseSensitivityTestPEP302, importlib=importlib,
+                    machinery=machinery)
+
 
 class CaseSensitivityTestPEP451(CaseSensitivityTest):
     def find(self, finder):
         found = finder.find_spec(self.name)
         return found.loader if found is not None else found
 
-Frozen_CaseSensitivityTestPEP451, Source_CaseSensitivityTestPEP451 = util.test_both(
-    CaseSensitivityTestPEP451, importlib=importlib, machinery=machinery)
+
+(Frozen_CaseSensitivityTestPEP451,
+ Source_CaseSensitivityTestPEP451
+ ) = util.test_both(CaseSensitivityTestPEP451, importlib=importlib,
+                    machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/source/test_file_loader.py b/Lib/test/test_importlib/source/test_file_loader.py
index 2d415f9..73f4c62 100644
--- a/Lib/test/test_importlib/source/test_file_loader.py
+++ b/Lib/test/test_importlib/source/test_file_loader.py
@@ -1,6 +1,5 @@
 from .. import abc
 from .. import util
-from . import util as source_util
 
 importlib = util.import_importlib('importlib')
 importlib_abc = util.import_importlib('importlib.abc')
@@ -71,7 +70,7 @@
 
     # [basic]
     def test_module(self):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
             with warnings.catch_warnings():
                 warnings.simplefilter('ignore', DeprecationWarning)
@@ -83,7 +82,7 @@
                 self.assertEqual(getattr(module, attr), value)
 
     def test_package(self):
-        with source_util.create_modules('_pkg.__init__') as mapping:
+        with util.create_modules('_pkg.__init__') as mapping:
             loader = self.machinery.SourceFileLoader('_pkg',
                                                  mapping['_pkg.__init__'])
             with warnings.catch_warnings():
@@ -98,7 +97,7 @@
 
 
     def test_lacking_parent(self):
-        with source_util.create_modules('_pkg.__init__', '_pkg.mod')as mapping:
+        with util.create_modules('_pkg.__init__', '_pkg.mod') as mapping:
             loader = self.machinery.SourceFileLoader('_pkg.mod',
                                                     mapping['_pkg.mod'])
             with warnings.catch_warnings():
@@ -115,7 +114,7 @@
         return lambda name: fxn(name) + 1
 
     def test_module_reuse(self):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
             with warnings.catch_warnings():
                 warnings.simplefilter('ignore', DeprecationWarning)
@@ -139,7 +138,7 @@
         attributes = ('__file__', '__path__', '__package__')
         value = '<test>'
         name = '_temp'
-        with source_util.create_modules(name) as mapping:
+        with util.create_modules(name) as mapping:
             orig_module = types.ModuleType(name)
             for attr in attributes:
                 setattr(orig_module, attr, value)
@@ -159,7 +158,7 @@
 
     # [syntax error]
     def test_bad_syntax(self):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             with open(mapping['_temp'], 'w') as file:
                 file.write('=')
             loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
@@ -190,11 +189,11 @@
             if os.path.exists(pycache):
                 shutil.rmtree(pycache)
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_timestamp_overflow(self):
         # When a modification timestamp is larger than 2**32, it should be
         # truncated rather than raise an OverflowError.
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             source = mapping['_temp']
             compiled = self.util.cache_from_source(source)
             with open(source, 'w') as f:
@@ -236,9 +235,11 @@
                 warnings.simplefilter('ignore', DeprecationWarning)
                 loader.load_module('bad name')
 
-Frozen_SimpleTest, Source_SimpleTest = util.test_both(
-        SimpleTest, importlib=importlib, machinery=machinery, abc=importlib_abc,
-        util=importlib_util)
+
+(Frozen_SimpleTest,
+ Source_SimpleTest
+ ) = util.test_both(SimpleTest, importlib=importlib, machinery=machinery,
+                    abc=importlib_abc, util=importlib_util)
 
 
 class BadBytecodeTest:
@@ -275,45 +276,45 @@
         return bytecode_path
 
     def _test_empty_file(self, test, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bc_path = self.manipulate_bytecode('_temp', mapping,
                                                 lambda bc: b'',
                                                 del_source=del_source)
             test('_temp', mapping, bc_path)
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def _test_partial_magic(self, test, *, del_source=False):
-        # When their are less than 4 bytes to a .pyc, regenerate it if
+        # When there are fewer than 4 bytes in a .pyc, regenerate it if
         # possible, else raise ImportError.
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bc_path = self.manipulate_bytecode('_temp', mapping,
                                                 lambda bc: bc[:3],
                                                 del_source=del_source)
             test('_temp', mapping, bc_path)
 
     def _test_magic_only(self, test, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bc_path = self.manipulate_bytecode('_temp', mapping,
                                                 lambda bc: bc[:4],
                                                 del_source=del_source)
             test('_temp', mapping, bc_path)
 
     def _test_partial_timestamp(self, test, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bc_path = self.manipulate_bytecode('_temp', mapping,
                                                 lambda bc: bc[:7],
                                                 del_source=del_source)
             test('_temp', mapping, bc_path)
 
     def _test_partial_size(self, test, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bc_path = self.manipulate_bytecode('_temp', mapping,
                                                 lambda bc: bc[:11],
                                                 del_source=del_source)
             test('_temp', mapping, bc_path)
 
     def _test_no_marshal(self, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bc_path = self.manipulate_bytecode('_temp', mapping,
                                                 lambda bc: bc[:12],
                                                 del_source=del_source)
@@ -322,7 +323,7 @@
                 self.import_(file_path, '_temp')
 
     def _test_non_code_marshal(self, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bytecode_path = self.manipulate_bytecode('_temp', mapping,
                                     lambda bc: bc[:12] + marshal.dumps(b'abcd'),
                                     del_source=del_source)
@@ -333,7 +334,7 @@
             self.assertEqual(cm.exception.path, bytecode_path)
 
     def _test_bad_marshal(self, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bytecode_path = self.manipulate_bytecode('_temp', mapping,
                                                 lambda bc: bc[:12] + b'<test>',
                                                 del_source=del_source)
@@ -342,11 +343,12 @@
                 self.import_(file_path, '_temp')
 
     def _test_bad_magic(self, test, *, del_source=False):
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             bc_path = self.manipulate_bytecode('_temp', mapping,
                                     lambda bc: b'\x00\x00\x00\x00' + bc[4:])
             test('_temp', mapping, bc_path)
 
+
 class BadBytecodeTestPEP451(BadBytecodeTest):
 
     def import_(self, file, module_name):
@@ -355,6 +357,7 @@
         module.__spec__ = self.util.spec_from_loader(module_name, loader)
         loader.exec_module(module)
 
+
 class BadBytecodeTestPEP302(BadBytecodeTest):
 
     def import_(self, file, module_name):
@@ -371,7 +374,7 @@
     def setUpClass(cls):
         cls.loader = cls.machinery.SourceFileLoader
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_empty_file(self):
         # When a .pyc is empty, regenerate it if possible, else raise
         # ImportError.
@@ -390,7 +393,7 @@
 
         self._test_partial_magic(test)
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_magic_only(self):
         # When there is only the magic number, regenerate the .pyc if possible,
         # else raise EOFError.
@@ -401,7 +404,7 @@
 
         self._test_magic_only(test)
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_bad_magic(self):
         # When the magic number is different, the bytecode should be
         # regenerated.
@@ -413,7 +416,7 @@
 
         self._test_bad_magic(test)
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_partial_timestamp(self):
         # When the timestamp is partial, regenerate the .pyc, else
         # raise EOFError.
@@ -424,7 +427,7 @@
 
         self._test_partial_timestamp(test)
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_partial_size(self):
         # When the size is partial, regenerate the .pyc, else
         # raise EOFError.
@@ -435,29 +438,29 @@
 
         self._test_partial_size(test)
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_no_marshal(self):
         # When there is only the magic number and timestamp, raise EOFError.
         self._test_no_marshal()
 
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_non_code_marshal(self):
         self._test_non_code_marshal()
         # XXX ImportError when sourceless
 
     # [bad marshal]
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_bad_marshal(self):
         # Bad marshal data should raise a ValueError.
         self._test_bad_marshal()
 
     # [bad timestamp]
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_old_timestamp(self):
         # When the timestamp is older than the source, bytecode should be
         # regenerated.
         zeros = b'\x00\x00\x00\x00'
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             py_compile.compile(mapping['_temp'])
             bytecode_path = self.util.cache_from_source(mapping['_temp'])
             with open(bytecode_path, 'r+b') as bytecode_file:
@@ -471,10 +474,10 @@
                 self.assertEqual(bytecode_file.read(4), source_timestamp)
 
     # [bytecode read-only]
-    @source_util.writes_bytecode_files
+    @util.writes_bytecode_files
     def test_read_only_bytecode(self):
         # When bytecode is read-only but should be rewritten, fail silently.
-        with source_util.create_modules('_temp') as mapping:
+        with util.create_modules('_temp') as mapping:
             # Create bytecode that will need to be re-created.
             py_compile.compile(mapping['_temp'])
             bytecode_path = self.util.cache_from_source(mapping['_temp'])
@@ -491,21 +494,29 @@
                 # Make writable for eventual clean-up.
                 os.chmod(bytecode_path, stat.S_IWUSR)
 
+
 class SourceLoaderBadBytecodeTestPEP451(
         SourceLoaderBadBytecodeTest, BadBytecodeTestPEP451):
     pass
 
-Frozen_SourceBadBytecodePEP451, Source_SourceBadBytecodePEP451 = util.test_both(
-        SourceLoaderBadBytecodeTestPEP451, importlib=importlib, machinery=machinery,
-        abc=importlib_abc, util=importlib_util)
+
+(Frozen_SourceBadBytecodePEP451,
+ Source_SourceBadBytecodePEP451
+ ) = util.test_both(SourceLoaderBadBytecodeTestPEP451, importlib=importlib,
+                    machinery=machinery, abc=importlib_abc,
+                    util=importlib_util)
+
 
 class SourceLoaderBadBytecodeTestPEP302(
         SourceLoaderBadBytecodeTest, BadBytecodeTestPEP302):
     pass
 
-Frozen_SourceBadBytecodePEP302, Source_SourceBadBytecodePEP302 = util.test_both(
-        SourceLoaderBadBytecodeTestPEP302, importlib=importlib, machinery=machinery,
-        abc=importlib_abc, util=importlib_util)
+
+(Frozen_SourceBadBytecodePEP302,
+ Source_SourceBadBytecodePEP302
+ ) = util.test_both(SourceLoaderBadBytecodeTestPEP302, importlib=importlib,
+                    machinery=machinery, abc=importlib_abc,
+                    util=importlib_util)
 
 
 class SourcelessLoaderBadBytecodeTest:
@@ -567,21 +578,29 @@
     def test_non_code_marshal(self):
         self._test_non_code_marshal(del_source=True)
 
+
 class SourcelessLoaderBadBytecodeTestPEP451(SourcelessLoaderBadBytecodeTest,
         BadBytecodeTestPEP451):
     pass
 
-Frozen_SourcelessBadBytecodePEP451, Source_SourcelessBadBytecodePEP451 = util.test_both(
-        SourcelessLoaderBadBytecodeTestPEP451, importlib=importlib,
-        machinery=machinery, abc=importlib_abc, util=importlib_util)
+
+(Frozen_SourcelessBadBytecodePEP451,
+ Source_SourcelessBadBytecodePEP451
+ ) = util.test_both(SourcelessLoaderBadBytecodeTestPEP451, importlib=importlib,
+                    machinery=machinery, abc=importlib_abc,
+                    util=importlib_util)
+
 
 class SourcelessLoaderBadBytecodeTestPEP302(SourcelessLoaderBadBytecodeTest,
         BadBytecodeTestPEP302):
     pass
 
-Frozen_SourcelessBadBytecodePEP302, Source_SourcelessBadBytecodePEP302 = util.test_both(
-        SourcelessLoaderBadBytecodeTestPEP302, importlib=importlib,
-        machinery=machinery, abc=importlib_abc, util=importlib_util)
+
+(Frozen_SourcelessBadBytecodePEP302,
+ Source_SourcelessBadBytecodePEP302
+ ) = util.test_both(SourcelessLoaderBadBytecodeTestPEP302, importlib=importlib,
+                    machinery=machinery, abc=importlib_abc,
+                    util=importlib_util)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/source/test_finder.py b/Lib/test/test_importlib/source/test_finder.py
index 473297b..f372b85 100644
--- a/Lib/test/test_importlib/source/test_finder.py
+++ b/Lib/test/test_importlib/source/test_finder.py
@@ -1,6 +1,5 @@
 from .. import abc
 from .. import util
-from . import util as source_util
 
 machinery = util.import_importlib('importlib.machinery')
 
@@ -60,7 +59,7 @@
         """
         if create is None:
             create = {test}
-        with source_util.create_modules(*create) as mapping:
+        with util.create_modules(*create) as mapping:
             if compile_:
                 for name in compile_:
                     py_compile.compile(mapping[name])
@@ -100,14 +99,14 @@
 
     # [sub module]
     def test_module_in_package(self):
-        with source_util.create_modules('pkg.__init__', 'pkg.sub') as mapping:
+        with util.create_modules('pkg.__init__', 'pkg.sub') as mapping:
             pkg_dir = os.path.dirname(mapping['pkg.__init__'])
             loader = self.import_(pkg_dir, 'pkg.sub')
             self.assertTrue(hasattr(loader, 'load_module'))
 
     # [sub package]
     def test_package_in_package(self):
-        context = source_util.create_modules('pkg.__init__', 'pkg.sub.__init__')
+        context = util.create_modules('pkg.__init__', 'pkg.sub.__init__')
         with context as mapping:
             pkg_dir = os.path.dirname(mapping['pkg.__init__'])
             loader = self.import_(pkg_dir, 'pkg.sub')
@@ -120,7 +119,7 @@
         self.assertIn('__init__', loader.get_filename(name))
 
     def test_failure(self):
-        with source_util.create_modules('blah') as mapping:
+        with util.create_modules('blah') as mapping:
             nothing = self.import_(mapping['.root'], 'sdfsadsadf')
             self.assertIsNone(nothing)
 
@@ -147,7 +146,7 @@
     # Regression test for http://bugs.python.org/issue14846
     def test_dir_removal_handling(self):
         mod = 'mod'
-        with source_util.create_modules(mod) as mapping:
+        with util.create_modules(mod) as mapping:
             finder = self.get_finder(mapping['.root'])
             found = self._find(finder, 'mod', loader_only=True)
             self.assertIsNotNone(found)
@@ -196,8 +195,10 @@
         spec = finder.find_spec(name)
         return spec.loader if spec is not None else spec
 
-Frozen_FinderTestsPEP451, Source_FinderTestsPEP451 = util.test_both(
-        FinderTestsPEP451, machinery=machinery)
+
+(Frozen_FinderTestsPEP451,
+ Source_FinderTestsPEP451
+ ) = util.test_both(FinderTestsPEP451, machinery=machinery)
 
 
 class FinderTestsPEP420(FinderTests):
@@ -210,8 +211,10 @@
             loader_portions = finder.find_loader(name)
             return loader_portions[0] if loader_only else loader_portions
 
-Frozen_FinderTestsPEP420, Source_FinderTestsPEP420 = util.test_both(
-        FinderTestsPEP420, machinery=machinery)
+
+(Frozen_FinderTestsPEP420,
+ Source_FinderTestsPEP420
+ ) = util.test_both(FinderTestsPEP420, machinery=machinery)
 
 
 class FinderTestsPEP302(FinderTests):
@@ -223,9 +226,10 @@
             warnings.simplefilter("ignore", DeprecationWarning)
             return finder.find_module(name)
 
-Frozen_FinderTestsPEP302, Source_FinderTestsPEP302 = util.test_both(
-        FinderTestsPEP302, machinery=machinery)
 
+(Frozen_FinderTestsPEP302,
+ Source_FinderTestsPEP302
+ ) = util.test_both(FinderTestsPEP302, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/source/test_path_hook.py b/Lib/test/test_importlib/source/test_path_hook.py
index 92da772..e6a2415 100644
--- a/Lib/test/test_importlib/source/test_path_hook.py
+++ b/Lib/test/test_importlib/source/test_path_hook.py
@@ -1,5 +1,4 @@
 from .. import util
-from . import util as source_util
 
 machinery = util.import_importlib('importlib.machinery')
 
@@ -15,7 +14,7 @@
             self.machinery.SOURCE_SUFFIXES))
 
     def test_success(self):
-        with source_util.create_modules('dummy') as mapping:
+        with util.create_modules('dummy') as mapping:
             self.assertTrue(hasattr(self.path_hook()(mapping['.root']),
                                  'find_module'))
 
@@ -23,7 +22,10 @@
         # The empty string represents the cwd.
         self.assertTrue(hasattr(self.path_hook()(''), 'find_module'))
 
-Frozen_PathHookTest, Source_PathHooktest = util.test_both(PathHookTest, machinery=machinery)
+
+(Frozen_PathHookTest,
+ Source_PathHooktest
+ ) = util.test_both(PathHookTest, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/source/test_source_encoding.py b/Lib/test/test_importlib/source/test_source_encoding.py
index c62dfa1..b604afb 100644
--- a/Lib/test/test_importlib/source/test_source_encoding.py
+++ b/Lib/test/test_importlib/source/test_source_encoding.py
@@ -1,5 +1,4 @@
 from .. import util
-from . import util as source_util
 
 machinery = util.import_importlib('importlib.machinery')
 
@@ -37,7 +36,7 @@
     module_name = '_temp'
 
     def run_test(self, source):
-        with source_util.create_modules(self.module_name) as mapping:
+        with util.create_modules(self.module_name) as mapping:
             with open(mapping[self.module_name], 'wb') as file:
                 file.write(source)
             loader = self.machinery.SourceFileLoader(self.module_name,
@@ -89,6 +88,7 @@
         with self.assertRaises(SyntaxError):
             self.run_test(source)
 
+
 class EncodingTestPEP451(EncodingTest):
 
     def load(self, loader):
@@ -97,8 +97,11 @@
         loader.exec_module(module)
         return module
 
-Frozen_EncodingTestPEP451, Source_EncodingTestPEP451 = util.test_both(
-        EncodingTestPEP451, machinery=machinery)
+
+(Frozen_EncodingTestPEP451,
+ Source_EncodingTestPEP451
+ ) = util.test_both(EncodingTestPEP451, machinery=machinery)
+
 
 class EncodingTestPEP302(EncodingTest):
 
@@ -107,8 +110,10 @@
             warnings.simplefilter('ignore', DeprecationWarning)
             return loader.load_module(self.module_name)
 
-Frozen_EncodingTestPEP302, Source_EncodingTestPEP302 = util.test_both(
-        EncodingTestPEP302, machinery=machinery)
+
+(Frozen_EncodingTestPEP302,
+ Source_EncodingTestPEP302
+ ) = util.test_both(EncodingTestPEP302, machinery=machinery)
 
 
 class LineEndingTest:
@@ -120,7 +125,7 @@
         module_name = '_temp'
         source_lines = [b"a = 42", b"b = -13", b'']
         source = line_ending.join(source_lines)
-        with source_util.create_modules(module_name) as mapping:
+        with util.create_modules(module_name) as mapping:
             with open(mapping[module_name], 'wb') as file:
                 file.write(source)
             loader = self.machinery.SourceFileLoader(module_name,
@@ -139,6 +144,7 @@
     def test_lf(self):
         self.run_test(b'\n')
 
+
 class LineEndingTestPEP451(LineEndingTest):
 
     def load(self, loader, module_name):
@@ -147,8 +153,11 @@
         loader.exec_module(module)
         return module
 
-Frozen_LineEndingTestPEP451, Source_LineEndingTestPEP451 = util.test_both(
-        LineEndingTestPEP451, machinery=machinery)
+
+(Frozen_LineEndingTestPEP451,
+ Source_LineEndingTestPEP451
+ ) = util.test_both(LineEndingTestPEP451, machinery=machinery)
+
 
 class LineEndingTestPEP302(LineEndingTest):
 
@@ -157,8 +166,10 @@
             warnings.simplefilter('ignore', DeprecationWarning)
             return loader.load_module(module_name)
 
-Frozen_LineEndingTestPEP302, Source_LineEndingTestPEP302 = util.test_both(
-        LineEndingTestPEP302, machinery=machinery)
+
+(Frozen_LineEndingTestPEP302,
+ Source_LineEndingTestPEP302
+ ) = util.test_both(LineEndingTestPEP302, machinery=machinery)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/source/util.py b/Lib/test/test_importlib/source/util.py
deleted file mode 100644
index 63cd25a..0000000
--- a/Lib/test/test_importlib/source/util.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from .. import util
-import contextlib
-import errno
-import functools
-import os
-import os.path
-import sys
-import tempfile
-from test import support
-
-
-def writes_bytecode_files(fxn):
-    """Decorator to protect sys.dont_write_bytecode from mutation and to skip
-    tests that require it to be set to False."""
-    if sys.dont_write_bytecode:
-        return lambda *args, **kwargs: None
-    @functools.wraps(fxn)
-    def wrapper(*args, **kwargs):
-        original = sys.dont_write_bytecode
-        sys.dont_write_bytecode = False
-        try:
-            to_return = fxn(*args, **kwargs)
-        finally:
-            sys.dont_write_bytecode = original
-        return to_return
-    return wrapper
-
-
-def ensure_bytecode_path(bytecode_path):
-    """Ensure that the __pycache__ directory for PEP 3147 pyc file exists.
-
-    :param bytecode_path: File system path to PEP 3147 pyc file.
-    """
-    try:
-        os.mkdir(os.path.dirname(bytecode_path))
-    except OSError as error:
-        if error.errno != errno.EEXIST:
-            raise
-
-
-@contextlib.contextmanager
-def create_modules(*names):
-    """Temporarily create each named module with an attribute (named 'attr')
-    that contains the name passed into the context manager that caused the
-    creation of the module.
-
-    All files are created in a temporary directory returned by
-    tempfile.mkdtemp(). This directory is inserted at the beginning of
-    sys.path. When the context manager exits all created files (source and
-    bytecode) are explicitly deleted.
-
-    No magic is performed when creating packages! This means that if you create
-    a module within a package you must also create the package's __init__ as
-    well.
-
-    """
-    source = 'attr = {0!r}'
-    created_paths = []
-    mapping = {}
-    state_manager = None
-    uncache_manager = None
-    try:
-        temp_dir = tempfile.mkdtemp()
-        mapping['.root'] = temp_dir
-        import_names = set()
-        for name in names:
-            if not name.endswith('__init__'):
-                import_name = name
-            else:
-                import_name = name[:-len('.__init__')]
-            import_names.add(import_name)
-            if import_name in sys.modules:
-                del sys.modules[import_name]
-            name_parts = name.split('.')
-            file_path = temp_dir
-            for directory in name_parts[:-1]:
-                file_path = os.path.join(file_path, directory)
-                if not os.path.exists(file_path):
-                    os.mkdir(file_path)
-                    created_paths.append(file_path)
-            file_path = os.path.join(file_path, name_parts[-1] + '.py')
-            with open(file_path, 'w') as file:
-                file.write(source.format(name))
-            created_paths.append(file_path)
-            mapping[name] = file_path
-        uncache_manager = util.uncache(*import_names)
-        uncache_manager.__enter__()
-        state_manager = util.import_state(path=[temp_dir])
-        state_manager.__enter__()
-        yield mapping
-    finally:
-        if state_manager is not None:
-            state_manager.__exit__(None, None, None)
-        if uncache_manager is not None:
-            uncache_manager.__exit__(None, None, None)
-        support.rmtree(temp_dir)
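
With test_importlib/source/util.py deleted, its helpers (writes_bytecode_files, ensure_bytecode_path, create_modules) evidently move into the shared Lib/test/test_importlib/util.py: the tail of this patch re-adds writes_bytecode_files and ensure_bytecode_path there, and the call sites above already use util.create_modules from the shared module. A usage sketch of the preserved contract, assuming a CPython checkout where test.test_importlib.util now provides create_modules:

    import os.path
    from test.test_importlib import util   # shared helpers after this patch

    with util.create_modules('pkg.__init__', 'pkg.sub') as mapping:
        # mapping['.root'] is the temporary directory that was prepended to
        # sys.path; each module name maps to the path of its generated .py
        # file, whose body is simply:  attr = 'pkg.sub'  (and so on).
        pkg_dir = os.path.dirname(mapping['pkg.__init__'])   # .../<tmp>/pkg
        sub_path = mapping['pkg.sub']                        # .../<tmp>/pkg/sub.py
    # On exit, the created files and the sys.path/sys.modules changes are undone.
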
diff --git a/Lib/test/test_importlib/test_abc.py b/Lib/test/test_importlib/test_abc.py
index 09b7294..66a8d7b 100644
--- a/Lib/test/test_importlib/test_abc.py
+++ b/Lib/test/test_importlib/test_abc.py
@@ -10,12 +10,13 @@
 from unittest import mock
 import warnings
 
-from . import util
+from . import util as test_util
 
-frozen_init, source_init = util.import_importlib('importlib')
-frozen_abc, source_abc = util.import_importlib('importlib.abc')
-machinery = util.import_importlib('importlib.machinery')
-frozen_util, source_util = util.import_importlib('importlib.util')
+init = test_util.import_importlib('importlib')
+abc = test_util.import_importlib('importlib.abc')
+machinery = test_util.import_importlib('importlib.machinery')
+util = test_util.import_importlib('importlib.util')
+
 
 ##### Inheritance ##############################################################
 class InheritanceTests:
@@ -26,8 +27,7 @@
     subclasses = []
     superclasses = []
 
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
+    def setUp(self):
         self.superclasses = [getattr(self.abc, class_name)
                              for class_name in self.superclass_names]
         if hasattr(self, 'subclass_names'):
@@ -36,11 +36,11 @@
             # checking across module boundaries (i.e. the _bootstrap in abc is
             # not the same as the one in machinery). That means stealing one of
             # the modules from the other to make sure the same instance is used.
-            self.subclasses = [getattr(self.abc.machinery, class_name)
-                                for class_name in self.subclass_names]
+            machinery = self.abc.machinery
+            self.subclasses = [getattr(machinery, class_name)
+                               for class_name in self.subclass_names]
         assert self.subclasses or self.superclasses, self.__class__
-        testing = self.__class__.__name__.partition('_')[2]
-        self.__test = getattr(self.abc, testing)
+        self.__test = getattr(self.abc, self._NAME)
 
     def test_subclasses(self):
         # Test that the expected subclasses inherit.
@@ -54,94 +54,97 @@
             self.assertTrue(issubclass(self.__test, superclass),
                "{0} is not a superclass of {1}".format(superclass, self.__test))
 
-def create_inheritance_tests(base_class):
-    def set_frozen(ns):
-        ns['abc'] = frozen_abc
-    def set_source(ns):
-        ns['abc'] = source_abc
-
-    classes = []
-    for prefix, ns_set in [('Frozen', set_frozen), ('Source', set_source)]:
-        classes.append(types.new_class('_'.join([prefix, base_class.__name__]),
-                                       (base_class, unittest.TestCase),
-                                       exec_body=ns_set))
-    return classes
-
 
 class MetaPathFinder(InheritanceTests):
     superclass_names = ['Finder']
     subclass_names = ['BuiltinImporter', 'FrozenImporter', 'PathFinder',
                       'WindowsRegistryFinder']
 
-tests = create_inheritance_tests(MetaPathFinder)
-Frozen_MetaPathFinderInheritanceTests, Source_MetaPathFinderInheritanceTests = tests
+
+(Frozen_MetaPathFinderInheritanceTests,
+ Source_MetaPathFinderInheritanceTests
+ ) = test_util.test_both(MetaPathFinder, abc=abc)
 
 
 class PathEntryFinder(InheritanceTests):
     superclass_names = ['Finder']
     subclass_names = ['FileFinder']
 
-tests = create_inheritance_tests(PathEntryFinder)
-Frozen_PathEntryFinderInheritanceTests, Source_PathEntryFinderInheritanceTests = tests
+
+(Frozen_PathEntryFinderInheritanceTests,
+ Source_PathEntryFinderInheritanceTests
+ ) = test_util.test_both(PathEntryFinder, abc=abc)
 
 
 class ResourceLoader(InheritanceTests):
     superclass_names = ['Loader']
 
-tests = create_inheritance_tests(ResourceLoader)
-Frozen_ResourceLoaderInheritanceTests, Source_ResourceLoaderInheritanceTests = tests
+
+(Frozen_ResourceLoaderInheritanceTests,
+ Source_ResourceLoaderInheritanceTests
+ ) = test_util.test_both(ResourceLoader, abc=abc)
 
 
 class InspectLoader(InheritanceTests):
     superclass_names = ['Loader']
     subclass_names = ['BuiltinImporter', 'FrozenImporter', 'ExtensionFileLoader']
 
-tests = create_inheritance_tests(InspectLoader)
-Frozen_InspectLoaderInheritanceTests, Source_InspectLoaderInheritanceTests = tests
+
+(Frozen_InspectLoaderInheritanceTests,
+ Source_InspectLoaderInheritanceTests
+ ) = test_util.test_both(InspectLoader, abc=abc)
 
 
 class ExecutionLoader(InheritanceTests):
     superclass_names = ['InspectLoader']
     subclass_names = ['ExtensionFileLoader']
 
-tests = create_inheritance_tests(ExecutionLoader)
-Frozen_ExecutionLoaderInheritanceTests, Source_ExecutionLoaderInheritanceTests = tests
+
+(Frozen_ExecutionLoaderInheritanceTests,
+ Source_ExecutionLoaderInheritanceTests
+ ) = test_util.test_both(ExecutionLoader, abc=abc)
 
 
 class FileLoader(InheritanceTests):
     superclass_names = ['ResourceLoader', 'ExecutionLoader']
     subclass_names = ['SourceFileLoader', 'SourcelessFileLoader']
 
-tests = create_inheritance_tests(FileLoader)
-Frozen_FileLoaderInheritanceTests, Source_FileLoaderInheritanceTests = tests
+
+(Frozen_FileLoaderInheritanceTests,
+ Source_FileLoaderInheritanceTests
+ ) = test_util.test_both(FileLoader, abc=abc)
 
 
 class SourceLoader(InheritanceTests):
     superclass_names = ['ResourceLoader', 'ExecutionLoader']
     subclass_names = ['SourceFileLoader']
 
-tests = create_inheritance_tests(SourceLoader)
-Frozen_SourceLoaderInheritanceTests, Source_SourceLoaderInheritanceTests = tests
+
+(Frozen_SourceLoaderInheritanceTests,
+ Source_SourceLoaderInheritanceTests
+ ) = test_util.test_both(SourceLoader, abc=abc)
+
 
 ##### Default return values ####################################################
-def make_abc_subclasses(base_class):
-    classes = []
-    for kind, abc in [('Frozen', frozen_abc), ('Source', source_abc)]:
-        name = '_'.join([kind, base_class.__name__])
-        base_classes = base_class, getattr(abc, base_class.__name__)
-        classes.append(types.new_class(name, base_classes))
-    return classes
 
-def make_return_value_tests(base_class, test_class):
-    frozen_class, source_class = make_abc_subclasses(base_class)
-    tests = []
-    for prefix, class_in_test in [('Frozen', frozen_class), ('Source', source_class)]:
-        def set_ns(ns):
-            ns['ins'] = class_in_test()
-        tests.append(types.new_class('_'.join([prefix, test_class.__name__]),
-                                     (test_class, unittest.TestCase),
-                                     exec_body=set_ns))
-    return tests
+def make_abc_subclasses(base_class, name=None, inst=False, **kwargs):
+    if name is None:
+        name = base_class.__name__
+    base = {kind: getattr(splitabc, name)
+            for kind, splitabc in abc.items()}
+    return {cls._KIND: cls() if inst else cls
+            for cls in test_util.split_frozen(base_class, base, **kwargs)}
+
+
+class ABCTestHarness:
+
+    @property
+    def ins(self):
+        # Lazily set ins on the class.
+        cls = self.SPLIT[self._KIND]
+        ins = cls()
+        self.__class__.ins = ins
+        return ins
 
 
 class MetaPathFinder:
@@ -149,10 +152,10 @@
     def find_module(self, fullname, path):
         return super().find_module(fullname, path)
 
-Frozen_MPF, Source_MPF = make_abc_subclasses(MetaPathFinder)
 
+class MetaPathFinderDefaultsTests(ABCTestHarness):
 
-class MetaPathFinderDefaultsTests:
+    SPLIT = make_abc_subclasses(MetaPathFinder)
 
     def test_find_module(self):
         # Default should return None.
@@ -163,8 +166,9 @@
         self.ins.invalidate_caches()
 
 
-tests = make_return_value_tests(MetaPathFinder, MetaPathFinderDefaultsTests)
-Frozen_MPFDefaultTests, Source_MPFDefaultTests = tests
+(Frozen_MPFDefaultTests,
+ Source_MPFDefaultTests
+ ) = test_util.test_both(MetaPathFinderDefaultsTests)
 
 
 class PathEntryFinder:
@@ -172,10 +176,10 @@
     def find_loader(self, fullname):
         return super().find_loader(fullname)
 
-Frozen_PEF, Source_PEF = make_abc_subclasses(PathEntryFinder)
 
+class PathEntryFinderDefaultsTests(ABCTestHarness):
 
-class PathEntryFinderDefaultsTests:
+    SPLIT = make_abc_subclasses(PathEntryFinder)
 
     def test_find_loader(self):
         self.assertEqual((None, []), self.ins.find_loader('something'))
@@ -188,8 +192,9 @@
         self.ins.invalidate_caches()
 
 
-tests = make_return_value_tests(PathEntryFinder, PathEntryFinderDefaultsTests)
-Frozen_PEFDefaultTests, Source_PEFDefaultTests = tests
+(Frozen_PEFDefaultTests,
+ Source_PEFDefaultTests
+ ) = test_util.test_both(PathEntryFinderDefaultsTests)
 
 
 class Loader:
@@ -198,10 +203,9 @@
         return super().load_module(fullname)
 
 
-Frozen_L, Source_L = make_abc_subclasses(Loader)
+class LoaderDefaultsTests(ABCTestHarness):
 
-
-class LoaderDefaultsTests:
+    SPLIT = make_abc_subclasses(Loader)
 
     def test_load_module(self):
         with self.assertRaises(ImportError):
@@ -217,8 +221,9 @@
         self.assertTrue(repr(mod))
 
 
-tests = make_return_value_tests(Loader, LoaderDefaultsTests)
-Frozen_LDefaultTests, SourceLDefaultTests = tests
+(Frozen_LDefaultTests,
+ SourceLDefaultTests
+ ) = test_util.test_both(LoaderDefaultsTests)
 
 
 class ResourceLoader(Loader):
@@ -227,18 +232,18 @@
         return super().get_data(path)
 
 
-Frozen_RL, Source_RL = make_abc_subclasses(ResourceLoader)
+class ResourceLoaderDefaultsTests(ABCTestHarness):
 
-
-class ResourceLoaderDefaultsTests:
+    SPLIT = make_abc_subclasses(ResourceLoader)
 
     def test_get_data(self):
         with self.assertRaises(IOError):
             self.ins.get_data('/some/path')
 
 
-tests = make_return_value_tests(ResourceLoader, ResourceLoaderDefaultsTests)
-Frozen_RLDefaultTests, Source_RLDefaultTests = tests
+(Frozen_RLDefaultTests,
+ Source_RLDefaultTests
+ ) = test_util.test_both(ResourceLoaderDefaultsTests)
 
 
 class InspectLoader(Loader):
@@ -250,10 +255,12 @@
         return super().get_source(fullname)
 
 
-Frozen_IL, Source_IL = make_abc_subclasses(InspectLoader)
+SPLIT_IL = make_abc_subclasses(InspectLoader)
 
 
-class InspectLoaderDefaultsTests:
+class InspectLoaderDefaultsTests(ABCTestHarness):
+
+    SPLIT = SPLIT_IL
 
     def test_is_package(self):
         with self.assertRaises(ImportError):
@@ -264,8 +271,9 @@
             self.ins.get_source('blah')
 
 
-tests = make_return_value_tests(InspectLoader, InspectLoaderDefaultsTests)
-Frozen_ILDefaultTests, Source_ILDefaultTests = tests
+(Frozen_ILDefaultTests,
+ Source_ILDefaultTests
+ ) = test_util.test_both(InspectLoaderDefaultsTests)
 
 
 class ExecutionLoader(InspectLoader):
@@ -273,21 +281,25 @@
     def get_filename(self, fullname):
         return super().get_filename(fullname)
 
-Frozen_EL, Source_EL = make_abc_subclasses(ExecutionLoader)
+
+SPLIT_EL = make_abc_subclasses(ExecutionLoader)
 
 
-class ExecutionLoaderDefaultsTests:
+class ExecutionLoaderDefaultsTests(ABCTestHarness):
+
+    SPLIT = SPLIT_EL
 
     def test_get_filename(self):
         with self.assertRaises(ImportError):
             self.ins.get_filename('blah')
 
 
-tests = make_return_value_tests(ExecutionLoader, InspectLoaderDefaultsTests)
-Frozen_ELDefaultTests, Source_ELDefaultsTests = tests
+(Frozen_ELDefaultTests,
+ Source_ELDefaultsTests
+ ) = test_util.test_both(ExecutionLoaderDefaultsTests)
+
 
 ##### MetaPathFinder concrete methods ##########################################
-
 class MetaPathFinderFindModuleTests:
 
     @classmethod
@@ -317,13 +329,12 @@
         self.assertIs(found, spec.loader)
 
 
-Frozen_MPFFindModuleTests, Source_MPFFindModuleTests = util.test_both(
-        MetaPathFinderFindModuleTests,
-        abc=(frozen_abc, source_abc),
-        util=(frozen_util, source_util))
+(Frozen_MPFFindModuleTests,
+ Source_MPFFindModuleTests
+ ) = test_util.test_both(MetaPathFinderFindModuleTests, abc=abc, util=util)
+
 
 ##### PathEntryFinder concrete methods #########################################
-
 class PathEntryFinderFindLoaderTests:
 
     @classmethod
@@ -361,11 +372,10 @@
         self.assertEqual(paths, found[1])
 
 
-Frozen_PEFFindLoaderTests, Source_PEFFindLoaderTests = util.test_both(
-        PathEntryFinderFindLoaderTests,
-        abc=(frozen_abc, source_abc),
-        machinery=machinery,
-        util=(frozen_util, source_util))
+(Frozen_PEFFindLoaderTests,
+ Source_PEFFindLoaderTests
+ ) = test_util.test_both(PathEntryFinderFindLoaderTests, abc=abc, util=util,
+                         machinery=machinery)
 
 
 ##### Loader concrete methods ##################################################
@@ -386,7 +396,7 @@
     def test_fresh(self):
         loader = self.loader()
         name = 'blah'
-        with util.uncache(name):
+        with test_util.uncache(name):
             loader.load_module(name)
             module = loader.found
             self.assertIs(sys.modules[name], module)
@@ -404,7 +414,7 @@
         module = types.ModuleType(name)
         module.__spec__ = self.util.spec_from_loader(name, loader)
         module.__loader__ = loader
-        with util.uncache(name):
+        with test_util.uncache(name):
             sys.modules[name] = module
             loader.load_module(name)
             found = loader.found
@@ -412,10 +422,9 @@
             self.assertIs(module, sys.modules[name])
 
 
-Frozen_LoaderLoadModuleTests, Source_LoaderLoadModuleTests = util.test_both(
-        LoaderLoadModuleTests,
-        abc=(frozen_abc, source_abc),
-        util=(frozen_util, source_util))
+(Frozen_LoaderLoadModuleTests,
+ Source_LoaderLoadModuleTests
+ ) = test_util.test_both(LoaderLoadModuleTests, abc=abc, util=util)
 
 
 ##### InspectLoader concrete methods ###########################################
@@ -461,11 +470,10 @@
         self.assertEqual(code.co_filename, '<string>')
 
 
-class Frozen_ILSourceToCodeTests(InspectLoaderSourceToCodeTests, unittest.TestCase):
-    InspectLoaderSubclass = Frozen_IL
-
-class Source_ILSourceToCodeTests(InspectLoaderSourceToCodeTests, unittest.TestCase):
-    InspectLoaderSubclass = Source_IL
+(Frozen_ILSourceToCodeTests,
+ Source_ILSourceToCodeTests
+ ) = test_util.test_both(InspectLoaderSourceToCodeTests,
+                         InspectLoaderSubclass=SPLIT_IL)
 
 
 class InspectLoaderGetCodeTests:
@@ -495,11 +503,10 @@
             loader.get_code('blah')
 
 
-class Frozen_ILGetCodeTests(InspectLoaderGetCodeTests, unittest.TestCase):
-    InspectLoaderSubclass = Frozen_IL
-
-class Source_ILGetCodeTests(InspectLoaderGetCodeTests, unittest.TestCase):
-    InspectLoaderSubclass = Source_IL
+(Frozen_ILGetCodeTests,
+ Source_ILGetCodeTests
+ ) = test_util.test_both(InspectLoaderGetCodeTests,
+                         InspectLoaderSubclass=SPLIT_IL)
 
 
 class InspectLoaderLoadModuleTests:
@@ -543,11 +550,10 @@
             self.assertEqual(module, sys.modules[self.module_name])
 
 
-class Frozen_ILLoadModuleTests(InspectLoaderLoadModuleTests, unittest.TestCase):
-    InspectLoaderSubclass = Frozen_IL
-
-class Source_ILLoadModuleTests(InspectLoaderLoadModuleTests, unittest.TestCase):
-    InspectLoaderSubclass = Source_IL
+(Frozen_ILLoadModuleTests,
+ Source_ILLoadModuleTests
+ ) = test_util.test_both(InspectLoaderLoadModuleTests,
+                         InspectLoaderSubclass=SPLIT_IL)
 
 
 ##### ExecutionLoader concrete methods #########################################
@@ -608,15 +614,14 @@
         self.assertEqual(module.attr, 42)
 
 
-class Frozen_ELGetCodeTests(ExecutionLoaderGetCodeTests, unittest.TestCase):
-    ExecutionLoaderSubclass = Frozen_EL
-
-class Source_ELGetCodeTests(ExecutionLoaderGetCodeTests, unittest.TestCase):
-    ExecutionLoaderSubclass = Source_EL
+(Frozen_ELGetCodeTests,
+ Source_ELGetCodeTests
+ ) = test_util.test_both(ExecutionLoaderGetCodeTests,
+                         ExecutionLoaderSubclass=SPLIT_EL)
 
 
 ##### SourceLoader concrete methods ############################################
-class SourceLoader:
+class SourceOnlyLoader:
 
     # Globals that should be defined for all modules.
     source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
@@ -637,10 +642,10 @@
         return '<module>'
 
 
-Frozen_SourceOnlyL, Source_SourceOnlyL = make_abc_subclasses(SourceLoader)
+SPLIT_SOL = make_abc_subclasses(SourceOnlyLoader, 'SourceLoader')
 
 
-class SourceLoader(SourceLoader):
+class SourceLoader(SourceOnlyLoader):
 
     source_mtime = 1
 
@@ -677,11 +682,7 @@
         return path == self.bytecode_path
 
 
-Frozen_SL, Source_SL = make_abc_subclasses(SourceLoader)
-Frozen_SL.util = frozen_util
-Source_SL.util = source_util
-Frozen_SL.init = frozen_init
-Source_SL.init = source_init
+SPLIT_SL = make_abc_subclasses(SourceLoader, util=util, init=init)
 
 
 class SourceLoaderTestHarness:
@@ -765,7 +766,7 @@
         # Loading a module should set __name__, __loader__, __package__,
         # __path__ (for packages), __file__, and __cached__.
         # The module should also be put into sys.modules.
-        with util.uncache(self.name):
+        with test_util.uncache(self.name):
             with warnings.catch_warnings():
                 warnings.simplefilter('ignore', DeprecationWarning)
                 module = self.loader.load_module(self.name)
@@ -778,7 +779,7 @@
         # is a package.
         # Testing the values for a package are covered by test_load_module.
         self.setUp(is_package=False)
-        with util.uncache(self.name):
+        with test_util.uncache(self.name):
             with warnings.catch_warnings():
                 warnings.simplefilter('ignore', DeprecationWarning)
                 module = self.loader.load_module(self.name)
@@ -798,13 +799,10 @@
         self.assertEqual(returned_source, source)
 
 
-class Frozen_SourceOnlyLTests(SourceOnlyLoaderTests, unittest.TestCase):
-    loader_mock = Frozen_SourceOnlyL
-    util = frozen_util
-
-class Source_SourceOnlyLTests(SourceOnlyLoaderTests, unittest.TestCase):
-    loader_mock = Source_SourceOnlyL
-    util = source_util
+(Frozen_SourceOnlyLoaderTests,
+ Source_SourceOnlyLoaderTests
+ ) = test_util.test_both(SourceOnlyLoaderTests, util=util,
+                         loader_mock=SPLIT_SOL)
 
 
 @unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
@@ -896,15 +894,10 @@
         self.verify_code(code_object)
 
 
-class Frozen_SLBytecodeTests(SourceLoaderBytecodeTests, unittest.TestCase):
-    loader_mock = Frozen_SL
-    init = frozen_init
-    util = frozen_util
-
-class SourceSLBytecodeTests(SourceLoaderBytecodeTests, unittest.TestCase):
-    loader_mock = Source_SL
-    init = source_init
-    util = source_util
+(Frozen_SLBytecodeTests,
+ SourceSLBytecodeTests
+ ) = test_util.test_both(SourceLoaderBytecodeTests, init=init, util=util,
+                         loader_mock=SPLIT_SL)
 
 
 class SourceLoaderGetSourceTests:
@@ -940,11 +933,10 @@
         self.assertEqual(mock.get_source(name), expect)
 
 
-class Frozen_SourceOnlyLGetSourceTests(SourceLoaderGetSourceTests, unittest.TestCase):
-    SourceOnlyLoaderMock = Frozen_SourceOnlyL
-
-class Source_SourceOnlyLGetSourceTests(SourceLoaderGetSourceTests, unittest.TestCase):
-    SourceOnlyLoaderMock = Source_SourceOnlyL
+(Frozen_SourceOnlyLoaderGetSourceTests,
+ Source_SourceOnlyLoaderGetSourceTests
+ ) = test_util.test_both(SourceLoaderGetSourceTests,
+                         SourceOnlyLoaderMock=SPLIT_SOL)
 
 
 if __name__ == '__main__':
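
make_abc_subclasses() now returns a mapping keyed by the generated class's _KIND ('Frozen' or 'Source'), and ABCTestHarness resolves it lazily: the ins property instantiates the right class on first access and then caches the instance by shadowing the property with a plain class attribute. A small self-contained sketch of that caching idiom (the SPLIT values here are arbitrary stand-ins, not the real ABC subclasses):

    class Harness:
        # Hypothetical stand-ins; in the real tests SPLIT holds the
        # Frozen/Source ABC subclasses produced by make_abc_subclasses().
        SPLIT = {'Frozen': dict, 'Source': list}
        _KIND = 'Frozen'            # normally stamped on by specialize_class()

        @property
        def ins(self):
            ins = self.SPLIT[self._KIND]()
            self.__class__.ins = ins    # shadow the property with the instance
            return ins

    first = Harness().ins
    assert first is Harness().ins       # built once, then reused from the class
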
diff --git a/Lib/test/test_importlib/test_api.py b/Lib/test/test_importlib/test_api.py
index 2a2d42b..6bc3c56 100644
--- a/Lib/test/test_importlib/test_api.py
+++ b/Lib/test/test_importlib/test_api.py
@@ -1,8 +1,8 @@
-from . import util
+from . import util as test_util
 
-frozen_init, source_init = util.import_importlib('importlib')
-frozen_util, source_util = util.import_importlib('importlib.util')
-frozen_machinery, source_machinery = util.import_importlib('importlib.machinery')
+init = test_util.import_importlib('importlib')
+util = test_util.import_importlib('importlib.util')
+machinery = test_util.import_importlib('importlib.machinery')
 
 import os.path
 import sys
@@ -18,8 +18,8 @@
 
     def test_module_import(self):
         # Test importing a top-level module.
-        with util.mock_modules('top_level') as mock:
-            with util.import_state(meta_path=[mock]):
+        with test_util.mock_modules('top_level') as mock:
+            with test_util.import_state(meta_path=[mock]):
                 module = self.init.import_module('top_level')
                 self.assertEqual(module.__name__, 'top_level')
 
@@ -28,8 +28,8 @@
         pkg_name = 'pkg'
         pkg_long_name = '{0}.__init__'.format(pkg_name)
         name = '{0}.mod'.format(pkg_name)
-        with util.mock_modules(pkg_long_name, name) as mock:
-            with util.import_state(meta_path=[mock]):
+        with test_util.mock_modules(pkg_long_name, name) as mock:
+            with test_util.import_state(meta_path=[mock]):
                 module = self.init.import_module(name)
                 self.assertEqual(module.__name__, name)
 
@@ -40,16 +40,16 @@
         module_name = 'mod'
         absolute_name = '{0}.{1}'.format(pkg_name, module_name)
         relative_name = '.{0}'.format(module_name)
-        with util.mock_modules(pkg_long_name, absolute_name) as mock:
-            with util.import_state(meta_path=[mock]):
+        with test_util.mock_modules(pkg_long_name, absolute_name) as mock:
+            with test_util.import_state(meta_path=[mock]):
                 self.init.import_module(pkg_name)
                 module = self.init.import_module(relative_name, pkg_name)
                 self.assertEqual(module.__name__, absolute_name)
 
     def test_deep_relative_package_import(self):
         modules = ['a.__init__', 'a.b.__init__', 'a.c']
-        with util.mock_modules(*modules) as mock:
-            with util.import_state(meta_path=[mock]):
+        with test_util.mock_modules(*modules) as mock:
+            with test_util.import_state(meta_path=[mock]):
                 self.init.import_module('a')
                 self.init.import_module('a.b')
                 module = self.init.import_module('..c', 'a.b')
@@ -61,8 +61,8 @@
         pkg_name = 'pkg'
         pkg_long_name = '{0}.__init__'.format(pkg_name)
         name = '{0}.mod'.format(pkg_name)
-        with util.mock_modules(pkg_long_name, name) as mock:
-            with util.import_state(meta_path=[mock]):
+        with test_util.mock_modules(pkg_long_name, name) as mock:
+            with test_util.import_state(meta_path=[mock]):
                 self.init.import_module(pkg_name)
                 module = self.init.import_module(name, pkg_name)
                 self.assertEqual(module.__name__, name)
@@ -86,16 +86,15 @@
             b_load_count += 1
         code = {'a': load_a, 'a.b': load_b}
         modules = ['a.__init__', 'a.b']
-        with util.mock_modules(*modules, module_code=code) as mock:
-            with util.import_state(meta_path=[mock]):
+        with test_util.mock_modules(*modules, module_code=code) as mock:
+            with test_util.import_state(meta_path=[mock]):
                 self.init.import_module('a.b')
         self.assertEqual(b_load_count, 1)
 
-class Frozen_ImportModuleTests(ImportModuleTests, unittest.TestCase):
-    init = frozen_init
 
-class Source_ImportModuleTests(ImportModuleTests, unittest.TestCase):
-    init = source_init
+(Frozen_ImportModuleTests,
+ Source_ImportModuleTests
+ ) = test_util.test_both(ImportModuleTests, init=init)
 
 
 class FindLoaderTests:
@@ -107,7 +106,7 @@
     def test_sys_modules(self):
         # If a module with __loader__ is in sys.modules, then return it.
         name = 'some_mod'
-        with util.uncache(name):
+        with test_util.uncache(name):
             module = types.ModuleType(name)
             loader = 'a loader!'
             module.__loader__ = loader
@@ -120,7 +119,7 @@
     def test_sys_modules_loader_is_None(self):
         # If sys.modules[name].__loader__ is None, raise ValueError.
         name = 'some_mod'
-        with util.uncache(name):
+        with test_util.uncache(name):
             module = types.ModuleType(name)
             module.__loader__ = None
             sys.modules[name] = module
@@ -133,7 +132,7 @@
         # Should raise ValueError
         # Issue #17099
         name = 'some_mod'
-        with util.uncache(name):
+        with test_util.uncache(name):
             module = types.ModuleType(name)
             try:
                 del module.__loader__
@@ -148,8 +147,8 @@
     def test_success(self):
         # Return the loader found on sys.meta_path.
         name = 'some_mod'
-        with util.uncache(name):
-            with util.import_state(meta_path=[self.FakeMetaFinder]):
+        with test_util.uncache(name):
+            with test_util.import_state(meta_path=[self.FakeMetaFinder]):
                 with warnings.catch_warnings():
                     warnings.simplefilter('ignore', DeprecationWarning)
                     self.assertEqual((name, None), self.init.find_loader(name))
@@ -158,8 +157,8 @@
         # Searching on a path should work.
         name = 'some_mod'
         path = 'path to some place'
-        with util.uncache(name):
-            with util.import_state(meta_path=[self.FakeMetaFinder]):
+        with test_util.uncache(name):
+            with test_util.import_state(meta_path=[self.FakeMetaFinder]):
                 with warnings.catch_warnings():
                     warnings.simplefilter('ignore', DeprecationWarning)
                     self.assertEqual((name, path),
@@ -171,11 +170,10 @@
             warnings.simplefilter('ignore', DeprecationWarning)
             self.assertIsNone(self.init.find_loader('nevergoingtofindthismodule'))
 
-class Frozen_FindLoaderTests(FindLoaderTests, unittest.TestCase):
-    init = frozen_init
 
-class Source_FindLoaderTests(FindLoaderTests, unittest.TestCase):
-    init = source_init
+(Frozen_FindLoaderTests,
+ Source_FindLoaderTests
+ ) = test_util.test_both(FindLoaderTests, init=init)
 
 
 class ReloadTests:
@@ -195,10 +193,10 @@
             module = type(sys)('top_level')
             module.spam = 3
             sys.modules['top_level'] = module
-        mock = util.mock_modules('top_level',
-                                 module_code={'top_level': code})
+        mock = test_util.mock_modules('top_level',
+                                      module_code={'top_level': code})
         with mock:
-            with util.import_state(meta_path=[mock]):
+            with test_util.import_state(meta_path=[mock]):
                 module = self.init.import_module('top_level')
                 reloaded = self.init.reload(module)
                 actual = sys.modules['top_level']
@@ -230,7 +228,7 @@
     def test_reload_location_changed(self):
         name = 'spam'
         with support.temp_cwd(None) as cwd:
-            with util.uncache('spam'):
+            with test_util.uncache('spam'):
                 with support.DirsOnSysPath(cwd):
                     # Start as a plain module.
                     self.init.invalidate_caches()
@@ -281,7 +279,7 @@
     def test_reload_namespace_changed(self):
         name = 'spam'
         with support.temp_cwd(None) as cwd:
-            with util.uncache('spam'):
+            with test_util.uncache('spam'):
                 with support.DirsOnSysPath(cwd):
                     # Start as a namespace package.
                     self.init.invalidate_caches()
@@ -338,20 +336,16 @@
         # See #19851.
         name = 'spam'
         subname = 'ham'
-        with util.temp_module(name, pkg=True) as pkg_dir:
-            fullname, _ = util.submodule(name, subname, pkg_dir)
+        with test_util.temp_module(name, pkg=True) as pkg_dir:
+            fullname, _ = test_util.submodule(name, subname, pkg_dir)
             ham = self.init.import_module(fullname)
             reloaded = self.init.reload(ham)
             self.assertIs(reloaded, ham)
 
 
-class Frozen_ReloadTests(ReloadTests, unittest.TestCase):
-    init = frozen_init
-    util = frozen_util
-
-class Source_ReloadTests(ReloadTests, unittest.TestCase):
-    init = source_init
-    util = source_util
+(Frozen_ReloadTests,
+ Source_ReloadTests
+ ) = test_util.test_both(ReloadTests, init=init, util=util)
 
 
 class InvalidateCacheTests:
@@ -384,11 +378,10 @@
         self.addCleanup(lambda: sys.path_importer_cache.__delitem__(key))
         self.init.invalidate_caches()  # Shouldn't trigger an exception.
 
-class Frozen_InvalidateCacheTests(InvalidateCacheTests, unittest.TestCase):
-    init = frozen_init
 
-class Source_InvalidateCacheTests(InvalidateCacheTests, unittest.TestCase):
-    init = source_init
+(Frozen_InvalidateCacheTests,
+ Source_InvalidateCacheTests
+ ) = test_util.test_both(InvalidateCacheTests, init=init)
 
 
 class FrozenImportlibTests(unittest.TestCase):
@@ -398,6 +391,7 @@
         # Can't do an isinstance() check since separate copies of importlib
         # may have been used for import, so just check the name is not for the
         # frozen loader.
+        source_init = init['Source']
         self.assertNotEqual(source_init.__loader__.__class__.__name__,
                             'FrozenImporter')
 
@@ -426,11 +420,10 @@
                     elif self.machinery.FrozenImporter.find_module(name):
                         self.assertIsNot(module.__spec__, None)
 
-class Frozen_StartupTests(StartupTests, unittest.TestCase):
-    machinery = frozen_machinery
 
-class Source_StartupTests(StartupTests, unittest.TestCase):
-    machinery = source_machinery
+(Frozen_StartupTests,
+ Source_StartupTests
+ ) = test_util.test_both(StartupTests, machinery=machinery)
 
 
 if __name__ == '__main__':
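
import_importlib() now hands back a mapping instead of a tuple, so the one test above that genuinely needs a single variant (FrozenImportlibTests) subscripts it with init['Source'] rather than unpacking at module scope. A tiny sketch of the shape change, with stand-in objects for the two fresh importlib copies:

    # Stand-ins for the two fresh copies that import_importlib() imports.
    frozen_copy, source_copy = object(), object()

    # New shape returned by import_importlib('importlib'):
    init = {'Frozen': frozen_copy, 'Source': source_copy}

    # Code that needs exactly one variant subscripts instead of unpacking.
    source_init = init['Source']
    assert source_init is source_copy
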
diff --git a/Lib/test/test_importlib/test_lazy.py b/Lib/test/test_importlib/test_lazy.py
new file mode 100644
index 0000000..2e191bb
--- /dev/null
+++ b/Lib/test/test_importlib/test_lazy.py
@@ -0,0 +1,132 @@
+import importlib
+from importlib import abc
+from importlib import util
+import unittest
+
+from . import util as test_util
+
+
+class CollectInit:
+
+    def __init__(self, *args, **kwargs):
+        self.args = args
+        self.kwargs = kwargs
+
+    def exec_module(self, module):
+        return self
+
+
+class LazyLoaderFactoryTests(unittest.TestCase):
+
+    def test_init(self):
+        factory = util.LazyLoader.factory(CollectInit)
+        # E.g. what importlib.machinery.FileFinder instantiates loaders with
+        # plus keyword arguments.
+        lazy_loader = factory('module name', 'module path', kw='kw')
+        loader = lazy_loader.loader
+        self.assertEqual(('module name', 'module path'), loader.args)
+        self.assertEqual({'kw': 'kw'}, loader.kwargs)
+
+    def test_validation(self):
+        # No exec_module(), no lazy loading.
+        with self.assertRaises(TypeError):
+            util.LazyLoader.factory(object)
+
+
+class TestingImporter(abc.MetaPathFinder, abc.Loader):
+
+    module_name = 'lazy_loader_test'
+    mutated_name = 'changed'
+    loaded = None
+    source_code = 'attr = 42; __name__ = {!r}'.format(mutated_name)
+
+    def find_spec(self, name, path, target=None):
+        if name != self.module_name:
+            return None
+        return util.spec_from_loader(name, util.LazyLoader(self))
+
+    def exec_module(self, module):
+        exec(self.source_code, module.__dict__)
+        self.loaded = module
+
+
+class LazyLoaderTests(unittest.TestCase):
+
+    def test_init(self):
+        with self.assertRaises(TypeError):
+            util.LazyLoader(object)
+
+    def new_module(self, source_code=None):
+        loader = TestingImporter()
+        if source_code is not None:
+            loader.source_code = source_code
+        spec = util.spec_from_loader(TestingImporter.module_name,
+                                     util.LazyLoader(loader))
+        module = spec.loader.create_module(spec)
+        module.__spec__ = spec
+        module.__loader__ = spec.loader
+        spec.loader.exec_module(module)
+        # Module is now lazy.
+        self.assertIsNone(loader.loaded)
+        return module
+
+    def test_e2e(self):
+        # End-to-end test to verify the load is in fact lazy.
+        importer = TestingImporter()
+        assert importer.loaded is None
+        with test_util.uncache(importer.module_name):
+            with test_util.import_state(meta_path=[importer]):
+                module = importlib.import_module(importer.module_name)
+        self.assertIsNone(importer.loaded)
+        # Trigger load.
+        self.assertEqual(module.__loader__, importer)
+        self.assertIsNotNone(importer.loaded)
+        self.assertEqual(module, importer.loaded)
+
+    def test_attr_unchanged(self):
+        # An attribute only mutated as a side-effect of import should not be
+        # changed needlessly.
+        module = self.new_module()
+        self.assertEqual(TestingImporter.mutated_name, module.__name__)
+
+    def test_new_attr(self):
+        # A new attribute should persist.
+        module = self.new_module()
+        module.new_attr = 42
+        self.assertEqual(42, module.new_attr)
+
+    def test_mutated_preexisting_attr(self):
+        # Changing an attribute that already existed on the module --
+        # e.g. __name__ -- should persist.
+        module = self.new_module()
+        module.__name__ = 'bogus'
+        self.assertEqual('bogus', module.__name__)
+
+    def test_mutated_attr(self):
+        # Changing an attribute that comes into existence after an import
+        # should persist.
+        module = self.new_module()
+        module.attr = 6
+        self.assertEqual(6, module.attr)
+
+    def test_delete_eventual_attr(self):
+        # Deleting an attribute should stay deleted.
+        module = self.new_module()
+        del module.attr
+        self.assertFalse(hasattr(module, 'attr'))
+
+    def test_delete_preexisting_attr(self):
+        module = self.new_module()
+        del module.__name__
+        self.assertFalse(hasattr(module, '__name__'))
+
+    def test_module_substitution_error(self):
+        source_code = 'import sys; sys.modules[__name__] = 42'
+        module = self.new_module(source_code)
+        with test_util.uncache(TestingImporter.module_name):
+            with self.assertRaises(ValueError):
+                module.__name__
+
+
+if __name__ == '__main__':
+    unittest.main()
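
The new test_lazy.py drives importlib.util.LazyLoader directly. The sketch below mirrors LazyLoaderTests.new_module() above with a hypothetical minimal loader: the wrapped loader's exec_module() is postponed until the first attribute access on the module. LazyLoader.factory(SomeLoaderClass), exercised by LazyLoaderFactoryTests, wraps a loader class the same way, which is the form path hooks such as FileFinder instantiate.

    from importlib import util

    class DemoLoader:
        # Hypothetical minimal loader: LazyLoader only requires exec_module().
        def exec_module(self, module):
            module.attr = 42

    lazy = util.LazyLoader(DemoLoader())
    spec = util.spec_from_loader('lazy_demo', lazy)
    module = spec.loader.create_module(spec)
    module.__spec__ = spec
    module.__loader__ = spec.loader
    spec.loader.exec_module(module)   # module is now lazy; DemoLoader not run yet
    value = module.attr               # first attribute access triggers the load
    assert value == 42
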
diff --git a/Lib/test/test_importlib/test_locks.py b/Lib/test/test_importlib/test_locks.py
index dc97ba1..0805054 100644
--- a/Lib/test/test_importlib/test_locks.py
+++ b/Lib/test/test_importlib/test_locks.py
@@ -1,7 +1,6 @@
-from . import util
-frozen_init, source_init = util.import_importlib('importlib')
-frozen_bootstrap = frozen_init._bootstrap
-source_bootstrap = source_init._bootstrap
+from . import util as test_util
+
+init = test_util.import_importlib('importlib')
 
 import sys
 import time
@@ -33,13 +32,16 @@
         # _release_save() unsupported
         test_release_save_unacquired = None
 
-    class Frozen_ModuleLockAsRLockTests(ModuleLockAsRLockTests, lock_tests.RLockTests):
-        LockType = frozen_bootstrap._ModuleLock
+    LOCK_TYPES = {kind: splitinit._bootstrap._ModuleLock
+                  for kind, splitinit in init.items()}
 
-    class Source_ModuleLockAsRLockTests(ModuleLockAsRLockTests, lock_tests.RLockTests):
-        LockType = source_bootstrap._ModuleLock
-
+    (Frozen_ModuleLockAsRLockTests,
+     Source_ModuleLockAsRLockTests
+     ) = test_util.test_both(ModuleLockAsRLockTests, lock_tests.RLockTests,
+                             LockType=LOCK_TYPES)
 else:
+    LOCK_TYPES = {}
+
     class Frozen_ModuleLockAsRLockTests(unittest.TestCase):
         pass
 
@@ -47,6 +49,7 @@
         pass
 
 
+@unittest.skipUnless(threading, "threads needed for this test")
 class DeadlockAvoidanceTests:
 
     def setUp(self):
@@ -106,19 +109,22 @@
         self.assertEqual(results.count((True, False)), 0)
         self.assertEqual(results.count((True, True)), len(results))
 
-@unittest.skipUnless(threading, "threads needed for this test")
-class Frozen_DeadlockAvoidanceTests(DeadlockAvoidanceTests, unittest.TestCase):
-    LockType = frozen_bootstrap._ModuleLock
-    DeadlockError = frozen_bootstrap._DeadlockError
 
-@unittest.skipUnless(threading, "threads needed for this test")
-class Source_DeadlockAvoidanceTests(DeadlockAvoidanceTests, unittest.TestCase):
-    LockType = source_bootstrap._ModuleLock
-    DeadlockError = source_bootstrap._DeadlockError
+DEADLOCK_ERRORS = {kind: splitinit._bootstrap._DeadlockError
+                   for kind, splitinit in init.items()}
+
+(Frozen_DeadlockAvoidanceTests,
+ Source_DeadlockAvoidanceTests
+ ) = test_util.test_both(DeadlockAvoidanceTests,
+                         LockType=LOCK_TYPES, DeadlockError=DEADLOCK_ERRORS)
 
 
 class LifetimeTests:
 
+    @property
+    def bootstrap(self):
+        return self.init._bootstrap
+
     def test_lock_lifetime(self):
         name = "xyzzy"
         self.assertNotIn(name, self.bootstrap._module_locks)
@@ -135,11 +141,10 @@
         self.assertEqual(0, len(self.bootstrap._module_locks),
                          self.bootstrap._module_locks)
 
-class Frozen_LifetimeTests(LifetimeTests, unittest.TestCase):
-    bootstrap = frozen_bootstrap
 
-class Source_LifetimeTests(LifetimeTests, unittest.TestCase):
-    bootstrap = source_bootstrap
+(Frozen_LifetimeTests,
+ Source_LifetimeTests
+ ) = test_util.test_both(LifetimeTests, init=init)
 
 
 @support.reap_threads
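
Where a per-variant attribute is itself derived from the importlib copies (here the module-lock and deadlock-error types living on each variant's _bootstrap), a plain dict comprehension over init.items() produces the value mapping that test_both() expects; the extra positional argument (lock_tests.RLockTests) supplies the additional base class for the generated test cases (its handling is sketched after the util.py hunks below). A self-contained sketch of the derivation, with stand-in namespaces in place of the real importlib copies:

    import types

    # Stand-ins for the two fresh importlib copies returned by import_importlib().
    init = {
        'Frozen': types.SimpleNamespace(
            _bootstrap=types.SimpleNamespace(_ModuleLock='frozen lock',
                                             _DeadlockError='frozen error')),
        'Source': types.SimpleNamespace(
            _bootstrap=types.SimpleNamespace(_ModuleLock='source lock',
                                             _DeadlockError='source error')),
    }

    # Same comprehensions as in the hunks above, producing per-variant maps.
    LOCK_TYPES = {kind: splitinit._bootstrap._ModuleLock
                  for kind, splitinit in init.items()}
    DEADLOCK_ERRORS = {kind: splitinit._bootstrap._DeadlockError
                       for kind, splitinit in init.items()}

    assert LOCK_TYPES == {'Frozen': 'frozen lock', 'Source': 'source lock'}
    assert DEADLOCK_ERRORS == {'Frozen': 'frozen error', 'Source': 'source error'}
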
diff --git a/Lib/test/test_importlib/test_spec.py b/Lib/test/test_importlib/test_spec.py
index 71541f6..0cb14ee 100644
--- a/Lib/test/test_importlib/test_spec.py
+++ b/Lib/test/test_importlib/test_spec.py
@@ -1,10 +1,8 @@
-from . import util
+from . import util as test_util
 
-frozen_init, source_init = util.import_importlib('importlib')
-frozen_bootstrap = frozen_init._bootstrap
-source_bootstrap = source_init._bootstrap
-frozen_machinery, source_machinery = util.import_importlib('importlib.machinery')
-frozen_util, source_util = util.import_importlib('importlib.util')
+init = test_util.import_importlib('importlib')
+machinery = test_util.import_importlib('importlib.machinery')
+util = test_util.import_importlib('importlib.util')
 
 import os.path
 from test.support import CleanImport
@@ -52,6 +50,8 @@
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", DeprecationWarning)
 
+        frozen_util = util['Frozen']
+
         @frozen_util.module_for_loader
         def load_module(self, module):
             module.ham = self.HAM
@@ -221,18 +221,17 @@
         self.assertEqual(self.loc_spec.cached, 'spam.pyc')
 
 
-class Frozen_ModuleSpecTests(ModuleSpecTests, unittest.TestCase):
-    util = frozen_util
-    machinery = frozen_machinery
-
-
-class Source_ModuleSpecTests(ModuleSpecTests, unittest.TestCase):
-    util = source_util
-    machinery = source_machinery
+(Frozen_ModuleSpecTests,
+ Source_ModuleSpecTests
+ ) = test_util.test_both(ModuleSpecTests, util=util, machinery=machinery)
 
 
 class ModuleSpecMethodsTests:
 
+    @property
+    def bootstrap(self):
+        return self.init._bootstrap
+
     def setUp(self):
         self.name = 'spam'
         self.path = 'spam.py'
@@ -528,20 +527,18 @@
         self.assertIs(installed, loaded)
 
 
-class Frozen_ModuleSpecMethodsTests(ModuleSpecMethodsTests, unittest.TestCase):
-    bootstrap = frozen_bootstrap
-    machinery = frozen_machinery
-    util = frozen_util
-
-
-class Source_ModuleSpecMethodsTests(ModuleSpecMethodsTests, unittest.TestCase):
-    bootstrap = source_bootstrap
-    machinery = source_machinery
-    util = source_util
+(Frozen_ModuleSpecMethodsTests,
+ Source_ModuleSpecMethodsTests
+ ) = test_util.test_both(ModuleSpecMethodsTests, init=init, util=util,
+                         machinery=machinery)
 
 
 class ModuleReprTests:
 
+    @property
+    def bootstrap(self):
+        return self.init._bootstrap
+
     def setUp(self):
         self.module = type(os)('spam')
         self.spec = self.machinery.ModuleSpec('spam', TestLoader())
@@ -625,16 +622,10 @@
         self.assertEqual(modrepr, '<module {!r}>'.format('spam'))
 
 
-class Frozen_ModuleReprTests(ModuleReprTests, unittest.TestCase):
-    bootstrap = frozen_bootstrap
-    machinery = frozen_machinery
-    util = frozen_util
-
-
-class Source_ModuleReprTests(ModuleReprTests, unittest.TestCase):
-    bootstrap = source_bootstrap
-    machinery = source_machinery
-    util = source_util
+(Frozen_ModuleReprTests,
+ Source_ModuleReprTests
+ ) = test_util.test_both(ModuleReprTests, init=init, util=util,
+                         machinery=machinery)
 
 
 class FactoryTests:
@@ -787,7 +778,7 @@
     # spec_from_file_location()
 
     def test_spec_from_file_location_default(self):
-        if self.machinery is source_machinery:
+        if self.machinery is machinery['Source']:
             raise unittest.SkipTest('not sure why this is breaking...')
         spec = self.util.spec_from_file_location(self.name, self.path)
 
@@ -947,11 +938,6 @@
         self.assertTrue(spec.has_location)
 
 
-class Frozen_FactoryTests(FactoryTests, unittest.TestCase):
-    util = frozen_util
-    machinery = frozen_machinery
-
-
-class Source_FactoryTests(FactoryTests, unittest.TestCase):
-    util = source_util
-    machinery = source_machinery
+(Frozen_FactoryTests,
+ Source_FactoryTests
+ ) = test_util.test_both(FactoryTests, util=util, machinery=machinery)
diff --git a/Lib/test/test_importlib/test_util.py b/Lib/test/test_importlib/test_util.py
index b2823c6..9428471 100644
--- a/Lib/test/test_importlib/test_util.py
+++ b/Lib/test/test_importlib/test_util.py
@@ -1,8 +1,8 @@
-from importlib import util
+import importlib.util
 from . import util as test_util
-frozen_init, source_init = test_util.import_importlib('importlib')
-frozen_machinery, source_machinery = test_util.import_importlib('importlib.machinery')
-frozen_util, source_util = test_util.import_importlib('importlib.util')
+init = test_util.import_importlib('importlib')
+machinery = test_util.import_importlib('importlib.machinery')
+util = test_util.import_importlib('importlib.util')
 
 import os
 import sys
@@ -32,8 +32,10 @@
         self.assertEqual(self.util.decode_source(source_bytes),
                          '\n'.join([self.source, self.source]))
 
-Frozen_DecodeSourceBytesTests, Source_DecodeSourceBytesTests = test_util.test_both(
-        DecodeSourceBytesTests, util=[frozen_util, source_util])
+
+(Frozen_DecodeSourceBytesTests,
+ Source_DecodeSourceBytesTests
+ ) = test_util.test_both(DecodeSourceBytesTests, util=util)
 
 
 class ModuleForLoaderTests:
@@ -161,8 +163,10 @@
             self.assertIs(module.__loader__, loader)
             self.assertEqual(module.__package__, name)
 
-Frozen_ModuleForLoaderTests, Source_ModuleForLoaderTests = test_util.test_both(
-        ModuleForLoaderTests, util=[frozen_util, source_util])
+
+(Frozen_ModuleForLoaderTests,
+ Source_ModuleForLoaderTests
+ ) = test_util.test_both(ModuleForLoaderTests, util=util)
 
 
 class SetPackageTests:
@@ -222,18 +226,25 @@
         self.assertEqual(wrapped.__name__, fxn.__name__)
         self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
 
-Frozen_SetPackageTests, Source_SetPackageTests = test_util.test_both(
-        SetPackageTests, util=[frozen_util, source_util])
+
+(Frozen_SetPackageTests,
+ Source_SetPackageTests
+ ) = test_util.test_both(SetPackageTests, util=util)
 
 
 class SetLoaderTests:
 
     """Tests importlib.util.set_loader()."""
 
-    class DummyLoader:
-        @util.set_loader
-        def load_module(self, module):
-            return self.module
+    @property
+    def DummyLoader(self):
+        # Set DummyLoader on the class lazily.
+        class DummyLoader:
+            @self.util.set_loader
+            def load_module(self, module):
+                return self.module
+        self.__class__.DummyLoader = DummyLoader
+        return DummyLoader
 
     def test_no_attribute(self):
         loader = self.DummyLoader()
@@ -262,17 +273,10 @@
             warnings.simplefilter('ignore', DeprecationWarning)
             self.assertEqual(42, loader.load_module('blah').__loader__)
 
-class Frozen_SetLoaderTests(SetLoaderTests, unittest.TestCase):
-    class DummyLoader:
-        @frozen_util.set_loader
-        def load_module(self, module):
-            return self.module
 
-class Source_SetLoaderTests(SetLoaderTests, unittest.TestCase):
-    class DummyLoader:
-        @source_util.set_loader
-        def load_module(self, module):
-            return self.module
+(Frozen_SetLoaderTests,
+ Source_SetLoaderTests
+ ) = test_util.test_both(SetLoaderTests, util=util)
 
 
 class ResolveNameTests:
@@ -307,9 +311,10 @@
         with self.assertRaises(ValueError):
             self.util.resolve_name('..bacon', 'spam')
 
-Frozen_ResolveNameTests, Source_ResolveNameTests = test_util.test_both(
-        ResolveNameTests,
-        util=[frozen_util, source_util])
+
+(Frozen_ResolveNameTests,
+ Source_ResolveNameTests
+ ) = test_util.test_both(ResolveNameTests, util=util)
 
 
 class FindSpecTests:
@@ -446,15 +451,10 @@
             self.assertNotIn(fullname, sorted(sys.modules))
 
 
-class Frozen_FindSpecTests(FindSpecTests, unittest.TestCase):
-    init = frozen_init
-    machinery = frozen_machinery
-    util = frozen_util
-
-class Source_FindSpecTests(FindSpecTests, unittest.TestCase):
-    init = source_init
-    machinery = source_machinery
-    util = source_util
+(Frozen_FindSpecTests,
+ Source_FindSpecTests
+ ) = test_util.test_both(FindSpecTests, init=init, util=util,
+                         machinery=machinery)
 
 
 class MagicNumberTests:
@@ -467,8 +467,10 @@
         # The magic number uses \r\n to come out wrong when splitting on lines.
         self.assertTrue(self.util.MAGIC_NUMBER.endswith(b'\r\n'))
 
-Frozen_MagicNumberTests, Source_MagicNumberTests = test_util.test_both(
-        MagicNumberTests, util=[frozen_util, source_util])
+
+(Frozen_MagicNumberTests,
+ Source_MagicNumberTests
+ ) = test_util.test_both(MagicNumberTests, util=util)
 
 
 class PEP3147Tests:
@@ -583,9 +585,10 @@
             ValueError, self.util.source_from_cache,
             '/foo/bar/foo.cpython-32.foo.pyc')
 
-Frozen_PEP3147Tests, Source_PEP3147Tests = test_util.test_both(
-        PEP3147Tests,
-        util=[frozen_util, source_util])
+
+(Frozen_PEP3147Tests,
+ Source_PEP3147Tests
+ ) = test_util.test_both(PEP3147Tests, util=util)
 
 
 if __name__ == '__main__':
diff --git a/Lib/test/test_importlib/test_windows.py b/Lib/test/test_importlib/test_windows.py
index 96b4adc..d4c771c 100644
--- a/Lib/test/test_importlib/test_windows.py
+++ b/Lib/test/test_importlib/test_windows.py
@@ -1,5 +1,5 @@
-from . import util
-frozen_machinery, source_machinery = util.import_importlib('importlib.machinery')
+from . import util as test_util
+machinery = test_util.import_importlib('importlib.machinery')
 
 import sys
 import unittest
@@ -19,11 +19,6 @@
         self.assertIs(loader, None)
 
 
-class Frozen_WindowsRegistryFinderTests(WindowsRegistryFinderTests,
-                                        unittest.TestCase):
-    machinery = frozen_machinery
-
-
-class Source_WindowsRegistryFinderTests(WindowsRegistryFinderTests,
-                                        unittest.TestCase):
-    machinery = source_machinery
+(Frozen_WindowsRegistryFinderTests,
+ Source_WindowsRegistryFinderTests
+ ) = test_util.test_both(WindowsRegistryFinderTests, machinery=machinery)
diff --git a/Lib/test/test_importlib/util.py b/Lib/test/test_importlib/util.py
index 885cec3..aa4cd7e 100644
--- a/Lib/test/test_importlib/util.py
+++ b/Lib/test/test_importlib/util.py
@@ -1,31 +1,85 @@
-from contextlib import contextmanager
-from importlib import util, invalidate_caches
+import builtins
+import contextlib
+import errno
+import functools
+import importlib
+from importlib import machinery, util, invalidate_caches
+import os
 import os.path
 from test import support
 import unittest
 import sys
+import tempfile
 import types
 
 
+BUILTINS = types.SimpleNamespace()
+BUILTINS.good_name = None
+BUILTINS.bad_name = None
+if 'errno' in sys.builtin_module_names:
+    BUILTINS.good_name = 'errno'
+if 'importlib' not in sys.builtin_module_names:
+    BUILTINS.bad_name = 'importlib'
+
+EXTENSIONS = types.SimpleNamespace()
+EXTENSIONS.path = None
+EXTENSIONS.ext = None
+EXTENSIONS.filename = None
+EXTENSIONS.file_path = None
+EXTENSIONS.name = '_testcapi'
+
+def _extension_details():
+    global EXTENSIONS
+    for path in sys.path:
+        for ext in machinery.EXTENSION_SUFFIXES:
+            filename = EXTENSIONS.name + ext
+            file_path = os.path.join(path, filename)
+            if os.path.exists(file_path):
+                EXTENSIONS.path = path
+                EXTENSIONS.ext = ext
+                EXTENSIONS.filename = filename
+                EXTENSIONS.file_path = file_path
+                return
+
+_extension_details()
+
+
 def import_importlib(module_name):
     """Import a module from importlib both w/ and w/o _frozen_importlib."""
     fresh = ('importlib',) if '.' in module_name else ()
     frozen = support.import_fresh_module(module_name)
     source = support.import_fresh_module(module_name, fresh=fresh,
                                          blocked=('_frozen_importlib',))
+    return {'Frozen': frozen, 'Source': source}
+
+
+def specialize_class(cls, kind, base=None, **kwargs):
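+    """Create a *kind*-specific (e.g. 'Frozen' or 'Source') subclass of *cls*.
+
+    *base* defaults to unittest.TestCase; if it is not a class it is indexed
+    by *kind*.  Each keyword argument maps kinds to values, and the value for
+    *kind* is set as an attribute on the new class.
+    """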
+    # XXX Support passing in submodule names--load (and cache) them?
+    # That would clean up the test modules a bit more.
+    if base is None:
+        base = unittest.TestCase
+    elif not isinstance(base, type):
+        base = base[kind]
+    name = '{}_{}'.format(kind, cls.__name__)
+    bases = (cls, base)
+    specialized = types.new_class(name, bases)
+    specialized.__module__ = cls.__module__
+    specialized._NAME = cls.__name__
+    specialized._KIND = kind
+    for attr, values in kwargs.items():
+        value = values[kind]
+        setattr(specialized, attr, value)
+    return specialized
+
+
+def split_frozen(cls, base=None, **kwargs):
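+    """Return the 'Frozen' and 'Source' specializations of *cls*."""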
+    frozen = specialize_class(cls, 'Frozen', base, **kwargs)
+    source = specialize_class(cls, 'Source', base, **kwargs)
     return frozen, source
 
 
-def test_both(test_class, **kwargs):
-    frozen_tests = types.new_class('Frozen_'+test_class.__name__,
-                                   (test_class, unittest.TestCase))
-    source_tests = types.new_class('Source_'+test_class.__name__,
-                                   (test_class, unittest.TestCase))
-    frozen_tests.__module__ = source_tests.__module__ = test_class.__module__
-    for attr, (frozen_value, source_value) in kwargs.items():
-        setattr(frozen_tests, attr, frozen_value)
-        setattr(source_tests, attr, source_value)
-    return frozen_tests, source_tests
+def test_both(test_class, base=None, **kwargs):
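+    """Create Frozen_ and Source_ test classes from *test_class*."""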
+    return split_frozen(test_class, base, **kwargs)
 
 
 CASE_INSENSITIVE_FS = True
@@ -38,6 +92,10 @@
     if not os.path.exists(changed_name):
         CASE_INSENSITIVE_FS = False
 
+source_importlib = import_importlib('importlib')['Source']
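+# The __import__ implementation each kind of test should use: the frozen tests
+# get the built-in __import__, the source tests get the pure Python one.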
+__import__ = {'Frozen': staticmethod(builtins.__import__),
+              'Source': staticmethod(source_importlib.__import__)}
+
 
 def case_insensitive_tests(test):
     """Class decorator that nullifies tests requiring a case-insensitive
@@ -53,7 +111,7 @@
     return '{}.{}'.format(parent, name), path
 
 
-@contextmanager
+@contextlib.contextmanager
 def uncache(*names):
     """Uncache a module from sys.modules.
 
@@ -79,7 +137,7 @@
                 pass
 
 
-@contextmanager
+@contextlib.contextmanager
 def temp_module(name, content='', *, pkg=False):
     conflicts = [n for n in sys.modules if n.partition('.')[0] == name]
     with support.temp_cwd(None) as cwd:
@@ -103,7 +161,7 @@
                 yield location
 
 
-@contextmanager
+@contextlib.contextmanager
 def import_state(**kwargs):
     """Context manager to manage the various importers and stored state in the
     sys module.
@@ -198,6 +256,7 @@
                     raise
             return self.modules[fullname]
 
+
 class mock_spec(_ImporterMock):
 
     """Importer mock using PEP 451 APIs."""
@@ -223,3 +282,99 @@
             self.module_code[module.__spec__.name]()
         except KeyError:
             pass
+
+
+def writes_bytecode_files(fxn):
+    """Decorator to protect sys.dont_write_bytecode from mutation and to skip
+    tests that require it to be set to False."""
+    if sys.dont_write_bytecode:
+        return lambda *args, **kwargs: None
+    @functools.wraps(fxn)
+    def wrapper(*args, **kwargs):
+        original = sys.dont_write_bytecode
+        sys.dont_write_bytecode = False
+        try:
+            to_return = fxn(*args, **kwargs)
+        finally:
+            sys.dont_write_bytecode = original
+        return to_return
+    return wrapper
+
+
+def ensure_bytecode_path(bytecode_path):
+    """Ensure that the __pycache__ directory for PEP 3147 pyc file exists.
+
+    :param bytecode_path: File system path to PEP 3147 pyc file.
+    """
+    try:
+        os.mkdir(os.path.dirname(bytecode_path))
+    except OSError as error:
+        if error.errno != errno.EEXIST:
+            raise
+
+
+@contextlib.contextmanager
+def create_modules(*names):
+    """Temporarily create each named module with an attribute (named 'attr')
+    that contains the name passed into the context manager that caused the
+    creation of the module.
+
+    All files are created in a temporary directory returned by
+    tempfile.mkdtemp(). This directory is inserted at the beginning of
+    sys.path. When the context manager exits all created files (source and
+    bytecode) are explicitly deleted.
+
+    No magic is performed when creating packages! This means that if you
+    create a module within a package you must also create the package's
+    __init__ file.
+
+    """
+    source = 'attr = {0!r}'
+    created_paths = []
+    mapping = {}
+    state_manager = None
+    uncache_manager = None
+    try:
+        temp_dir = tempfile.mkdtemp()
+        mapping['.root'] = temp_dir
+        import_names = set()
+        for name in names:
+            if not name.endswith('__init__'):
+                import_name = name
+            else:
+                import_name = name[:-len('.__init__')]
+            import_names.add(import_name)
+            if import_name in sys.modules:
+                del sys.modules[import_name]
+            name_parts = name.split('.')
+            file_path = temp_dir
+            for directory in name_parts[:-1]:
+                file_path = os.path.join(file_path, directory)
+                if not os.path.exists(file_path):
+                    os.mkdir(file_path)
+                    created_paths.append(file_path)
+            file_path = os.path.join(file_path, name_parts[-1] + '.py')
+            with open(file_path, 'w') as file:
+                file.write(source.format(name))
+            created_paths.append(file_path)
+            mapping[name] = file_path
+        uncache_manager = uncache(*import_names)
+        uncache_manager.__enter__()
+        state_manager = import_state(path=[temp_dir])
+        state_manager.__enter__()
+        yield mapping
+    finally:
+        if state_manager is not None:
+            state_manager.__exit__(None, None, None)
+        if uncache_manager is not None:
+            uncache_manager.__exit__(None, None, None)
+        support.rmtree(temp_dir)
+
+
+def mock_path_hook(*entries, importer):
+    """A mock sys.path_hooks entry."""
+    def hook(entry):
+        if entry not in entries:
+            raise ImportError
+        return importer
+    return hook
diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py
index 1ede3b5..7ad190b 100644
--- a/Lib/test/test_inspect.py
+++ b/Lib/test/test_inspect.py
@@ -8,6 +8,7 @@
 import os
 from os.path import normcase
 import _pickle
+import pickle
 import re
 import shutil
 import sys
@@ -73,6 +74,7 @@
     for i in range(2):
         yield i
 
+
 class TestPredicates(IsTestBase):
     def test_sixteen(self):
         count = len([x for x in dir(inspect) if x.startswith('is')])
@@ -1611,6 +1613,17 @@
         self.assertRaises(TypeError, inspect.getgeneratorlocals, (2,3))
 
 
+class MySignature(inspect.Signature):
+    # Top-level to make it picklable;
+    # used in test_signature_object_pickle
+    pass
+
+class MyParameter(inspect.Parameter):
+    # Top-level to make it picklable;
+    # used in test_signature_object_pickle
+    pass
+
+
 class TestSignatureObject(unittest.TestCase):
     @staticmethod
     def signature(func):
@@ -1668,6 +1681,37 @@
         with self.assertRaisesRegex(ValueError, 'follows default argument'):
             S((pkd, pk))
 
+        self.assertTrue(repr(sig).startswith('<Signature'))
+        self.assertTrue('"(po, pk' in repr(sig))
+
+    def test_signature_object_pickle(self):
+        def foo(a, b, *, c:1={}, **kw) -> {42:'ham'}: pass
+        foo_partial = functools.partial(foo, a=1)
+
+        sig = inspect.signature(foo_partial)
+
+        for ver in range(pickle.HIGHEST_PROTOCOL + 1):
+            with self.subTest(pickle_ver=ver, subclass=False):
+                sig_pickled = pickle.loads(pickle.dumps(sig, ver))
+                self.assertEqual(sig, sig_pickled)
+
+        # Test that basic sub-classing works
+        sig = inspect.signature(foo)
+        myparam = MyParameter(name='z', kind=inspect.Parameter.POSITIONAL_ONLY)
+        myparams = collections.OrderedDict(sig.parameters, a=myparam)
+        mysig = MySignature().replace(parameters=myparams.values(),
+                                      return_annotation=sig.return_annotation)
+        self.assertTrue(isinstance(mysig, MySignature))
+        self.assertTrue(isinstance(mysig.parameters['z'], MyParameter))
+
+        for ver in range(pickle.HIGHEST_PROTOCOL + 1):
+            with self.subTest(pickle_ver=ver, subclass=True):
+                sig_pickled = pickle.loads(pickle.dumps(mysig, ver))
+                self.assertEqual(mysig, sig_pickled)
+                self.assertTrue(isinstance(sig_pickled, MySignature))
+                self.assertTrue(isinstance(sig_pickled.parameters['z'],
+                                           MyParameter))
+
     def test_signature_immutability(self):
         def test(a):
             pass
@@ -2469,11 +2513,29 @@
         def bar(pos, *args, c, b, a=42, **kwargs:int): pass
         self.assertEqual(inspect.signature(foo), inspect.signature(bar))
 
-    def test_signature_unhashable(self):
+    def test_signature_hashable(self):
+        S = inspect.Signature
+        P = inspect.Parameter
+
         def foo(a): pass
-        sig = inspect.signature(foo)
+        foo_sig = inspect.signature(foo)
+
+        manual_sig = S(parameters=[P('a', P.POSITIONAL_OR_KEYWORD)])
+
+        self.assertEqual(hash(foo_sig), hash(manual_sig))
+        self.assertNotEqual(hash(foo_sig),
+                            hash(manual_sig.replace(return_annotation='spam')))
+
+        def bar(a) -> 1: pass
+        self.assertNotEqual(hash(foo_sig), hash(inspect.signature(bar)))
+
+        def foo(a={}): pass
         with self.assertRaisesRegex(TypeError, 'unhashable type'):
-            hash(sig)
+            hash(inspect.signature(foo))
+
+        def foo(a) -> {}: pass
+        with self.assertRaisesRegex(TypeError, 'unhashable type'):
+            hash(inspect.signature(foo))
 
     def test_signature_str(self):
         def foo(a:int=1, *, b, c=None, **kwargs) -> 42:
@@ -2547,6 +2609,19 @@
         self.assertEqual(self.signature(Spam.foo),
                          self.signature(Ham.foo))
 
+    def test_signature_from_callable_python_obj(self):
+        class MySignature(inspect.Signature): pass
+        def foo(a, *, b:1): pass
+        foo_sig = MySignature.from_callable(foo)
+        self.assertTrue(isinstance(foo_sig, MySignature))
+
+    @unittest.skipIf(MISSING_C_DOCSTRINGS,
+                     "Signature information for builtins requires docstrings")
+    def test_signature_from_callable_builtin_obj(self):
+        class MySignature(inspect.Signature): pass
+        sig = MySignature.from_callable(_pickle.Pickler)
+        self.assertTrue(isinstance(sig, MySignature))
+
 
 class TestParameterObject(unittest.TestCase):
     def test_signature_parameter_kinds(self):
@@ -2592,6 +2667,16 @@
             p.replace(kind=inspect.Parameter.VAR_POSITIONAL)
 
         self.assertTrue(repr(p).startswith('<Parameter'))
+        self.assertTrue('"a=42"' in repr(p))
+
+    def test_signature_parameter_hashable(self):
+        P = inspect.Parameter
+        foo = P('foo', kind=P.POSITIONAL_ONLY)
+        self.assertEqual(hash(foo), hash(P('foo', kind=P.POSITIONAL_ONLY)))
+        self.assertNotEqual(hash(foo), hash(P('foo', kind=P.POSITIONAL_ONLY,
+                                              default=42)))
+        self.assertNotEqual(hash(foo),
+                            hash(foo.replace(kind=P.VAR_POSITIONAL)))
 
     def test_signature_parameter_equality(self):
         P = inspect.Parameter
@@ -2603,13 +2688,6 @@
         self.assertEqual(p, P('foo', default=42,
                               kind=inspect.Parameter.KEYWORD_ONLY))
 
-    def test_signature_parameter_unhashable(self):
-        p = inspect.Parameter('foo', default=42,
-                              kind=inspect.Parameter.KEYWORD_ONLY)
-
-        with self.assertRaisesRegex(TypeError, 'unhashable type'):
-            hash(p)
-
     def test_signature_parameter_replace(self):
         p = inspect.Parameter('foo', default=42,
                               kind=inspect.Parameter.KEYWORD_ONLY)
@@ -2918,6 +2996,16 @@
         ba4 = inspect.signature(bar).bind(1)
         self.assertNotEqual(ba, ba4)
 
+    def test_signature_bound_arguments_pickle(self):
+        def foo(a, b, *, c:1={}, **kw) -> {42:'ham'}: pass
+        sig = inspect.signature(foo)
+        ba = sig.bind(20, 30, z={})
+
+        for ver in range(pickle.HIGHEST_PROTOCOL + 1):
+            with self.subTest(pickle_ver=ver):
+                ba_pickled = pickle.loads(pickle.dumps(ba, ver))
+                self.assertEqual(ba, ba_pickled)
+
 
 class TestSignaturePrivateHelpers(unittest.TestCase):
     def test_signature_get_bound_param(self):
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index ef1e056..347832d 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -2713,6 +2713,34 @@
         self.assertFalse(err)
         self.assertEqual("ok", out.decode().strip())
 
+    def test_read_byteslike(self):
+        r = MemviewBytesIO(b'Just some random string\n')
+        t = self.TextIOWrapper(r, 'utf-8')
+
+        # TextIOWrapper will not read the full string, because
+        # we truncate it to a multiple of the native int size
+        # so that we can construct a more complex memoryview.
+        bytes_val = _to_memoryview(r.getvalue()).tobytes()
+
+        self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
+
+class MemviewBytesIO(io.BytesIO):
+    '''A BytesIO object whose read method returns memoryviews
+       rather than bytes'''
+
+    def read1(self, len_):
+        return _to_memoryview(super().read1(len_))
+
+    def read(self, len_):
+        return _to_memoryview(super().read(len_))
+
+def _to_memoryview(buf):
+    '''Convert bytes-object *buf* to a non-trivial memoryview'''
+
+    arr = array.array('i')
+    idx = len(buf) - len(buf) % arr.itemsize
+    arr.frombytes(buf[:idx])
+    return memoryview(arr)
 
 class CTextIOWrapperTest(TextIOWrapperTest):
     io = io
diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py
index f2947b9..0b71bf8 100644
--- a/Lib/test/test_ipaddress.py
+++ b/Lib/test/test_ipaddress.py
@@ -628,6 +628,119 @@
         self.assertEqual("IPv6Interface('::1/128')",
                          repr(ipaddress.IPv6Interface('::1')))
 
+    # issue #16531: constructing IPv4Network from an (address, mask) tuple
+    def testIPv4Tuple(self):
+        # /32
+        ip = ipaddress.IPv4Address('192.0.2.1')
+        net = ipaddress.IPv4Network('192.0.2.1/32')
+        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 32)), net)
+        self.assertEqual(ipaddress.IPv4Network((ip, 32)), net)
+        self.assertEqual(ipaddress.IPv4Network((3221225985, 32)), net)
+        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
+                                                '255.255.255.255')), net)
+        self.assertEqual(ipaddress.IPv4Network((ip,
+                                                '255.255.255.255')), net)
+        self.assertEqual(ipaddress.IPv4Network((3221225985,
+                                                '255.255.255.255')), net)
+        # strict=True and host bits set
+        with self.assertRaises(ValueError):
+            ipaddress.IPv4Network(('192.0.2.1', 24))
+        with self.assertRaises(ValueError):
+            ipaddress.IPv4Network((ip, 24))
+        with self.assertRaises(ValueError):
+            ipaddress.IPv4Network((3221225985, 24))
+        with self.assertRaises(ValueError):
+            ipaddress.IPv4Network(('192.0.2.1', '255.255.255.0'))
+        with self.assertRaises(ValueError):
+            ipaddress.IPv4Network((ip, '255.255.255.0'))
+        with self.assertRaises(ValueError):
+            ipaddress.IPv4Network((3221225985, '255.255.255.0'))
+        # strict=False and host bits set
+        net = ipaddress.IPv4Network('192.0.2.0/24')
+        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 24),
+                                               strict=False), net)
+        self.assertEqual(ipaddress.IPv4Network((ip, 24),
+                                               strict=False), net)
+        self.assertEqual(ipaddress.IPv4Network((3221225985, 24),
+                                               strict=False), net)
+        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
+                                                '255.255.255.0'),
+                                               strict=False), net)
+        self.assertEqual(ipaddress.IPv4Network((ip,
+                                                '255.255.255.0'),
+                                               strict=False), net)
+        self.assertEqual(ipaddress.IPv4Network((3221225985,
+                                                '255.255.255.0'),
+                                               strict=False), net)
+
+        # /24
+        ip = ipaddress.IPv4Address('192.0.2.0')
+        net = ipaddress.IPv4Network('192.0.2.0/24')
+        self.assertEqual(ipaddress.IPv4Network(('192.0.2.0',
+                                                '255.255.255.0')), net)
+        self.assertEqual(ipaddress.IPv4Network((ip,
+                                                '255.255.255.0')), net)
+        self.assertEqual(ipaddress.IPv4Network((3221225984,
+                                                '255.255.255.0')), net)
+        self.assertEqual(ipaddress.IPv4Network(('192.0.2.0', 24)), net)
+        self.assertEqual(ipaddress.IPv4Network((ip, 24)), net)
+        self.assertEqual(ipaddress.IPv4Network((3221225984, 24)), net)
+
+        self.assertEqual(ipaddress.IPv4Interface(('192.0.2.1', 24)),
+                         ipaddress.IPv4Interface('192.0.2.1/24'))
+        self.assertEqual(ipaddress.IPv4Interface((3221225985, 24)),
+                         ipaddress.IPv4Interface('192.0.2.1/24'))
+
+    # issue #16531: constructing IPv6Network from an (address, mask) tuple
+    def testIPv6Tuple(self):
+        # /128
+        ip = ipaddress.IPv6Address('2001:db8::')
+        net = ipaddress.IPv6Network('2001:db8::/128')
+        self.assertEqual(ipaddress.IPv6Network(('2001:db8::', '128')),
+                         net)
+        self.assertEqual(ipaddress.IPv6Network(
+                (42540766411282592856903984951653826560, 128)),
+                         net)
+        self.assertEqual(ipaddress.IPv6Network((ip, '128')),
+                         net)
+        ip = ipaddress.IPv6Address('2001:db8::')
+        net = ipaddress.IPv6Network('2001:db8::/96')
+        self.assertEqual(ipaddress.IPv6Network(('2001:db8::', '96')),
+                         net)
+        self.assertEqual(ipaddress.IPv6Network(
+                (42540766411282592856903984951653826560, 96)),
+                         net)
+        self.assertEqual(ipaddress.IPv6Network((ip, '96')),
+                         net)
+
+        # strict=True and host bits set
+        ip = ipaddress.IPv6Address('2001:db8::1')
+        with self.assertRaises(ValueError):
+            ipaddress.IPv6Network(('2001:db8::1', 96))
+        with self.assertRaises(ValueError):
+            ipaddress.IPv6Network((
+                42540766411282592856903984951653826561, 96))
+        with self.assertRaises(ValueError):
+            ipaddress.IPv6Network((ip, 96))
+        # strict=False and host bits set
+        net = ipaddress.IPv6Network('2001:db8::/96')
+        self.assertEqual(ipaddress.IPv6Network(('2001:db8::1', 96),
+                                               strict=False),
+                         net)
+        self.assertEqual(ipaddress.IPv6Network(
+                             (42540766411282592856903984951653826561, 96),
+                             strict=False),
+                         net)
+        self.assertEqual(ipaddress.IPv6Network((ip, 96), strict=False),
+                         net)
+
+        # /96
+        self.assertEqual(ipaddress.IPv6Interface(('2001:db8::1', '96')),
+                         ipaddress.IPv6Interface('2001:db8::1/96'))
+        self.assertEqual(ipaddress.IPv6Interface(
+                (42540766411282592856903984951653826561, '96')),
+                         ipaddress.IPv6Interface('2001:db8::1/96'))
+
     # issue57
     def testAddressIntMath(self):
         self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
@@ -1593,6 +1706,14 @@
                          addr3.exploded)
         self.assertEqual('192.168.178.1', addr4.exploded)
 
+    def testReversePointer(self):
+        addr1 = ipaddress.IPv4Address('127.0.0.1')
+        addr2 = ipaddress.IPv6Address('2001:db8::1')
+        self.assertEqual('1.0.0.127.in-addr.arpa', addr1.reverse_pointer)
+        self.assertEqual('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.' +
+                         'b.d.0.1.0.0.2.ip6.arpa',
+                         addr2.reverse_pointer)
+
     def testIntRepresentation(self):
         self.assertEqual(16909060, int(self.ipv4_address))
         self.assertEqual(42540616829182469433547762482097946625,
diff --git a/Lib/test/test_json/test_tool.py b/Lib/test/test_json/test_tool.py
index 0c39e56..5484a8a 100644
--- a/Lib/test/test_json/test_tool.py
+++ b/Lib/test/test_json/test_tool.py
@@ -55,6 +55,7 @@
     def test_infile_stdout(self):
         infile = self._create_infile()
         rc, out, err = assert_python_ok('-m', 'json.tool', infile)
+        self.assertEqual(rc, 0)
         self.assertEqual(out.splitlines(), self.expect.encode().splitlines())
         self.assertEqual(err, b'')
 
@@ -65,5 +66,12 @@
         self.addCleanup(os.remove, outfile)
         with open(outfile, "r") as fp:
             self.assertEqual(fp.read(), self.expect)
+        self.assertEqual(rc, 0)
         self.assertEqual(out, b'')
         self.assertEqual(err, b'')
+
+    def test_help_flag(self):
+        rc, out, err = assert_python_ok('-m', 'json.tool', '-h')
+        self.assertEqual(rc, 0)
+        self.assertTrue(out.startswith(b'usage: '))
+        self.assertEqual(err, b'')
diff --git a/Lib/test/test_math.py b/Lib/test/test_math.py
index 48f84ba..c9f3f16 100644
--- a/Lib/test/test_math.py
+++ b/Lib/test/test_math.py
@@ -422,9 +422,17 @@
             self.assertEqual(math.factorial(i), py_factorial(i))
         self.assertRaises(ValueError, math.factorial, -1)
         self.assertRaises(ValueError, math.factorial, -1.0)
+        self.assertRaises(ValueError, math.factorial, -10**100)
+        self.assertRaises(ValueError, math.factorial, -1e100)
         self.assertRaises(ValueError, math.factorial, math.pi)
-        self.assertRaises(OverflowError, math.factorial, sys.maxsize+1)
-        self.assertRaises(OverflowError, math.factorial, 10e100)
+
+    # Other implementations may place different upper bounds.
+    @support.cpython_only
+    def testFactorialHugeInputs(self):
+        # Currently raises OverflowError for inputs that are too large
+        # to fit into a C long.
+        self.assertRaises(OverflowError, math.factorial, 10**100)
+        self.assertRaises(OverflowError, math.factorial, 1e100)
 
     def testFloor(self):
         self.assertRaises(TypeError, math.floor)
diff --git a/Lib/test/test_module.py b/Lib/test/test_module.py
index 1230293..9da3536 100644
--- a/Lib/test/test_module.py
+++ b/Lib/test/test_module.py
@@ -30,6 +30,22 @@
             pass
         self.assertEqual(foo.__doc__, ModuleType.__doc__)
 
+    def test_uninitialized_missing_getattr(self):
+        # Issue 8297
+        # test the text in the AttributeError of an uninitialized module
+        foo = ModuleType.__new__(ModuleType)
+        self.assertRaisesRegex(
+                AttributeError, "module has no attribute 'not_here'",
+                getattr, foo, "not_here")
+
+    def test_missing_getattr(self):
+        # Issue 8297
+        # test the text in the AttributeError
+        foo = ModuleType("foo")
+        self.assertRaisesRegex(
+                AttributeError, "module 'foo' has no attribute 'not_here'",
+                getattr, foo, "not_here")
+
     def test_no_docstring(self):
         # Regularly initialized module, no docstring
         foo = ModuleType("foo")
@@ -211,6 +227,14 @@
             b"len = len",
             b"shutil.rmtree = rmtree"})
 
+    def test_descriptor_errors_propagate(self):
+        class Descr:
+            def __get__(self, o, t):
+                raise RuntimeError
+        class M(ModuleType):
+            melon = Descr()
+        self.assertRaises(RuntimeError, getattr, M("mymod"), "melon")
+
     # frozen and namespace module reprs are tested in importlib.
 
 
diff --git a/Lib/test/test_operator.py b/Lib/test/test_operator.py
index ab58a98..1bd0391 100644
--- a/Lib/test/test_operator.py
+++ b/Lib/test/test_operator.py
@@ -203,6 +203,15 @@
         self.assertRaises(TypeError, operator.mul, None, None)
         self.assertTrue(operator.mul(5, 2) == 10)
 
+    def test_matmul(self):
+        operator = self.module
+        self.assertRaises(TypeError, operator.matmul)
+        self.assertRaises(TypeError, operator.matmul, 42, 42)
+        class M:
+            def __matmul__(self, other):
+                return other - 1
+        self.assertEqual(M() @ 42, 41)
+
     def test_neg(self):
         operator = self.module
         self.assertRaises(TypeError, operator.neg)
@@ -416,6 +425,7 @@
             def __ilshift__  (self, other): return "ilshift"
             def __imod__     (self, other): return "imod"
             def __imul__     (self, other): return "imul"
+            def __imatmul__  (self, other): return "imatmul"
             def __ior__      (self, other): return "ior"
             def __ipow__     (self, other): return "ipow"
             def __irshift__  (self, other): return "irshift"
@@ -430,6 +440,7 @@
         self.assertEqual(operator.ilshift  (c, 5), "ilshift")
         self.assertEqual(operator.imod     (c, 5), "imod")
         self.assertEqual(operator.imul     (c, 5), "imul")
+        self.assertEqual(operator.imatmul  (c, 5), "imatmul")
         self.assertEqual(operator.ior      (c, 5), "ior")
         self.assertEqual(operator.ipow     (c, 5), "ipow")
         self.assertEqual(operator.irshift  (c, 5), "irshift")
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index e129bef..7d5ee69 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -39,6 +39,10 @@
     import fcntl
 except ImportError:
     fcntl = None
+try:
+    import _winapi
+except ImportError:
+    _winapi = None
 
 from test.script_helper import assert_python_ok
 
@@ -1773,6 +1777,37 @@
             shutil.rmtree(level1)
 
 
+@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
+class Win32JunctionTests(unittest.TestCase):
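+    """Tests for junctions created with _winapi.CreateJunction()."""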
+    junction = 'junctiontest'
+    junction_target = os.path.dirname(os.path.abspath(__file__))
+
+    def setUp(self):
+        assert os.path.exists(self.junction_target)
+        assert not os.path.exists(self.junction)
+
+    def tearDown(self):
+        if os.path.exists(self.junction):
+            # os.rmdir delegates to Windows' RemoveDirectoryW,
+            # which removes junction points safely.
+            os.rmdir(self.junction)
+
+    def test_create_junction(self):
+        _winapi.CreateJunction(self.junction_target, self.junction)
+        self.assertTrue(os.path.exists(self.junction))
+        self.assertTrue(os.path.isdir(self.junction))
+
+        # Junctions are not recognized as links.
+        self.assertFalse(os.path.islink(self.junction))
+
+    def test_unlink_removes_junction(self):
+        _winapi.CreateJunction(self.junction_target, self.junction)
+        self.assertTrue(os.path.exists(self.junction))
+
+        os.unlink(self.junction)
+        self.assertFalse(os.path.exists(self.junction))
+
+
 @support.skip_unless_symlink
 class NonLocalSymlinkTests(unittest.TestCase):
 
@@ -2544,6 +2579,7 @@
         RemoveDirsTests,
         CPUCountTests,
         FDInheritanceTests,
+        Win32JunctionTests,
     )
 
 if __name__ == "__main__":
diff --git a/Lib/test/test_pathlib.py b/Lib/test/test_pathlib.py
index 6378d8c..fd9cf23 100644
--- a/Lib/test/test_pathlib.py
+++ b/Lib/test/test_pathlib.py
@@ -1251,6 +1251,26 @@
         p = self.cls.cwd()
         self._test_cwd(p)
 
+    def test_samefile(self):
+        fileA_path = os.path.join(BASE, 'fileA')
+        fileB_path = os.path.join(BASE, 'dirB', 'fileB')
+        p = self.cls(fileA_path)
+        pp = self.cls(fileA_path)
+        q = self.cls(fileB_path)
+        self.assertTrue(p.samefile(fileA_path))
+        self.assertTrue(p.samefile(pp))
+        self.assertFalse(p.samefile(fileB_path))
+        self.assertFalse(p.samefile(q))
+        # Test the non-existent file case
+        non_existent = os.path.join(BASE, 'foo')
+        r = self.cls(non_existent)
+        self.assertRaises(FileNotFoundError, p.samefile, r)
+        self.assertRaises(FileNotFoundError, p.samefile, non_existent)
+        self.assertRaises(FileNotFoundError, r.samefile, p)
+        self.assertRaises(FileNotFoundError, r.samefile, non_existent)
+        self.assertRaises(FileNotFoundError, r.samefile, r)
+        self.assertRaises(FileNotFoundError, r.samefile, non_existent)
+
     def test_empty_path(self):
         # The empty path points to '.'
         p = self.cls('')
diff --git a/Lib/test/test_poplib.py b/Lib/test/test_poplib.py
index d076fc1..d695a0d 100644
--- a/Lib/test/test_poplib.py
+++ b/Lib/test/test_poplib.py
@@ -349,23 +349,18 @@
 
 
 if SUPPORTS_SSL:
+    from test.test_ftplib import SSLConnection
 
-    class DummyPOP3_SSLHandler(DummyPOP3Handler):
+    class DummyPOP3_SSLHandler(SSLConnection, DummyPOP3Handler):
 
         def __init__(self, conn):
             asynchat.async_chat.__init__(self, conn)
-            ssl_socket = ssl.wrap_socket(self.socket, certfile=CERTFILE,
-                                          server_side=True,
-                                          do_handshake_on_connect=False)
-            self.del_channel()
-            self.set_socket(ssl_socket)
-            # Must try handshake before calling push()
-            self.tls_active = True
-            self.tls_starting = True
-            self._do_tls_handshake()
+            self.secure_connection()
             self.set_terminator(b"\r\n")
             self.in_buffer = []
             self.push('+OK dummy pop3 server ready. <timestamp>')
+            self.tls_active = True
+            self.tls_starting = False
 
 
 @requires_ssl
diff --git a/Lib/test/test_selectors.py b/Lib/test/test_selectors.py
index 34edd76..8f83c90 100644
--- a/Lib/test/test_selectors.py
+++ b/Lib/test/test_selectors.py
@@ -441,10 +441,18 @@
     SELECTOR = getattr(selectors, 'KqueueSelector', None)
 
 
+@unittest.skipUnless(hasattr(selectors, 'DevpollSelector'),
+                     "Test needs selectors.DevpollSelector")
+class DevpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn):
+
+    SELECTOR = getattr(selectors, 'DevpollSelector', None)
+
+
+
 def test_main():
     tests = [DefaultSelectorTestCase, SelectSelectorTestCase,
              PollSelectorTestCase, EpollSelectorTestCase,
-             KqueueSelectorTestCase]
+             KqueueSelectorTestCase, DevpollSelectorTestCase]
     support.run_unittest(*tests)
     support.reap_children()
 
diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py
index bfef621..992a4ce 100644
--- a/Lib/test/test_set.py
+++ b/Lib/test/test_set.py
@@ -929,7 +929,7 @@
 
 class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
     def setUp(self):
-        self.case   = "string set"
+        self.case   = "bytes set"
         self.values = [b"a", b"b", b"c"]
         self.set    = set(self.values)
         self.dup    = set(self.values)
diff --git a/Lib/test/test_signal.py b/Lib/test/test_signal.py
index 74f74af..92747cf 100644
--- a/Lib/test/test_signal.py
+++ b/Lib/test/test_signal.py
@@ -1,6 +1,7 @@
 import unittest
 from test import support
 from contextlib import closing
+import enum
 import gc
 import pickle
 import select
@@ -39,6 +40,23 @@
         return None
 
 
+class GenericTests(unittest.TestCase):
+
+    @unittest.skipIf(threading is None, "test needs threading module")
+    def test_enums(self):
+        for name in dir(signal):
+            sig = getattr(signal, name)
+            if name in {'SIG_DFL', 'SIG_IGN'}:
+                self.assertIsInstance(sig, signal.Handlers)
+            elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}:
+                self.assertIsInstance(sig, signal.Sigmasks)
+            elif name.startswith('SIG') and not name.startswith('SIG_'):
+                self.assertIsInstance(sig, signal.Signals)
+            elif name.startswith('CTRL_'):
+                self.assertIsInstance(sig, signal.Signals)
+                self.assertEqual(sys.platform, "win32")
+
+
 @unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
 class InterProcessSignalTests(unittest.TestCase):
     MAX_DURATION = 20   # Entire test should last at most 20 sec.
@@ -195,6 +213,7 @@
 
     def test_getsignal(self):
         hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
+        self.assertIsInstance(hup, signal.Handlers)
         self.assertEqual(signal.getsignal(signal.SIGHUP),
                          self.trivial_signal_handler)
         signal.signal(signal.SIGHUP, hup)
@@ -271,7 +290,7 @@
 
         os.close(read)
         os.close(write)
-        """.format(signals, ordered, test_body)
+        """.format(tuple(map(int, signals)), ordered, test_body)
 
         assert_python_ok('-c', code)
 
@@ -604,6 +623,8 @@
             signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
             os.kill(os.getpid(), signum)
             pending = signal.sigpending()
+            for sig in pending:
+                assert isinstance(sig, signal.Signals), repr(pending)
             if pending != {signum}:
                 raise Exception('%s != {%s}' % (pending, signum))
             try:
@@ -660,6 +681,7 @@
         code = '''if 1:
         import signal
         import sys
+        from signal import Signals
 
         def handler(signum, frame):
             1/0
@@ -702,6 +724,7 @@
         def test(signum):
             signal.alarm(1)
             received = signal.sigwait([signum])
+            assert isinstance(received, signal.Signals), received
             if received != signum:
                 raise Exception('received %s, not %s' % (received, signum))
         ''')
@@ -842,8 +865,14 @@
         def kill(signum):
             os.kill(os.getpid(), signum)
 
+        def check_mask(mask):
+            for sig in mask:
+                assert isinstance(sig, signal.Signals), repr(sig)
+
         def read_sigmask():
-            return signal.pthread_sigmask(signal.SIG_BLOCK, [])
+            sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [])
+            check_mask(sigmask)
+            return sigmask
 
         signum = signal.SIGUSR1
 
@@ -852,6 +881,7 @@
 
         # Unblock SIGUSR1 (and copy the old mask) to test our signal handler
         old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
+        check_mask(old_mask)
         try:
             kill(signum)
         except ZeroDivisionError:
@@ -861,11 +891,13 @@
 
         # Block and then raise SIGUSR1. The signal is blocked: the signal
         # handler is not called, and the signal is now pending
-        signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
+        mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
+        check_mask(mask)
         kill(signum)
 
         # Check the new mask
         blocked = read_sigmask()
+        check_mask(blocked)
         if signum not in blocked:
             raise Exception("%s not in %s" % (signum, blocked))
         if old_mask ^ blocked != {signum}:
@@ -928,7 +960,7 @@
 
 def test_main():
     try:
-        support.run_unittest(PosixTests, InterProcessSignalTests,
+        support.run_unittest(GenericTests, PosixTests, InterProcessSignalTests,
                              WakeupFDTests, WakeupSignalTests,
                              SiginterruptTest, ItimerTest, WindowsSignalTests,
                              PendingSignalsTests)
diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py
index aed10a7..86cbec7 100644
--- a/Lib/test/test_socket.py
+++ b/Lib/test/test_socket.py
@@ -1344,10 +1344,13 @@
 
     def test_listen_backlog(self):
         for backlog in 0, -1:
-            srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
+                srv.bind((HOST, 0))
+                srv.listen(backlog)
+
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
             srv.bind((HOST, 0))
-            srv.listen(backlog)
-            srv.close()
+            srv.listen()
 
     @support.cpython_only
     def test_listen_backlog_overflow(self):
diff --git a/Lib/test/test_socketserver.py b/Lib/test/test_socketserver.py
index 0617b30..8e0fde4 100644
--- a/Lib/test/test_socketserver.py
+++ b/Lib/test/test_socketserver.py
@@ -222,38 +222,6 @@
                             socketserver.DatagramRequestHandler,
                             self.dgram_examine)
 
-    @contextlib.contextmanager
-    def mocked_select_module(self):
-        """Mocks the select.select() call to raise EINTR for first call"""
-        old_select = select.select
-
-        class MockSelect:
-            def __init__(self):
-                self.called = 0
-
-            def __call__(self, *args):
-                self.called += 1
-                if self.called == 1:
-                    # raise the exception on first call
-                    raise OSError(errno.EINTR, os.strerror(errno.EINTR))
-                else:
-                    # Return real select value for consecutive calls
-                    return old_select(*args)
-
-        select.select = MockSelect()
-        try:
-            yield select.select
-        finally:
-            select.select = old_select
-
-    def test_InterruptServerSelectCall(self):
-        with self.mocked_select_module() as mock_select:
-            pid = self.run_server(socketserver.TCPServer,
-                                  socketserver.StreamRequestHandler,
-                                  self.stream_examine)
-            # Make sure select was called again:
-            self.assertGreater(mock_select.called, 1)
-
     # Alas, on Linux (at least) recvfrom() doesn't return a meaningful
     # client address so this cannot work:
 
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index 2b3de1f..f72fb15 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -86,6 +86,12 @@
     # 0.9.8 or higher
     return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
 
+def utc_offset():  # NOTE: ignore issues like #1647654
+    # local time = utc time + utc offset
+    if time.daylight and time.localtime().tm_isdst > 0:
+        return -time.altzone  # seconds
+    return -time.timezone
+
 def asn1time(cert_time):
     # Some versions of OpenSSL ignore seconds, see #18207
     # 0.9.8.i
@@ -134,6 +140,14 @@
         self.assertIn(ssl.HAS_SNI, {True, False})
         self.assertIn(ssl.HAS_ECDH, {True, False})
 
+    def test_str_for_enums(self):
+        # Make sure that the PROTOCOL_* constants have enum-like string
+        # reprs.
+        proto = ssl.PROTOCOL_SSLv3
+        self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv3')
+        ctx = ssl.SSLContext(proto)
+        self.assertIs(ctx.protocol, proto)
+
     def test_random(self):
         v = ssl.RAND_status()
         if support.verbose:
@@ -643,6 +657,71 @@
             ctx.wrap_socket(s)
         self.assertEqual(str(cx.exception), "only stream sockets are supported")
 
+    def cert_time_ok(self, timestring, timestamp):
+        self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
+
+    def cert_time_fail(self, timestring):
+        with self.assertRaises(ValueError):
+            ssl.cert_time_to_seconds(timestring)
+
+    @unittest.skipUnless(utc_offset(),
+                         'local time needs to be different from UTC')
+    def test_cert_time_to_seconds_timezone(self):
+        # Issue #19940: ssl.cert_time_to_seconds() returns wrong
+        #               results if local timezone is not UTC
+        self.cert_time_ok("May  9 00:00:00 2007 GMT", 1178668800.0)
+        self.cert_time_ok("Jan  5 09:34:43 2018 GMT", 1515144883.0)
+
+    def test_cert_time_to_seconds(self):
+        timestring = "Jan  5 09:34:43 2018 GMT"
+        ts = 1515144883.0
+        self.cert_time_ok(timestring, ts)
+        # accept keyword parameter, assert its name
+        self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
+        # accept both %e and %d (space or zero generated by strftime)
+        self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
+        # case-insensitive
+        self.cert_time_ok("JaN  5 09:34:43 2018 GmT", ts)
+        self.cert_time_fail("Jan  5 09:34 2018 GMT")     # no seconds
+        self.cert_time_fail("Jan  5 09:34:43 2018")      # no GMT
+        self.cert_time_fail("Jan  5 09:34:43 2018 UTC")  # not GMT timezone
+        self.cert_time_fail("Jan 35 09:34:43 2018 GMT")  # invalid day
+        self.cert_time_fail("Jon  5 09:34:43 2018 GMT")  # invalid month
+        self.cert_time_fail("Jan  5 24:00:00 2018 GMT")  # invalid hour
+        self.cert_time_fail("Jan  5 09:60:43 2018 GMT")  # invalid minute
+
+        newyear_ts = 1230768000.0
+        # leap seconds
+        self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
+        # same timestamp
+        self.cert_time_ok("Jan  1 00:00:00 2009 GMT", newyear_ts)
+
+        self.cert_time_ok("Jan  5 09:34:59 2018 GMT", 1515144899)
+        #  allow 60th second (even if it is not a leap second)
+        self.cert_time_ok("Jan  5 09:34:60 2018 GMT", 1515144900)
+        #  allow 2nd leap second for compatibility with time.strptime()
+        self.cert_time_ok("Jan  5 09:34:61 2018 GMT", 1515144901)
+        self.cert_time_fail("Jan  5 09:34:62 2018 GMT")  # invalid seconds
+
+        # no special treatment for the special value:
+        #   99991231235959Z (rfc 5280)
+        self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
+
+    @support.run_with_locale('LC_ALL', '')
+    def test_cert_time_to_seconds_locale(self):
+        # `cert_time_to_seconds()` should be locale independent
+
+        def local_february_name():
+            return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
+
+        if local_february_name().lower() == 'feb':
+            self.skipTest("locale-specific month name needs to be "
+                          "different from C locale")
+
+        # locale-independent
+        self.cert_time_ok("Feb  9 00:00:00 2007 GMT", 1170979200.0)
+        self.cert_time_fail(local_february_name() + "  9 00:00:00 2007 GMT")
+
 
 class ContextTests(unittest.TestCase):
 
@@ -1371,14 +1450,12 @@
     def test_get_server_certificate(self):
         def _test_get_server_certificate(host, port, cert=None):
             with support.transient_internet(host):
-                pem = ssl.get_server_certificate((host, port),
-                                                 ssl.PROTOCOL_SSLv23)
+                pem = ssl.get_server_certificate((host, port))
                 if not pem:
                     self.fail("No server certificate on %s:%s!" % (host, port))
 
                 try:
                     pem = ssl.get_server_certificate((host, port),
-                                                     ssl.PROTOCOL_SSLv23,
                                                      ca_certs=CERTFILE)
                 except ssl.SSLError as x:
                     #should fail
@@ -1388,7 +1465,6 @@
                     self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
 
                 pem = ssl.get_server_certificate((host, port),
-                                                 ssl.PROTOCOL_SSLv23,
                                                  ca_certs=cert)
                 if not pem:
                     self.fail("No server certificate on %s:%s!" % (host, port))
@@ -2471,6 +2547,36 @@
                 s.write(b"over\n")
                 s.close()
 
+        def test_nonblocking_send(self):
+            server = ThreadedEchoServer(CERTFILE,
+                                        certreqs=ssl.CERT_NONE,
+                                        ssl_version=ssl.PROTOCOL_TLSv1,
+                                        cacerts=CERTFILE,
+                                        chatty=True,
+                                        connectionchatty=False)
+            with server:
+                s = ssl.wrap_socket(socket.socket(),
+                                    server_side=False,
+                                    certfile=CERTFILE,
+                                    ca_certs=CERTFILE,
+                                    cert_reqs=ssl.CERT_NONE,
+                                    ssl_version=ssl.PROTOCOL_TLSv1)
+                s.connect((HOST, server.port))
+                s.setblocking(False)
+
+                # If we keep sending data, at some point the buffers
+                # will be full and the call will block
+                buf = bytearray(8192)
+                def fill_buffer():
+                    while True:
+                        s.send(buf)
+                self.assertRaises((ssl.SSLWantWriteError,
+                                   ssl.SSLWantReadError), fill_buffer)
+
+                # Switch back to blocking mode before closing the socket.
+                s.setblocking(True)
+                s.close()
+
         def test_handshake_timeout(self):
             # Issue #5103: SSL handshake must respect the socket timeout
             server = socket.socket(socket.AF_INET)
diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py
index a68ed08..4eadd4b 100644
--- a/Lib/test/test_sys.py
+++ b/Lib/test/test_sys.py
@@ -635,6 +635,53 @@
             expected = None
         self.check_fsencoding(fs_encoding, expected)
 
+    def c_locale_get_error_handler(self, isolated=False, encoding=None):
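+        """Run a child interpreter in the POSIX locale and return its report
+        of the error handlers used for sys.stdin/stdout/stderr."""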
+        # Force the POSIX locale
+        env = os.environ.copy()
+        env["LC_ALL"] = "C"
+        code = '\n'.join((
+            'import sys',
+            'def dump(name):',
+            '    std = getattr(sys, name)',
+            '    print("%s: %s" % (name, std.errors))',
+            'dump("stdin")',
+            'dump("stdout")',
+            'dump("stderr")',
+        ))
+        args = [sys.executable, "-c", code]
+        if isolated:
+            args.append("-I")
+        elif encoding:
+            env['PYTHONIOENCODING'] = encoding
+        p = subprocess.Popen(args,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.STDOUT,
+                              env=env,
+                              universal_newlines=True)
+        stdout, stderr = p.communicate()
+        return stdout
+
+    def test_c_locale_surrogateescape(self):
+        out = self.c_locale_get_error_handler(isolated=True)
+        self.assertEqual(out,
+                         'stdin: surrogateescape\n'
+                         'stdout: surrogateescape\n'
+                         'stderr: backslashreplace\n')
+
+        # replace the default error handler
+        out = self.c_locale_get_error_handler(encoding=':strict')
+        self.assertEqual(out,
+                         'stdin: strict\n'
+                         'stdout: strict\n'
+                         'stderr: backslashreplace\n')
+
+        # force the encoding
+        out = self.c_locale_get_error_handler(encoding='iso8859-1')
+        self.assertEqual(out,
+                         'stdin: surrogateescape\n'
+                         'stdout: surrogateescape\n'
+                         'stderr: backslashreplace\n')
+
     def test_implementation(self):
         # This test applies to all implementations equally.
 
@@ -925,7 +972,7 @@
         check(int, s)
         # (PyTypeObject + PyNumberMethods + PyMappingMethods +
         #  PySequenceMethods + PyBufferProcs + 4P)
-        s = vsize('P2n15Pl4Pn9Pn11PIP') + struct.calcsize('34P 3P 10P 2P 4P')
+        s = vsize('P2n17Pl4Pn9Pn11PIP') + struct.calcsize('34P 3P 10P 2P 4P')
         # Separate block for PyDictKeysObject with 4 entries
         s += struct.calcsize("2nPn") + 4*struct.calcsize("n2P")
         # class
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index 92f8bfe..d828979 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -1,7 +1,6 @@
 import sys
 import os
 import io
-import shutil
 from hashlib import md5
 
 import unittest
@@ -456,16 +455,16 @@
         # Test hardlink extraction (e.g. bug #857297).
         with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar:
             tar.extract("ustar/regtype", TEMPDIR)
-            self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype"))
+            self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/regtype"))
 
             tar.extract("ustar/lnktype", TEMPDIR)
-            self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype"))
+            self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/lnktype"))
             with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f:
                 data = f.read()
             self.assertEqual(md5sum(data), md5_regtype)
 
             tar.extract("ustar/symtype", TEMPDIR)
-            self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype"))
+            self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/symtype"))
             with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f:
                 data = f.read()
             self.assertEqual(md5sum(data), md5_regtype)
@@ -498,7 +497,7 @@
                 self.assertEqual(tarinfo.mtime, file_mtime, errmsg)
         finally:
             tar.close()
-            shutil.rmtree(DIR)
+            support.rmtree(DIR)
 
     def test_extract_directory(self):
         dirtype = "ustar/dirtype"
@@ -513,7 +512,7 @@
                 if sys.platform != "win32":
                     self.assertEqual(os.stat(extracted).st_mode & 0o777, 0o755)
         finally:
-            shutil.rmtree(DIR)
+            support.rmtree(DIR)
 
     def test_init_close_fobj(self):
         # Issue #7341: Close the internal file object in the TarFile
@@ -877,7 +876,7 @@
                 fobj.seek(4096)
                 fobj.truncate()
             s = os.stat(name)
-            os.remove(name)
+            support.unlink(name)
             return s.st_blocks == 0
         else:
             return False
@@ -1010,7 +1009,7 @@
             finally:
                 tar.close()
         finally:
-            os.rmdir(path)
+            support.rmdir(path)
 
     @unittest.skipUnless(hasattr(os, "link"),
                          "Missing hardlink implementation")
@@ -1030,8 +1029,8 @@
             finally:
                 tar.close()
         finally:
-            os.remove(target)
-            os.remove(link)
+            support.unlink(target)
+            support.unlink(link)
 
     @support.skip_unless_symlink
     def test_symlink_size(self):
@@ -1045,7 +1044,7 @@
             finally:
                 tar.close()
         finally:
-            os.remove(path)
+            support.unlink(path)
 
     def test_add_self(self):
         # Test for #1257255.
@@ -1092,7 +1091,7 @@
             finally:
                 tar.close()
         finally:
-            shutil.rmtree(tempdir)
+            support.rmtree(tempdir)
 
     def test_filter(self):
         tempdir = os.path.join(TEMPDIR, "filter")
@@ -1128,7 +1127,7 @@
             finally:
                 tar.close()
         finally:
-            shutil.rmtree(tempdir)
+            support.rmtree(tempdir)
 
     # Guarantee that stored pathnames are not modified. Don't
     # remove ./ or ../ or double slashes. Still make absolute
@@ -1156,9 +1155,9 @@
             tar.close()
 
         if not dir:
-            os.remove(foo)
+            support.unlink(foo)
         else:
-            os.rmdir(foo)
+            support.rmdir(foo)
 
         self.assertEqual(t.name, cmp_path or path.replace(os.sep, "/"))
 
@@ -1189,8 +1188,8 @@
             finally:
                 tar.close()
         finally:
-            os.unlink(temparchive)
-            shutil.rmtree(tempdir)
+            support.unlink(temparchive)
+            support.rmtree(tempdir)
 
     def test_pathnames(self):
         self._test_pathname("foo")
@@ -1290,7 +1289,7 @@
         # Test for issue #8464: Create files with correct
         # permissions.
         if os.path.exists(tmpname):
-            os.remove(tmpname)
+            support.unlink(tmpname)
 
         original_umask = os.umask(0o022)
         try:
@@ -1644,7 +1643,7 @@
     def setUp(self):
         self.tarname = tmpname
         if os.path.exists(self.tarname):
-            os.remove(self.tarname)
+            support.unlink(self.tarname)
 
     def _create_testtar(self, mode="w:"):
         with tarfile.open(tarname, encoding="iso8859-1") as src:
@@ -2151,7 +2150,7 @@
 
 def tearDownModule():
     if os.path.exists(TEMPDIR):
-        shutil.rmtree(TEMPDIR)
+        support.rmtree(TEMPDIR)
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py
index d12fb22..c9a558c 100644
--- a/Lib/test/test_tcl.py
+++ b/Lib/test/test_tcl.py
@@ -368,6 +368,8 @@
             self.assertEqual(float(passValue(-float('inf'))), -float('inf'))
         self.assertEqual(passValue((1, '2', (3.4,))),
                          (1, '2', (3.4,)) if self.wantobjects else '1 2 3.4')
+        self.assertEqual(passValue(['a', ['b', 'c']]),
+                         ('a', ('b', 'c')) if self.wantobjects else 'a {b c}')
 
     def test_user_command(self):
         result = None
@@ -415,6 +417,7 @@
         check(float('nan'), 'NaN', eq=nan_eq)
         check((), '')
         check((1, (2,), (3, 4), '5 6', ()), '1 2 {3 4} {5 6} {}')
+        check([1, [2,], [3, 4], '5 6', []], '1 2 {3 4} {5 6} {}')
 
     def test_splitlist(self):
         splitlist = self.interp.tk.splitlist
@@ -440,6 +443,8 @@
             ('a 3.4', ('a', '3.4')),
             (('a', 3.4), ('a', 3.4)),
             ((), ()),
+            ([], ()),
+            (['a', ['b', 'c']], ('a', ['b', 'c'])),
             (call('list', 1, '2', (3.4,)),
                 (1, '2', (3.4,)) if self.wantobjects else
                 ('1', '2', '3.4')),
@@ -487,6 +492,9 @@
             (('a', 3.4), ('a', 3.4)),
             (('a', (2, 3.4)), ('a', (2, 3.4))),
             ((), ()),
+            ([], ()),
+            (['a', 'b c'], ('a', ('b', 'c'))),
+            (['a', ['b', 'c']], ('a', ('b', 'c'))),
             (call('list', 1, '2', (3.4,)),
                 (1, '2', (3.4,)) if self.wantobjects else
                 ('1', '2', '3.4')),
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 38611a7..8f74a06 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -464,7 +464,7 @@
 
 Multiplicative
 
-    >>> dump_tokens("x = 1//1*1/5*12%0x12")
+    >>> dump_tokens("x = 1//1*1/5*12%0x12@42")
     ENCODING   'utf-8'       (0, 0) (0, 0)
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
@@ -479,6 +479,8 @@
     NUMBER     '12'          (1, 13) (1, 15)
     OP         '%'           (1, 15) (1, 16)
     NUMBER     '0x12'        (1, 16) (1, 20)
+    OP         '@'           (1, 20) (1, 21)
+    NUMBER     '42'          (1, 21) (1, 23)
 
 Unary
 
@@ -1154,6 +1156,7 @@
         self.assertExactTypeEqual('//', token.DOUBLESLASH)
         self.assertExactTypeEqual('//=', token.DOUBLESLASHEQUAL)
         self.assertExactTypeEqual('@', token.AT)
+        self.assertExactTypeEqual('@=', token.ATEQUAL)
 
         self.assertExactTypeEqual('a**2+b**2==c**2',
                                   NAME, token.DOUBLESTAR, NUMBER,
diff --git a/Lib/test/test_tuple.py b/Lib/test/test_tuple.py
index e41711c..14c6430 100644
--- a/Lib/test/test_tuple.py
+++ b/Lib/test/test_tuple.py
@@ -201,6 +201,14 @@
         with self.assertRaises(TypeError):
             [3,] + T((1,2))
 
+    def test_lexicographic_ordering(self):
+        # Issue 21100
+        a = self.type2test([1, 2])
+        b = self.type2test([1, 2, 0])
+        c = self.type2test([1, 3])
+        self.assertLess(a, b)
+        self.assertLess(b, c)
+
 def test_main():
     support.run_unittest(TupleTest)
 
diff --git a/Lib/test/test_types.py b/Lib/test/test_types.py
index ec10752..11d9546 100644
--- a/Lib/test/test_types.py
+++ b/Lib/test/test_types.py
@@ -343,6 +343,8 @@
         self.assertRaises(ValueError, 3 .__format__, ",n")
         # can't have ',' with 'c'
         self.assertRaises(ValueError, 3 .__format__, ",c")
+        # can't have '#' with 'c'
+        self.assertRaises(ValueError, 3 .__format__, "#c")
 
         # ensure that only int and float type specifiers work
         for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index 9ae31d1..64e6bf5 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -8,6 +8,7 @@
 import _string
 import codecs
 import itertools
+import operator
 import struct
 import sys
 import unittest
@@ -250,6 +251,7 @@
                              {ord('a'): None, ord('b'): ''})
         self.checkequalnofix('xyyx', 'xzx', 'translate',
                              {ord('z'): 'yy'})
+
         # this needs maketrans()
         self.checkequalnofix('abababc', 'abababc', 'translate',
                              {'b': '<i>'})
@@ -259,6 +261,33 @@
         tbl = self.type2test.maketrans('abc', 'xyz', 'd')
         self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)
 
+        # various tests switching from ASCII to latin1 or the opposite;
+        # same length, remove a letter, or replace with a longer string.
+        self.assertEqual("[a]".translate(str.maketrans('a', 'X')),
+                         "[X]")
+        self.assertEqual("[a]".translate(str.maketrans({'a': 'X'})),
+                         "[X]")
+        self.assertEqual("[a]".translate(str.maketrans({'a': None})),
+                         "[]")
+        self.assertEqual("[a]".translate(str.maketrans({'a': 'XXX'})),
+                         "[XXX]")
+        self.assertEqual("[a]".translate(str.maketrans({'a': '\xe9'})),
+                         "[\xe9]")
+        self.assertEqual("[a]".translate(str.maketrans({'a': '<\xe9>'})),
+                         "[<\xe9>]")
+        self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': 'a'})),
+                         "[a]")
+        self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': None})),
+                         "[]")
+
+        # invalid Unicode characters
+        invalid_char = 0x10ffff+1
+        for before in "a\xe9\u20ac\U0010ffff":
+            mapping = str.maketrans({before: invalid_char})
+            text = "[%s]" % before
+            self.assertRaises(ValueError, text.translate, mapping)
+
+        # errors
         self.assertRaises(TypeError, self.type2test.maketrans)
         self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
         self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
@@ -1148,20 +1177,20 @@
         self.assertEqual('%.2s' % "a\xe9\u20ac", 'a\xe9')
 
         #issue 19995
-        class PsuedoInt:
+        class PseudoInt:
             def __init__(self, value):
                 self.value = int(value)
             def __int__(self):
                 return self.value
             def __index__(self):
                 return self.value
-        class PsuedoFloat:
+        class PseudoFloat:
             def __init__(self, value):
                 self.value = float(value)
             def __int__(self):
                 return int(self.value)
-        pi = PsuedoFloat(3.1415)
-        letter_m = PsuedoInt(109)
+        pi = PseudoFloat(3.1415)
+        letter_m = PseudoInt(109)
         self.assertEqual('%x' % 42, '2a')
         self.assertEqual('%X' % 15, 'F')
         self.assertEqual('%o' % 9, '11')
@@ -1170,11 +1199,11 @@
         self.assertEqual('%X' % letter_m, '6D')
         self.assertEqual('%o' % letter_m, '155')
         self.assertEqual('%c' % letter_m, 'm')
-        self.assertWarns(DeprecationWarning, '%x'.__mod__, pi),
-        self.assertWarns(DeprecationWarning, '%x'.__mod__, 3.14),
-        self.assertWarns(DeprecationWarning, '%X'.__mod__, 2.11),
-        self.assertWarns(DeprecationWarning, '%o'.__mod__, 1.79),
-        self.assertWarns(DeprecationWarning, '%c'.__mod__, pi),
+        self.assertRaisesRegex(TypeError, '%x format: an integer is required, not float', operator.mod, '%x', 3.14),
+        self.assertRaisesRegex(TypeError, '%X format: an integer is required, not float', operator.mod, '%X', 2.11),
+        self.assertRaisesRegex(TypeError, '%o format: an integer is required, not float', operator.mod, '%o', 1.79),
+        self.assertRaisesRegex(TypeError, '%x format: an integer is required, not PseudoFloat', operator.mod, '%x', pi),
+        self.assertRaises(TypeError, operator.mod, '%c', pi),
 
     def test_formatting_with_enum(self):
         # issue18780
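
For reference, a minimal sketch of the stricter behavior these updated assertions expect (assuming an interpreter carrying this change, i.e. Python 3.5): the %x/%X/%o/%c codes no longer fall back on __int__ for floats with a DeprecationWarning, they raise TypeError.

    import operator

    print('%x' % 42)               # '2a': ints and __index__ objects still work
    try:
        operator.mod('%x', 3.14)   # formerly a DeprecationWarning
    except TypeError as exc:
        print(exc)                 # %x format: an integer is required, not float
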
diff --git a/Lib/test/test_wait3.py b/Lib/test/test_wait3.py
index f6a065d..bb71481 100644
--- a/Lib/test/test_wait3.py
+++ b/Lib/test/test_wait3.py
@@ -18,7 +18,8 @@
         # This many iterations can be required, since some previously run
         # tests (e.g. test_ctypes) could have spawned a lot of children
         # very quickly.
-        for i in range(30):
+        deadline = time.monotonic() + 10.0
+        while time.monotonic() <= deadline:
             # wait3() shouldn't hang, but some of the buildbots seem to hang
             # in the forking tests.  This is an attempt to fix the problem.
             spid, status, rusage = os.wait3(os.WNOHANG)
diff --git a/Lib/test/test_wait4.py b/Lib/test/test_wait4.py
index 352c11a..b427a9b 100644
--- a/Lib/test/test_wait4.py
+++ b/Lib/test/test_wait4.py
@@ -19,13 +19,14 @@
             # Issue #11185: wait4 is broken on AIX and will always return 0
             # with WNOHANG.
             option = 0
-        for i in range(10):
+        deadline = time.monotonic() + 10.0
+        while time.monotonic() <= deadline:
             # wait4() shouldn't hang, but some of the buildbots seem to hang
             # in the forking tests.  This is an attempt to fix the problem.
             spid, status, rusage = os.wait4(cpid, option)
             if spid == cpid:
                 break
-            time.sleep(1.0)
+            time.sleep(0.1)
         self.assertEqual(spid, cpid)
         self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
         self.assertTrue(rusage)
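
For illustration, the deadline-based polling pattern that the two tests above now share, sketched as a standalone snippet (assuming a POSIX system with os.fork() and os.wait3()):

    import os
    import time

    pid = os.fork()
    if pid == 0:
        os._exit(0)                      # child exits immediately so there is something to reap
    deadline = time.monotonic() + 10.0
    while time.monotonic() <= deadline:  # poll until reaped or the deadline passes,
        spid, status, rusage = os.wait3(os.WNOHANG)   # instead of a fixed iteration count
        if spid == pid:
            break
        time.sleep(0.1)
    print(spid, status)
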
diff --git a/Lib/test/test_warnings.py b/Lib/test/test_warnings.py
index eec2c24..cf7f747 100644
--- a/Lib/test/test_warnings.py
+++ b/Lib/test/test_warnings.py
@@ -5,7 +5,7 @@
 import sys
 import unittest
 from test import support
-from test.script_helper import assert_python_ok
+from test.script_helper import assert_python_ok, assert_python_failure
 
 from test import warning_tests
 
@@ -748,7 +748,19 @@
             "import sys; sys.stdout.write(str(sys.warnoptions))",
             PYTHONWARNINGS="ignore::DeprecationWarning")
         self.assertEqual(stdout,
-            b"['ignore::UnicodeWarning', 'ignore::DeprecationWarning']")
+            b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")
+
+    def test_conflicting_envvar_and_command_line(self):
+        rc, stdout, stderr = assert_python_failure("-Werror::DeprecationWarning", "-c",
+            "import sys, warnings; sys.stdout.write(str(sys.warnoptions)); "
+            "warnings.warn('Message', DeprecationWarning)",
+            PYTHONWARNINGS="default::DeprecationWarning")
+        self.assertEqual(stdout,
+            b"['default::DeprecationWarning', 'error::DeprecationWarning']")
+        self.assertEqual(stderr.splitlines(),
+            [b"Traceback (most recent call last):",
+             b"  File \"<string>\", line 1, in <module>",
+             b"DeprecationWarning: Message"])
 
     @unittest.skipUnless(sys.getfilesystemencoding() != 'ascii',
                          'requires non-ascii filesystemencoding')
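
A small sketch of what the new test exercises, runnable outside the test suite (assuming a Python with this change): PYTHONWARNINGS entries are now placed before -W options in sys.warnoptions, so the command line wins when the two conflict.

    import os
    import subprocess
    import sys

    env = dict(os.environ, PYTHONWARNINGS="default::DeprecationWarning")
    out = subprocess.check_output(
        [sys.executable, "-Werror::DeprecationWarning", "-c",
         "import sys; print(sys.warnoptions)"],
        env=env)
    print(out.decode().strip())
    # ['default::DeprecationWarning', 'error::DeprecationWarning']
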
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
index 99b3eda..120c54f 100644
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -713,6 +713,23 @@
         conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
         conn.close()
 
+    def test_context_manager(self):
+        with xmlrpclib.ServerProxy(URL) as server:
+            server.add(2, 3)
+            self.assertNotEqual(server('transport')._connection,
+                                (None, None))
+        self.assertEqual(server('transport')._connection,
+                         (None, None))
+
+    def test_context_manager_method_error(self):
+        try:
+            with xmlrpclib.ServerProxy(URL) as server:
+                server.add(2, "a")
+        except xmlrpclib.Fault:
+            pass
+        self.assertEqual(server('transport')._connection,
+                         (None, None))
+
 
 class MultiPathServerTestCase(BaseServerTestCase):
     threadFunc = staticmethod(http_multi_server)
@@ -898,6 +915,7 @@
         p = xmlrpclib.ServerProxy(self.url, transport=t)
         self.assertEqual(p('transport'), t)
 
+
 # This is a contrived way to make a failure occur on the server side
 # in order to test the _send_traceback_header flag on the server
 class FailingMessageClass(http.client.HTTPMessage):
diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py
index 1bef575..2e232f3 100644
--- a/Lib/test/test_zipfile.py
+++ b/Lib/test/test_zipfile.py
@@ -3,7 +3,6 @@
 import sys
 import importlib.util
 import time
-import shutil
 import struct
 import zipfile
 import unittest
@@ -12,7 +11,7 @@
 from tempfile import TemporaryFile
 from random import randint, random, getrandbits
 
-from test.support import (TESTFN, findfile, unlink,
+from test.support import (TESTFN, findfile, unlink, rmtree,
                           requires_zlib, requires_bz2, requires_lzma,
                           captured_stdout, check_warnings)
 
@@ -691,7 +690,7 @@
                 self.assertNotIn('mod2.txt', names)
 
         finally:
-            shutil.rmtree(TESTFN2)
+            rmtree(TESTFN2)
 
     def test_write_python_directory_filtered(self):
         os.mkdir(TESTFN2)
@@ -711,7 +710,7 @@
                 self.assertNotIn('mod2.py', names)
 
         finally:
-            shutil.rmtree(TESTFN2)
+            rmtree(TESTFN2)
 
     def test_write_non_pyfile(self):
         with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
@@ -741,7 +740,7 @@
                 self.assertNotIn('mod1.pyo', names)
 
         finally:
-            shutil.rmtree(TESTFN2)
+            rmtree(TESTFN2)
 
 
 class ExtractTests(unittest.TestCase):
@@ -767,7 +766,7 @@
                 os.remove(writtenfile)
 
         # remove the test file subdirectories
-        shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
+        rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
 
     def test_extract_all(self):
         with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
@@ -785,7 +784,7 @@
                 os.remove(outfile)
 
         # remove the test file subdirectories
-        shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
+        rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
 
     def check_file(self, filename, content):
         self.assertTrue(os.path.isfile(filename))
@@ -867,12 +866,12 @@
                                  msg='extract %r: %r != %r' %
                                  (arcname, writtenfile, correctfile))
             self.check_file(correctfile, content)
-            shutil.rmtree('target')
+            rmtree('target')
 
             with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                 zipfp.extractall(targetpath)
             self.check_file(correctfile, content)
-            shutil.rmtree('target')
+            rmtree('target')
 
             correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
 
@@ -881,12 +880,12 @@
                 self.assertEqual(writtenfile, correctfile,
                                  msg="extract %r" % arcname)
             self.check_file(correctfile, content)
-            shutil.rmtree(fixedname.split('/')[0])
+            rmtree(fixedname.split('/')[0])
 
             with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                 zipfp.extractall()
             self.check_file(correctfile, content)
-            shutil.rmtree(fixedname.split('/')[0])
+            rmtree(fixedname.split('/')[0])
 
             os.remove(TESTFN2)
 
@@ -1628,7 +1627,7 @@
         self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
 
     def tearDown(self):
-        shutil.rmtree(TESTFN2)
+        rmtree(TESTFN2)
         if os.path.exists(TESTFN):
             unlink(TESTFN)
 
diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py
index 4ac4e57a..f60923a 100644
--- a/Lib/tkinter/__init__.py
+++ b/Lib/tkinter/__init__.py
@@ -1280,6 +1280,11 @@
     def __str__(self):
         """Return the window path name of this widget."""
         return self._w
+
+    def __repr__(self):
+        return '<%s.%s object %s>' % (
+            self.__class__.__module__, self.__class__.__qualname__, self._w)
+
     # Pack methods that apply to the master
     _noarg_ = ['_noarg_']
     def pack_propagate(self, flag=_noarg_):
diff --git a/Lib/tkinter/test/test_tkinter/test_misc.py b/Lib/tkinter/test/test_tkinter/test_misc.py
index d325b31..46f5170 100644
--- a/Lib/tkinter/test/test_tkinter/test_misc.py
+++ b/Lib/tkinter/test/test_tkinter/test_misc.py
@@ -10,6 +10,11 @@
     def setUp(self):
         self.root = ttk.setup_master()
 
+    def test_repr(self):
+        t = tkinter.Toplevel(self.root, name='top')
+        f = tkinter.Frame(t, name='child')
+        self.assertEqual(repr(f), '<tkinter.Frame object .top.child>')
+
     def test_tk_setPalette(self):
         root = self.root
         root.tk_setPalette('black')
diff --git a/Lib/token.py b/Lib/token.py
index 7470c8c..bdfcec8 100644
--- a/Lib/token.py
+++ b/Lib/token.py
@@ -60,11 +60,12 @@
 DOUBLESLASH = 47
 DOUBLESLASHEQUAL = 48
 AT = 49
-RARROW = 50
-ELLIPSIS = 51
-OP = 52
-ERRORTOKEN = 53
-N_TOKENS = 54
+ATEQUAL = 50
+RARROW = 51
+ELLIPSIS = 52
+OP = 53
+ERRORTOKEN = 54
+N_TOKENS = 55
 NT_OFFSET = 256
 #--end constants--
 
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 98e9122..742abd1 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -91,7 +91,8 @@
     '**=': DOUBLESTAREQUAL,
     '//':  DOUBLESLASH,
     '//=': DOUBLESLASHEQUAL,
-    '@':   AT
+    '@':   AT,
+    '@=':  ATEQUAL,
 }
 
 class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
@@ -150,7 +151,7 @@
 # recognized as two instances of =).
 Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                  r"//=?", r"->",
-                 r"[+\-*/%&|^=<>]=?",
+                 r"[+\-*/%&@|^=<>]=?",
                  r"~")
 
 Bracket = '[][(){}]'
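
A quick sketch showing the new operator being scanned (assuming the patched token.py and tokenize.py above): '@=' now comes back as a single ATEQUAL operator token rather than AT followed by EQUAL.

    import io
    import token
    import tokenize

    for tok in tokenize.tokenize(io.BytesIO(b"x @= y\n").readline):
        if tok.type == token.OP:
            print(token.tok_name[tok.exact_type], tok.string)
    # ATEQUAL @=   (exact_type 50, per the renumbered constants in token.py above)
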
diff --git a/Lib/turtledemo/__init__.py b/Lib/turtledemo/__init__.py
index e69de29..77150e2 100644
--- a/Lib/turtledemo/__init__.py
+++ b/Lib/turtledemo/__init__.py
@@ -0,0 +1,14 @@
+"""
+    --------------------------------------
+        About this viewer
+    --------------------------------------
+
+    Tiny demo viewer to view turtle graphics example scripts.
+
+    Quickly and dirtily assembled by Gregor Lingl.
+    June, 2006
+
+    For more information see: turtledemo - Help
+
+    Have fun!
+"""
diff --git a/Lib/turtledemo/__main__.py b/Lib/turtledemo/__main__.py
index a14684c..958c283 100755
--- a/Lib/turtledemo/__main__.py
+++ b/Lib/turtledemo/__main__.py
@@ -1,12 +1,81 @@
 #!/usr/bin/env python3
+
+"""
+  ----------------------------------------------
+
+      turtledemo - Help
+
+  ----------------------------------------------
+
+  This document has two sections:
+
+  (1) How to use the demo viewer
+  (2) How to add your own demos to the demo repository
+
+
+  (1) How to use the demo viewer.
+
+  Select a demoscript from the example menu.
+  The (syntax coloured) source code appears in the left
+  source code window. IT CANNOT BE EDITED, but ONLY VIEWED!
+
+  - Press START button to start the demo.
+  - Stop execution by pressing the STOP button.
+  - Clear screen by pressing the CLEAR button.
+  - Restart by pressing the START button again.
+
+  SPECIAL demos are those which run EVENTDRIVEN.
+  (For example clock.py - or oldTurtleDemo.py which
+  in the end expects a mouse click.):
+
+      Press START button to start the demo.
+
+      - Until the EVENTLOOP is entered everything works
+      as in an ordinary demo script.
+
+      - When the EVENTLOOP is entered, you control the
+      application by using the mouse and/or keys (or it's
+      controlled by some timer events)
+      To stop it you can and must press the STOP button.
+
+      While the EVENTLOOP is running, the examples menu is disabled.
+
+      - Only after having pressed the STOP button, you may
+      restart it or choose another example script.
+
+   * * * * * * * *
+   In some rare situations there may occur interferences/conflicts
+   between events concerning the demo script and those concerning the
+   demo-viewer. (They run in the same process.) Strange behaviour may be
+   the consequence and in the worst case you must close and restart the
+   viewer.
+   * * * * * * * *
+
+
+   (2) How to add your own demos to the demo repository
+
+   - place: same directory as turtledemo/__main__.py
+
+   - requirements on source code:
+       code must contain a main() function which will
+       be executed by the viewer (see provided example scripts)
+       main() may return a string which will be displayed
+       in the Label below the source code window (when execution
+       has finished.)
+
+       !! For programs, which are EVENT DRIVEN, main must return
+       !! the string "EVENTLOOP". This informs the viewer, that the
+       !! script is still running and must be stopped by the user!
+"""
 import sys
 import os
 
 from tkinter import *
 from idlelib.Percolator import Percolator
 from idlelib.ColorDelegator import ColorDelegator
-from idlelib.textView import view_file # TextViewer
+from idlelib.textView import view_text # TextViewer
 from importlib import reload
+from turtledemo import __doc__ as about_turtledemo
 
 import turtle
 import time
@@ -28,16 +97,13 @@
             entry.endswith(".py") and entry[0] != '_']
 
 def showDemoHelp():
-    view_file(demo.root, "Help on turtleDemo",
-              os.path.join(demo_dir, "demohelp.txt"))
+    view_text(demo.root, "Help on turtledemo", __doc__)
 
 def showAboutDemo():
-    view_file(demo.root, "About turtleDemo",
-              os.path.join(demo_dir, "about_turtledemo.txt"))
+    view_text(demo.root, "About turtledemo", about_turtledemo)
 
 def showAboutTurtle():
-    view_file(demo.root, "About the new turtle module.",
-              os.path.join(demo_dir, "about_turtle.txt"))
+    view_text(demo.root, "About the turtle module.", turtle.__doc__)
 
 class DemoWindow(object):
 
diff --git a/Lib/turtledemo/about_turtle.txt b/Lib/turtledemo/about_turtle.txt
deleted file mode 100644
index d02c7b3..0000000
--- a/Lib/turtledemo/about_turtle.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-
-========================================================
-    A new turtle module for Python
-========================================================
-
-Turtle graphics is a popular way for introducing programming to
-kids. It was part of the original Logo programming language developed
-by Wally Feurzig and Seymour Papert in 1966.
-
-Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it
-the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
-the direction it is facing, drawing a line as it moves. Give it the
-command turtle.right(25), and it rotates in-place 25 degrees clockwise.
-
-By combining together these and similar commands, intricate shapes and
-pictures can easily be drawn.
-
------ turtle.py
-
-This module is an extended reimplementation of turtle.py from the
-Python standard distribution up to Python 2.5. (See: http:\\www.python.org)
-
-It tries to keep the merits of turtle.py and to be (nearly) 100%
-compatible with it. This means in the first place to enable the
-learning programmer to use all the commands, classes and methods
-interactively when using the module from within IDLE run with
-the -n switch.
-
-Roughly it has the following features added:
-
-- Better animation of the turtle movements, especially of turning the
-    turtle. So the turtles can more easily be used as a visual feedback
-    instrument by the (beginning) programmer.
-
-- Different turtle shapes, gif-images as turtle shapes, user defined
-    and user controllable turtle shapes, among them compound
-    (multicolored) shapes. Turtle shapes can be stgretched and tilted, which
-    makes turtles zu very versatile geometrical objects.
-
-- Fine control over turtle movement and screen updates via delay(),
-    and enhanced tracer() and speed() methods.
-
-- Aliases for the most commonly used commands, like fd for forward etc.,
-    following the early Logo traditions. This reduces the boring work of
-    typing long sequences of commands, which often occur in a natural way
-    when kids try to program fancy pictures on their first encounter with
-    turtle graphcis.
-
-- Turtles now have an undo()-method with configurable undo-buffer.
-
-- Some simple commands/methods for creating event driven programs
-    (mouse-, key-, timer-events). Especially useful for programming games.
-
-- A scrollable Canvas class. The default scrollable Canvas can be
-    extended interactively as needed while playing around with the turtle(s).
-
-- A TurtleScreen class with methods controlling background color or
-    background image, window and canvas size and other properties of the
-    TurtleScreen.
-
-- There is a method, setworldcoordinates(), to install a user defined
-    coordinate-system for the TurtleScreen.
-
-- The implementation uses a 2-vector class named Vec2D, derived from tuple.
-    This class is public, so it can be imported by the application programmer,
-    which makes certain types of computations very natural and compact.
-
-- Appearance of the TurtleScreen and the Turtles at startup/import can be
-    configured by means of a turtle.cfg configuration file.
-    The default configuration mimics the appearance of the old turtle module.
-
-- If configured appropriately the module reads in docstrings from a docstring
-    dictionary in some different language, supplied separately  and replaces
-    the english ones by those read in. There is a utility function
-    write_docstringdict() to write a dictionary with the original (english)
-    docstrings to disc, so it can serve as a template for translations.
diff --git a/Lib/turtledemo/about_turtledemo.txt b/Lib/turtledemo/about_turtledemo.txt
deleted file mode 100644
index a9009bd..0000000
--- a/Lib/turtledemo/about_turtledemo.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-
-    --------------------------------------
-        About this viewer
-    --------------------------------------
-
-    Tiny demo viewer to view turtle graphics example scripts.
-
-    Quickly and dirtyly assembled by Gregor Lingl.
-    June, 2006
-
-    For more information see: turtleDemo - Help
-
-    Have fun!
diff --git a/Lib/turtledemo/demohelp.txt b/Lib/turtledemo/demohelp.txt
deleted file mode 100644
index fe83bc7..0000000
--- a/Lib/turtledemo/demohelp.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-
-
-  ----------------------------------------------
-
-      turtleDemo - Help
-
-  ----------------------------------------------
-
-  This document has two sections:
-
-  (1) How to use the demo viewer
-  (2) How to add your own demos to the demo repository
-
-
-  (1) How to use the demo viewer.
-
-  Select a demoscript from the example menu.
-  The (syntax coloured) source code appears in the left
-  source code window. IT CANNOT BE EDITED, but ONLY VIEWED!
-
-  - Press START button to start the demo.
-  - Stop execution by pressing the STOP button.
-  - Clear screen by pressing the CLEAR button.
-  - Restart by pressing the START button again.
-
-  SPECIAL demos are those which run EVENTDRIVEN.
-  (For example clock.py - or oldTurtleDemo.py which
-  in the end expects a mouse click.):
-
-      Press START button to start the demo.
-
-      - Until the EVENTLOOP is entered everything works
-      as in an ordinary demo script.
-
-      - When the EVENTLOOP is entered, you control the
-      application by using the mouse and/or keys (or it's
-      controlled by some timer events)
-      To stop it you can and must press the STOP button.
-
-      While the EVENTLOOP is running, the examples menu is disabled.
-
-      - Only after having pressed the STOP button, you may
-      restart it or choose another example script.
-
-   * * * * * * * *
-   In some rare situations there may occur interferences/conflicts
-   between events concerning the demo script and those concerning the
-   demo-viewer. (They run in the same process.) Strange behaviour may be
-   the consequence and in the worst case you must close and restart the
-   viewer.
-   * * * * * * * *
-
-
-   (2) How to add your own demos to the demo repository
-
-   - place: same directory as turtledemo/__main__.py
-
-   - requirements on source code:
-       code must contain a main() function which will
-       be executed by the viewer (see provided example scripts)
-       main() may return a string which will be displayed
-       in the Label below the source code window (when execution
-       has finished.) 
-
-       !! For programs, which are EVENT DRIVEN, main must return
-       !! the string "EVENTLOOP". This informs the viewer, that the
-       !! script is still running and must be stopped by the user!
-
-        
-  
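
As the removed help text (and the module docstring that replaces it) describes, a demo script only needs a main() function and must live in the turtledemo directory; a minimal hypothetical example, say square.py, could look like this:

    from turtle import Turtle, mainloop

    def main():
        t = Turtle()
        for _ in range(4):          # draw a square
            t.forward(100)
            t.left(90)
        return "Done!"              # displayed in the label below the source window

    if __name__ == '__main__':
        msg = main()
        print(msg)
        mainloop()
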
diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py
index 5555774..d9c2ee9 100644
--- a/Lib/unittest/mock.py
+++ b/Lib/unittest/mock.py
@@ -27,9 +27,13 @@
 import inspect
 import pprint
 import sys
+import builtins
+from types import ModuleType
 from functools import wraps, partial
 
 
+_builtins = {name for name in dir(builtins) if not name.startswith('_')}
+
 BaseExceptions = (BaseException,)
 if 'java' in sys.platform:
     # jython
@@ -375,7 +379,7 @@
     def __init__(
             self, spec=None, wraps=None, name=None, spec_set=None,
             parent=None, _spec_state=None, _new_name='', _new_parent=None,
-            _spec_as_instance=False, _eat_self=None, **kwargs
+            _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs
         ):
         if _new_parent is None:
             _new_parent = parent
@@ -405,6 +409,7 @@
         __dict__['_mock_mock_calls'] = _CallList()
 
         __dict__['method_calls'] = _CallList()
+        __dict__['_mock_unsafe'] = unsafe
 
         if kwargs:
             self.configure_mock(**kwargs)
@@ -561,13 +566,16 @@
 
 
     def __getattr__(self, name):
-        if name == '_mock_methods':
+        if name in {'_mock_methods', '_mock_unsafe'}:
             raise AttributeError(name)
         elif self._mock_methods is not None:
             if name not in self._mock_methods or name in _all_magics:
                 raise AttributeError("Mock object has no attribute %r" % name)
         elif _is_magic(name):
             raise AttributeError(name)
+        if not self._mock_unsafe:
+            if name.startswith(('assert', 'assret')):
+                raise AttributeError(name)
 
         result = self._mock_children.get(name)
         if result is _deleted:
@@ -750,6 +758,14 @@
         else:
             return _call
 
+    def assert_not_called(_mock_self):
+        """assert that the mock was never called.
+        """
+        self = _mock_self
+        if self.call_count != 0:
+            msg = ("Expected '%s' to not have been called. Called %s times." %
+                   (self._mock_name or 'mock', self.call_count))
+            raise AssertionError(msg)
 
     def assert_called_with(_mock_self, *args, **kwargs):
         """assert that the mock was called with the specified arguments.
@@ -1166,6 +1182,9 @@
         else:
             local = True
 
+        if name in _builtins and isinstance(target, ModuleType):
+            self.create = True
+
         if not self.create and original is DEFAULT:
             raise AttributeError(
                 "%s does not have the attribute %r" % (target, name)
diff --git a/Lib/unittest/test/test_case.py b/Lib/unittest/test/test_case.py
index 4932578..aae7ec3 100644
--- a/Lib/unittest/test/test_case.py
+++ b/Lib/unittest/test/test_case.py
@@ -1080,7 +1080,7 @@
             # so can't use assertEqual either. Just use assertTrue.
             self.assertTrue(sample_text_error == error)
 
-    def testAsertEqualSingleLine(self):
+    def testAssertEqualSingleLine(self):
         sample_text = "laden swallows fly slowly"
         revised_sample_text = "unladen swallows fly quickly"
         sample_text_error = """\
diff --git a/Lib/unittest/test/test_loader.py b/Lib/unittest/test/test_loader.py
index b62a1b5..3e013af 100644
--- a/Lib/unittest/test/test_loader.py
+++ b/Lib/unittest/test/test_loader.py
@@ -255,7 +255,7 @@
         try:
             loader.loadTestsFromName('unittest.sdasfasfasdf')
         except AttributeError as e:
-            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
+            self.assertEqual(str(e), "module 'unittest' has no attribute 'sdasfasfasdf'")
         else:
             self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
 
@@ -272,7 +272,7 @@
         try:
             loader.loadTestsFromName('sdasfasfasdf', unittest)
         except AttributeError as e:
-            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
+            self.assertEqual(str(e), "module 'unittest' has no attribute 'sdasfasfasdf'")
         else:
             self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
 
@@ -635,7 +635,7 @@
         try:
             loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
         except AttributeError as e:
-            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
+            self.assertEqual(str(e), "module 'unittest' has no attribute 'sdasfasfasdf'")
         else:
             self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
 
@@ -654,7 +654,7 @@
         try:
             loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
         except AttributeError as e:
-            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
+            self.assertEqual(str(e), "module 'unittest' has no attribute 'sdasfasfasdf'")
         else:
             self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
 
@@ -673,7 +673,7 @@
         try:
             loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
         except AttributeError as e:
-            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
+            self.assertEqual(str(e), "module 'unittest' has no attribute 'sdasfasfasdf'")
         else:
             self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
 
diff --git a/Lib/unittest/test/testmock/testmock.py b/Lib/unittest/test/testmock/testmock.py
index 23675b9..b65dc32 100644
--- a/Lib/unittest/test/testmock/testmock.py
+++ b/Lib/unittest/test/testmock/testmock.py
@@ -1187,6 +1187,26 @@
         m = mock.create_autospec(object(), name='sweet_func')
         self.assertIn('sweet_func', repr(m))
 
+    #Issue21238
+    def test_mock_unsafe(self):
+        m = Mock()
+        with self.assertRaises(AttributeError):
+            m.assert_foo_call()
+        with self.assertRaises(AttributeError):
+            m.assret_foo_call()
+        m = Mock(unsafe=True)
+        m.assert_foo_call()
+        m.assret_foo_call()
+
+    #Issue21262
+    def test_assert_not_called(self):
+        m = Mock()
+        m.hello.assert_not_called()
+        m.hello()
+        with self.assertRaises(AssertionError):
+            m.hello.assert_not_called()
+
+
     def test_mock_add_spec(self):
         class _One(object):
             one = 1
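
A brief sketch of the two behaviors the tests above check (assuming unittest.mock from a build with this change): attribute names starting with "assert" or "assret" now raise AttributeError unless the mock is created with unsafe=True, and assert_not_called() complements assert_called_with().

    from unittest.mock import Mock

    m = Mock()
    m.hello.assert_not_called()        # passes: hello was never called
    m.hello()
    try:
        m.hello.assert_not_called()    # now fails with AssertionError
    except AssertionError as exc:
        print(exc)

    try:
        m.assret_called_with(1)        # the typo is caught instead of silently passing
    except AttributeError as exc:
        print(exc)

    loose = Mock(unsafe=True)
    loose.assert_me_anything()         # allowed again when unsafe=True
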
diff --git a/Lib/unittest/test/testmock/testpatch.py b/Lib/unittest/test/testmock/testpatch.py
index b516f42..28fe86b 100644
--- a/Lib/unittest/test/testmock/testpatch.py
+++ b/Lib/unittest/test/testmock/testpatch.py
@@ -377,7 +377,7 @@
 
     def test_patchobject_wont_create_by_default(self):
         try:
-            @patch.object(SomeClass, 'frooble', sentinel.Frooble)
+            @patch.object(SomeClass, 'ord', sentinel.Frooble)
             def test():
                 self.fail('Patching non existent attributes should fail')
 
@@ -386,7 +386,27 @@
             pass
         else:
             self.fail('Patching non existent attributes should fail')
-        self.assertFalse(hasattr(SomeClass, 'frooble'))
+        self.assertFalse(hasattr(SomeClass, 'ord'))
+
+
+    def test_patch_builtins_without_create(self):
+        @patch(__name__+'.ord')
+        def test_ord(mock_ord):
+            mock_ord.return_value = 101
+            return ord('c')
+
+        @patch(__name__+'.open')
+        def test_open(mock_open):
+            m = mock_open.return_value
+            m.read.return_value = 'abcd'
+
+            fobj = open('doesnotexists.txt')
+            data = fobj.read()
+            fobj.close()
+            return data
+
+        self.assertEqual(test_ord(), 101)
+        self.assertEqual(test_open(), 'abcd')
 
 
     def test_patch_with_static_methods(self):
diff --git a/Lib/urllib/robotparser.py b/Lib/urllib/robotparser.py
index 1d7b751..4fbb0cb 100644
--- a/Lib/urllib/robotparser.py
+++ b/Lib/urllib/robotparser.py
@@ -172,7 +172,7 @@
         return self.path == "*" or filename.startswith(self.path)
 
     def __str__(self):
-        return (self.allowance and "Allow" or "Disallow") + ": " + self.path
+        return ("Allow" if self.allowance else "Disallow") + ": " + self.path
 
 
 class Entry:
diff --git a/Lib/xmlrpc/client.py b/Lib/xmlrpc/client.py
index c2ae707..567554d 100644
--- a/Lib/xmlrpc/client.py
+++ b/Lib/xmlrpc/client.py
@@ -1449,6 +1449,12 @@
             return self.__transport
         raise AttributeError("Attribute %r not found" % (attr,))
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.__close()
+
 # compatibility
 
 Server = ServerProxy
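
An illustrative sketch of the new context-manager support (the endpoint URL is hypothetical; any XML-RPC server exposing an add() method would behave the same way):

    import xmlrpc.client

    with xmlrpc.client.ServerProxy("http://localhost:8000/RPC2") as proxy:
        print(proxy.add(2, 3))
    # On exit, __exit__ closes the proxy, dropping the transport's cached
    # connection, as the new test_context_manager above verifies.
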
diff --git a/Mac/PythonLauncher/Info.plist.in b/Mac/PythonLauncher/Info.plist.in
index 0a5a439..0e60e07 100644
--- a/Mac/PythonLauncher/Info.plist.in
+++ b/Mac/PythonLauncher/Info.plist.in
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 <plist version="1.0">
 <dict>
 	<key>CFBundleDevelopmentRegion</key>
@@ -38,7 +38,7 @@
 		</dict>
 	</array>
 	<key>CFBundleExecutable</key>
-	<string>PythonLauncher</string>
+	<string>Python Launcher</string>
 	<key>CFBundleGetInfoString</key>
 	<string>%VERSION%, © 2001-2014 Python Software Foundation</string>
 	<key>CFBundleIconFile</key>
diff --git a/Mac/PythonLauncher/Makefile.in b/Mac/PythonLauncher/Makefile.in
index 970b83f..4b4ae62 100644
--- a/Mac/PythonLauncher/Makefile.in
+++ b/Mac/PythonLauncher/Makefile.in
@@ -15,12 +15,10 @@
 PYTHONFRAMEWORK=@PYTHONFRAMEWORK@
 
 # Deployment target selected during configure, to be checked
-# by distutils  
+# by distutils
 MACOSX_DEPLOYMENT_TARGET=@CONFIGURE_MACOSX_DEPLOYMENT_TARGET@
 @EXPORT_MACOSX_DEPLOYMENT_TARGET@export MACOSX_DEPLOYMENT_TARGET
 
-BUNDLEBULDER=$(srcdir)/../Tools/bundlebuilder.py
-
 PYTHONAPPSDIR=@FRAMEWORKINSTALLAPPSPREFIX@/$(PYTHONFRAMEWORK) $(VERSION)
 OBJECTS=FileSettings.o MyAppDelegate.o MyDocument.o PreferencesWindowController.o doscript.o main.o
 
@@ -30,10 +28,10 @@
 	/bin/cp -r "Python Launcher.app" "$(DESTDIR)$(PYTHONAPPSDIR)"
 	touch "$(DESTDIR)$(PYTHONAPPSDIR)/Python Launcher.app"
 
-
 clean:
 	rm -f *.o "Python Launcher"
 	rm -rf "Python Launcher.app"
+	rm -f Info.plist
 
 Python\ Launcher.app:  Info.plist \
 		Python\ Launcher $(srcdir)/../Icons/PythonLauncher.icns \
@@ -41,20 +39,18 @@
 		$(srcdir)/../Icons/PythonCompiled.icns \
 		$(srcdir)/factorySettings.plist
 	rm -fr "Python Launcher.app"
-	$(RUNSHARED) $(BUILDPYTHON) $(BUNDLEBULDER) \
-		--builddir=. \
-		--name="Python Launcher" \
-		--executable="Python Launcher" \
-		--iconfile=$(srcdir)/../Icons/PythonLauncher.icns \
-		--bundle-id=org.python.PythonLauncher \
-		--resource=$(srcdir)/../Icons/PythonSource.icns \
-		--resource=$(srcdir)/../Icons/PythonCompiled.icns \
-		--resource=$(srcdir)/English.lproj \
-		--resource=$(srcdir)/factorySettings.plist \
-		--plist Info.plist \
-		build
+	mkdir "Python Launcher.app"
+	mkdir "Python Launcher.app/Contents"
+	mkdir "Python Launcher.app/Contents/MacOS"
+	mkdir "Python Launcher.app/Contents/Resources"
+	cp "Python Launcher" "Python Launcher.app/Contents/MacOS"
+	cp Info.plist  "Python Launcher.app/Contents"
+	cp $(srcdir)/../Icons/PythonLauncher.icns  "Python Launcher.app/Contents/Resources"
+	cp $(srcdir)/../Icons/PythonSource.icns  "Python Launcher.app/Contents/Resources"
+	cp $(srcdir)/../Icons/PythonCompiled.icns  "Python Launcher.app/Contents/Resources"
+	cp $(srcdir)/factorySettings.plist  "Python Launcher.app/Contents/Resources"
+	cp -R $(srcdir)/English.lproj "Python Launcher.app/Contents/Resources"
 	find "Python Launcher.app" -name '.svn' -print0 | xargs -0 rm -r
-		
 
 FileSettings.o: $(srcdir)/FileSettings.m
 	$(CC) $(CFLAGS) -o $@ -c $(srcdir)/FileSettings.m
diff --git a/Mac/Tools/bundlebuilder.py b/Mac/Tools/bundlebuilder.py
deleted file mode 100755
index f5679d3..0000000
--- a/Mac/Tools/bundlebuilder.py
+++ /dev/null
@@ -1,934 +0,0 @@
-#! /usr/bin/env python
-
-"""\
-bundlebuilder.py -- Tools to assemble MacOS X (application) bundles.
-
-This module contains two classes to build so called "bundles" for
-MacOS X. BundleBuilder is a general tool, AppBuilder is a subclass
-specialized in building application bundles.
-
-[Bundle|App]Builder objects are instantiated with a bunch of keyword
-arguments, and have a build() method that will do all the work. See
-the class doc strings for a description of the constructor arguments.
-
-The module contains a main program that can be used in two ways:
-
-  % python bundlebuilder.py [options] build
-  % python buildapp.py [options] build
-
-Where "buildapp.py" is a user-supplied setup.py-like script following
-this model:
-
-  from bundlebuilder import buildapp
-  buildapp(<lots-of-keyword-args>)
-
-"""
-
-
-__all__ = ["BundleBuilder", "BundleBuilderError", "AppBuilder", "buildapp"]
-
-
-import sys
-import os, errno, shutil
-import imp, marshal
-import re
-from copy import deepcopy
-import getopt
-from plistlib import Plist
-from types import FunctionType as function
-
-class BundleBuilderError(Exception): pass
-
-
-class Defaults:
-
-    """Class attributes that don't start with an underscore and are
-    not functions or classmethods are (deep)copied to self.__dict__.
-    This allows for mutable default values.
-    """
-
-    def __init__(self, **kwargs):
-        defaults = self._getDefaults()
-        defaults.update(kwargs)
-        self.__dict__.update(defaults)
-
-    def _getDefaults(cls):
-        defaults = {}
-        for base in cls.__bases__:
-            if hasattr(base, "_getDefaults"):
-                defaults.update(base._getDefaults())
-        for name, value in list(cls.__dict__.items()):
-            if name[0] != "_" and not isinstance(value,
-                    (function, classmethod)):
-                defaults[name] = deepcopy(value)
-        return defaults
-    _getDefaults = classmethod(_getDefaults)
-
-
-class BundleBuilder(Defaults):
-
-    """BundleBuilder is a barebones class for assembling bundles. It
-    knows nothing about executables or icons, it only copies files
-    and creates the PkgInfo and Info.plist files.
-    """
-
-    # (Note that Defaults.__init__ (deep)copies these values to
-    # instance variables. Mutable defaults are therefore safe.)
-
-    # Name of the bundle, with or without extension.
-    name = None
-
-    # The property list ("plist")
-    plist = Plist(CFBundleDevelopmentRegion = "English",
-                  CFBundleInfoDictionaryVersion = "6.0")
-
-    # The type of the bundle.
-    type = "BNDL"
-    # The creator code of the bundle.
-    creator = None
-
-    # the CFBundleIdentifier (this is used for the preferences file name)
-    bundle_id = None
-
-    # List of files that have to be copied to <bundle>/Contents/Resources.
-    resources = []
-
-    # List of (src, dest) tuples; dest should be a path relative to the bundle
-    # (eg. "Contents/Resources/MyStuff/SomeFile.ext).
-    files = []
-
-    # List of shared libraries (dylibs, Frameworks) to bundle with the app
-    # will be placed in Contents/Frameworks
-    libs = []
-
-    # Directory where the bundle will be assembled.
-    builddir = "build"
-
-    # Make symlinks instead copying files. This is handy during debugging, but
-    # makes the bundle non-distributable.
-    symlink = 0
-
-    # Verbosity level.
-    verbosity = 1
-
-    # Destination root directory
-    destroot = ""
-
-    def setup(self):
-        # XXX rethink self.name munging, this is brittle.
-        self.name, ext = os.path.splitext(self.name)
-        if not ext:
-            ext = ".bundle"
-        bundleextension = ext
-        # misc (derived) attributes
-        self.bundlepath = pathjoin(self.builddir, self.name + bundleextension)
-
-        plist = self.plist
-        plist.CFBundleName = self.name
-        plist.CFBundlePackageType = self.type
-        if self.creator is None:
-            if hasattr(plist, "CFBundleSignature"):
-                self.creator = plist.CFBundleSignature
-            else:
-                self.creator = "????"
-        plist.CFBundleSignature = self.creator
-        if self.bundle_id:
-            plist.CFBundleIdentifier = self.bundle_id
-        elif not hasattr(plist, "CFBundleIdentifier"):
-            plist.CFBundleIdentifier = self.name
-
-    def build(self):
-        """Build the bundle."""
-        builddir = self.builddir
-        if builddir and not os.path.exists(builddir):
-            os.mkdir(builddir)
-        self.message("Building %s" % repr(self.bundlepath), 1)
-        if os.path.exists(self.bundlepath):
-            shutil.rmtree(self.bundlepath)
-        if os.path.exists(self.bundlepath + '~'):
-            shutil.rmtree(self.bundlepath + '~')
-        bp = self.bundlepath
-
-        # Create the app bundle in a temporary location and then
-        # rename the completed bundle. This way the Finder will
-        # never see an incomplete bundle (where it might pick up
-        # and cache the wrong meta data)
-        self.bundlepath = bp + '~'
-        try:
-            os.mkdir(self.bundlepath)
-            self.preProcess()
-            self._copyFiles()
-            self._addMetaFiles()
-            self.postProcess()
-            os.rename(self.bundlepath, bp)
-        finally:
-            self.bundlepath = bp
-        self.message("Done.", 1)
-
-    def preProcess(self):
-        """Hook for subclasses."""
-        pass
-    def postProcess(self):
-        """Hook for subclasses."""
-        pass
-
-    def _addMetaFiles(self):
-        contents = pathjoin(self.bundlepath, "Contents")
-        makedirs(contents)
-        #
-        # Write Contents/PkgInfo
-        assert len(self.type) == len(self.creator) == 4, \
-                "type and creator must be 4-byte strings."
-        pkginfo = pathjoin(contents, "PkgInfo")
-        f = open(pkginfo, "wb")
-        f.write((self.type + self.creator).encode('latin1'))
-        f.close()
-        #
-        # Write Contents/Info.plist
-        infoplist = pathjoin(contents, "Info.plist")
-        self.plist.write(infoplist)
-
-    def _copyFiles(self):
-        files = self.files[:]
-        for path in self.resources:
-            files.append((path, pathjoin("Contents", "Resources",
-                os.path.basename(path))))
-        for path in self.libs:
-            files.append((path, pathjoin("Contents", "Frameworks",
-                os.path.basename(path))))
-        if self.symlink:
-            self.message("Making symbolic links", 1)
-            msg = "Making symlink from"
-        else:
-            self.message("Copying files", 1)
-            msg = "Copying"
-        files.sort()
-        for src, dst in files:
-            if os.path.isdir(src):
-                self.message("%s %s/ to %s/" % (msg, src, dst), 2)
-            else:
-                self.message("%s %s to %s" % (msg, src, dst), 2)
-            dst = pathjoin(self.bundlepath, dst)
-            if self.symlink:
-                symlink(src, dst, mkdirs=1)
-            else:
-                copy(src, dst, mkdirs=1)
-
-    def message(self, msg, level=0):
-        if level <= self.verbosity:
-            indent = ""
-            if level > 1:
-                indent = (level - 1) * "  "
-            sys.stderr.write(indent + msg + "\n")
-
-    def report(self):
-        # XXX something decent
-        pass
-
-
-if __debug__:
-    PYC_EXT = ".pyc"
-else:
-    PYC_EXT = ".pyo"
-
-MAGIC = imp.get_magic()
-USE_ZIPIMPORT = "zipimport" in sys.builtin_module_names
-
-# For standalone apps, we have our own minimal site.py. We don't need
-# all the cruft of the real site.py.
-SITE_PY = """\
-import sys
-if not %(semi_standalone)s:
-    del sys.path[1:]  # sys.path[0] is Contents/Resources/
-"""
-
-if USE_ZIPIMPORT:
-    ZIP_ARCHIVE = "Modules.zip"
-    SITE_PY += "sys.path.append(sys.path[0] + '/%s')\n" % ZIP_ARCHIVE
-    def getPycData(fullname, code, ispkg):
-        if ispkg:
-            fullname += ".__init__"
-        path = fullname.replace(".", os.sep) + PYC_EXT
-        return path, MAGIC + '\0\0\0\0' + marshal.dumps(code)
-
-#
-# Extension modules can't be in the modules zip archive, so a placeholder
-# is added instead, that loads the extension from a specified location.
-#
-EXT_LOADER = """\
-def __load():
-    import imp, sys, os
-    for p in sys.path:
-        path = os.path.join(p, "%(filename)s")
-        if os.path.exists(path):
-            break
-    else:
-        assert 0, "file not found: %(filename)s"
-    mod = imp.load_dynamic("%(name)s", path)
-
-__load()
-del __load
-"""
-
-MAYMISS_MODULES = ['mac', 'nt', 'ntpath', 'dos', 'dospath',
-    'win32api', 'ce', '_winreg', 'nturl2path', 'sitecustomize',
-    'org.python.core', 'riscos', 'riscosenviron', 'riscospath'
-]
-
-STRIP_EXEC = "/usr/bin/strip"
-
-#
-# We're using a stock interpreter to run the app, yet we need
-# a way to pass the Python main program to the interpreter. The
-# bootstrapping script fires up the interpreter with the right
-# arguments. os.execve() is used as OSX doesn't like us to
-# start a real new process. Also, the executable name must match
-# the CFBundleExecutable value in the Info.plist, so we lie
-# deliberately with argv[0]. The actual Python executable is
-# passed in an environment variable so we can "repair"
-# sys.executable later.
-#
-BOOTSTRAP_SCRIPT = """\
-#!%(hashbang)s
-
-import sys, os
-execdir = os.path.dirname(sys.argv[0])
-executable = os.path.join(execdir, "%(executable)s")
-resdir = os.path.join(os.path.dirname(execdir), "Resources")
-libdir = os.path.join(os.path.dirname(execdir), "Frameworks")
-mainprogram = os.path.join(resdir, "%(mainprogram)s")
-
-sys.argv.insert(1, mainprogram)
-if %(standalone)s or %(semi_standalone)s:
-    os.environ["PYTHONPATH"] = resdir
-    if %(standalone)s:
-        os.environ["PYTHONHOME"] = resdir
-else:
-    pypath = os.getenv("PYTHONPATH", "")
-    if pypath:
-        pypath = ":" + pypath
-    os.environ["PYTHONPATH"] = resdir + pypath
-os.environ["PYTHONEXECUTABLE"] = executable
-os.environ["DYLD_LIBRARY_PATH"] = libdir
-os.environ["DYLD_FRAMEWORK_PATH"] = libdir
-os.execve(executable, sys.argv, os.environ)
-"""
-
-
-#
-# Optional wrapper that converts "dropped files" into sys.argv values.
-#
-ARGV_EMULATOR = """\
-import argvemulator, os
-
-argvemulator.ArgvCollector().mainloop()
-execfile(os.path.join(os.path.split(__file__)[0], "%(realmainprogram)s"))
-"""
-
-#
-# When building a standalone app with Python.framework, we need to copy
-# a subset from Python.framework to the bundle. The following list
-# specifies exactly what items we'll copy.
-#
-PYTHONFRAMEWORKGOODIES = [
-    "Python",  # the Python core library
-    "Resources/English.lproj",
-    "Resources/Info.plist",
-    "Resources/version.plist",
-]
-
-def isFramework():
-    return sys.exec_prefix.find("Python.framework") > 0
-
-
-LIB = os.path.join(sys.prefix, "lib", "python" + sys.version[:3])
-SITE_PACKAGES = os.path.join(LIB, "site-packages")
-
-
-class AppBuilder(BundleBuilder):
-
-    # Override type of the bundle.
-    type = "APPL"
-
-    # platform, name of the subfolder of Contents that contains the executable.
-    platform = "MacOS"
-
-    # A Python main program. If this argument is given, the main
-    # executable in the bundle will be a small wrapper that invokes
-    # the main program. (XXX Discuss why.)
-    mainprogram = None
-
-    # The main executable. If a Python main program is specified
-    # the executable will be copied to Resources and be invoked
-    # by the wrapper program mentioned above. Otherwise it will
-    # simply be used as the main executable.
-    executable = None
-
-    # The name of the main nib, for Cocoa apps. *Must* be specified
-    # when building a Cocoa app.
-    nibname = None
-
-    # The name of the icon file to be copied to Resources and used for
-    # the Finder icon.
-    iconfile = None
-
-    # Symlink the executable instead of copying it.
-    symlink_exec = 0
-
-    # If True, build standalone app.
-    standalone = 0
-
-    # If True, build semi-standalone app (only includes third-party modules).
-    semi_standalone = 0
-
-    # If set, use this for #! lines in stead of sys.executable
-    python = None
-
-    # If True, add a real main program that emulates sys.argv before calling
-    # mainprogram
-    argv_emulation = 0
-
-    # The following attributes are only used when building a standalone app.
-
-    # Exclude these modules.
-    excludeModules = []
-
-    # Include these modules.
-    includeModules = []
-
-    # Include these packages.
-    includePackages = []
-
-    # Strip binaries from debug info.
-    strip = 0
-
-    # Found Python modules: [(name, codeobject, ispkg), ...]
-    pymodules = []
-
-    # Modules that modulefinder couldn't find:
-    missingModules = []
-    maybeMissingModules = []
-
-    def setup(self):
-        if ((self.standalone or self.semi_standalone)
-            and self.mainprogram is None):
-            raise BundleBuilderError("must specify 'mainprogram' when "
-                    "building a standalone application.")
-        if self.mainprogram is None and self.executable is None:
-            raise BundleBuilderError("must specify either or both of "
-                    "'executable' and 'mainprogram'")
-
-        self.execdir = pathjoin("Contents", self.platform)
-
-        if self.name is not None:
-            pass
-        elif self.mainprogram is not None:
-            self.name = os.path.splitext(os.path.basename(self.mainprogram))[0]
-        elif executable is not None:
-            self.name = os.path.splitext(os.path.basename(self.executable))[0]
-        if self.name[-4:] != ".app":
-            self.name += ".app"
-
-        if self.executable is None:
-            if not self.standalone and not isFramework():
-                self.symlink_exec = 1
-            if self.python:
-                self.executable = self.python
-            else:
-                self.executable = sys.executable
-
-        if self.nibname:
-            self.plist.NSMainNibFile = self.nibname
-            if not hasattr(self.plist, "NSPrincipalClass"):
-                self.plist.NSPrincipalClass = "NSApplication"
-
-        if self.standalone and isFramework():
-            self.addPythonFramework()
-
-        BundleBuilder.setup(self)
-
-        self.plist.CFBundleExecutable = self.name
-
-        if self.standalone or self.semi_standalone:
-            self.findDependencies()
-
-    def preProcess(self):
-        resdir = "Contents/Resources"
-        if self.executable is not None:
-            if self.mainprogram is None:
-                execname = self.name
-            else:
-                execname = os.path.basename(self.executable)
-            execpath = pathjoin(self.execdir, execname)
-            if not self.symlink_exec:
-                self.files.append((self.destroot + self.executable, execpath))
-            self.execpath = execpath
-
-        if self.mainprogram is not None:
-            mainprogram = os.path.basename(self.mainprogram)
-            self.files.append((self.mainprogram, pathjoin(resdir, mainprogram)))
-            if self.argv_emulation:
-                # Change the main program, and create the helper main program (which
-                # does argv collection and then calls the real main).
-                # Also update the included modules (if we're creating a standalone
-                # program) and the plist
-                realmainprogram = mainprogram
-                mainprogram = '__argvemulator_' + mainprogram
-                resdirpath = pathjoin(self.bundlepath, resdir)
-                mainprogrampath = pathjoin(resdirpath, mainprogram)
-                makedirs(resdirpath)
-                open(mainprogrampath, "w").write(ARGV_EMULATOR % locals())
-                if self.standalone or self.semi_standalone:
-                    self.includeModules.append("argvemulator")
-                    self.includeModules.append("os")
-                if "CFBundleDocumentTypes" not in self.plist:
-                    self.plist["CFBundleDocumentTypes"] = [
-                        { "CFBundleTypeOSTypes" : [
-                            "****",
-                            "fold",
-                            "disk"],
-                          "CFBundleTypeRole": "Viewer"}]
-            # Write bootstrap script
-            executable = os.path.basename(self.executable)
-            execdir = pathjoin(self.bundlepath, self.execdir)
-            bootstrappath = pathjoin(execdir, self.name)
-            makedirs(execdir)
-            if self.standalone or self.semi_standalone:
-                # XXX we're screwed when the end user has deleted
-                # /usr/bin/python
-                hashbang = "/usr/bin/python"
-            elif self.python:
-                hashbang = self.python
-            else:
-                hashbang = os.path.realpath(sys.executable)
-            standalone = self.standalone
-            semi_standalone = self.semi_standalone
-            open(bootstrappath, "w").write(BOOTSTRAP_SCRIPT % locals())
-            os.chmod(bootstrappath, 0o775)
-
-        if self.iconfile is not None:
-            iconbase = os.path.basename(self.iconfile)
-            self.plist.CFBundleIconFile = iconbase
-            self.files.append((self.iconfile, pathjoin(resdir, iconbase)))
-
-    def postProcess(self):
-        if self.standalone or self.semi_standalone:
-            self.addPythonModules()
-        if self.strip and not self.symlink:
-            self.stripBinaries()
-
-        if self.symlink_exec and self.executable:
-            self.message("Symlinking executable %s to %s" % (self.executable,
-                    self.execpath), 2)
-            dst = pathjoin(self.bundlepath, self.execpath)
-            makedirs(os.path.dirname(dst))
-            os.symlink(os.path.abspath(self.executable), dst)
-
-        if self.missingModules or self.maybeMissingModules:
-            self.reportMissing()
-
-    def addPythonFramework(self):
-        # If we're building a standalone app with Python.framework,
-        # include a minimal subset of Python.framework, *unless*
-        # Python.framework was specified manually in self.libs.
-        for lib in self.libs:
-            if os.path.basename(lib) == "Python.framework":
-                # a Python.framework was specified as a library
-                return
-
-        frameworkpath = sys.exec_prefix[:sys.exec_prefix.find(
-            "Python.framework") + len("Python.framework")]
-
-        version = sys.version[:3]
-        frameworkpath = pathjoin(frameworkpath, "Versions", version)
-        destbase = pathjoin("Contents", "Frameworks", "Python.framework",
-                            "Versions", version)
-        for item in PYTHONFRAMEWORKGOODIES:
-            src = pathjoin(frameworkpath, item)
-            dst = pathjoin(destbase, item)
-            self.files.append((src, dst))
-
-    def _getSiteCode(self):
-        return compile(SITE_PY % {"semi_standalone": self.semi_standalone},
-                     "<-bundlebuilder.py->", "exec")
-
-    def addPythonModules(self):
-        self.message("Adding Python modules", 1)
-
-        if USE_ZIPIMPORT:
-            # Create a zip file containing all modules as pyc.
-            import zipfile
-            relpath = pathjoin("Contents", "Resources", ZIP_ARCHIVE)
-            abspath = pathjoin(self.bundlepath, relpath)
-            zf = zipfile.ZipFile(abspath, "w", zipfile.ZIP_DEFLATED)
-            for name, code, ispkg in self.pymodules:
-                self.message("Adding Python module %s" % name, 2)
-                path, pyc = getPycData(name, code, ispkg)
-                zf.writestr(path, pyc)
-            zf.close()
-            # add site.pyc
-            sitepath = pathjoin(self.bundlepath, "Contents", "Resources",
-                    "site" + PYC_EXT)
-            writePyc(self._getSiteCode(), sitepath)
-        else:
-            # Create individual .pyc files.
-            for name, code, ispkg in self.pymodules:
-                if ispkg:
-                    name += ".__init__"
-                path = name.split(".")
-                path = pathjoin("Contents", "Resources", *path) + PYC_EXT
-
-                if ispkg:
-                    self.message("Adding Python package %s" % path, 2)
-                else:
-                    self.message("Adding Python module %s" % path, 2)
-
-                abspath = pathjoin(self.bundlepath, path)
-                makedirs(os.path.dirname(abspath))
-                writePyc(code, abspath)
-
-    def stripBinaries(self):
-        if not os.path.exists(STRIP_EXEC):
-            self.message("Error: can't strip binaries: no strip program at "
-                "%s" % STRIP_EXEC, 0)
-        else:
-            import stat
-            self.message("Stripping binaries", 1)
-            def walk(top):
-                for name in os.listdir(top):
-                    path = pathjoin(top, name)
-                    if os.path.islink(path):
-                        continue
-                    if os.path.isdir(path):
-                        walk(path)
-                    else:
-                        mod = os.stat(path)[stat.ST_MODE]
-                        if not (mod & 0o100):
-                            continue
-                        relpath = path[len(self.bundlepath):]
-                        self.message("Stripping %s" % relpath, 2)
-                        inf, outf = os.popen4("%s -S \"%s\"" %
-                                              (STRIP_EXEC, path))
-                        output = outf.read().strip()
-                        if output:
-                            # usually not a real problem, like when we're
-                            # trying to strip a script
-                            self.message("Problem stripping %s:" % relpath, 3)
-                            self.message(output, 3)
-            walk(self.bundlepath)
-
-    def findDependencies(self):
-        self.message("Finding module dependencies", 1)
-        import modulefinder
-        mf = modulefinder.ModuleFinder(excludes=self.excludeModules)
-        if USE_ZIPIMPORT:
-            # zipimport imports zlib, must add it manually
-            mf.import_hook("zlib")
-        # manually add our own site.py
-        site = mf.add_module("site")
-        site.__code__ = self._getSiteCode()
-        mf.scan_code(site.__code__, site)
-
-        # warnings.py gets imported implicitly from C
-        mf.import_hook("warnings")
-
-        includeModules = self.includeModules[:]
-        for name in self.includePackages:
-            includeModules.extend(list(findPackageContents(name).keys()))
-        for name in includeModules:
-            try:
-                mf.import_hook(name)
-            except ImportError:
-                self.missingModules.append(name)
-
-        mf.run_script(self.mainprogram)
-        modules = list(mf.modules.items())
-        modules.sort()
-        for name, mod in modules:
-            path = mod.__file__
-            if path and self.semi_standalone:
-                # skip the standard library
-                if path.startswith(LIB) and not path.startswith(SITE_PACKAGES):
-                    continue
-            if path and mod.__code__ is None:
-                # C extension
-                filename = os.path.basename(path)
-                pathitems = name.split(".")[:-1] + [filename]
-                dstpath = pathjoin(*pathitems)
-                if USE_ZIPIMPORT:
-                    if name != "zlib":
-                        # neatly pack all extension modules in a subdirectory,
-                        # except zlib, since it's necessary for bootstrapping.
-                        dstpath = pathjoin("ExtensionModules", dstpath)
-                    # Python modules are stored in a Zip archive, but put
-                    # extensions in Contents/Resources/. Add a tiny "loader"
-                    # program in the Zip archive. Due to Thomas Heller.
-                    source = EXT_LOADER % {"name": name, "filename": dstpath}
-                    code = compile(source, "<dynloader for %s>" % name, "exec")
-                    mod.__code__ = code
-                self.files.append((path, pathjoin("Contents", "Resources", dstpath)))
-            if mod.__code__ is not None:
-                ispkg = mod.__path__ is not None
-                if not USE_ZIPIMPORT or name != "site":
-                    # Our site.py is doing the bootstrapping, so we must
-                    # include a real .pyc file if USE_ZIPIMPORT is True.
-                    self.pymodules.append((name, mod.__code__, ispkg))
-
-        if hasattr(mf, "any_missing_maybe"):
-            missing, maybe = mf.any_missing_maybe()
-        else:
-            missing = mf.any_missing()
-            maybe = []
-        self.missingModules.extend(missing)
-        self.maybeMissingModules.extend(maybe)
-
-    def reportMissing(self):
-        missing = [name for name in self.missingModules
-                if name not in MAYMISS_MODULES]
-        if self.maybeMissingModules:
-            maybe = self.maybeMissingModules
-        else:
-            maybe = [name for name in missing if "." in name]
-            missing = [name for name in missing if "." not in name]
-        missing.sort()
-        maybe.sort()
-        if maybe:
-            self.message("Warning: couldn't find the following submodules:", 1)
-            self.message("    (Note that these could be false alarms -- "
-                         "it's not always", 1)
-            self.message("    possible to distinguish between \"from package "
-                         "import submodule\" ", 1)
-            self.message("    and \"from package import name\")", 1)
-            for name in maybe:
-                self.message("  ? " + name, 1)
-        if missing:
-            self.message("Warning: couldn't find the following modules:", 1)
-            for name in missing:
-                self.message("  ? " + name, 1)
-
-    def report(self):
-        # XXX something decent
-        import pprint
-        pprint.pprint(self.__dict__)
-        if self.standalone or self.semi_standalone:
-            self.reportMissing()
-
-#
-# Utilities.
-#
-
-SUFFIXES = [_suf for _suf, _mode, _tp in imp.get_suffixes()]
-identifierRE = re.compile(r"[_a-zA-z][_a-zA-Z0-9]*$")
-
-def findPackageContents(name, searchpath=None):
-    head = name.split(".")[-1]
-    if identifierRE.match(head) is None:
-        return {}
-    try:
-        fp, path, (ext, mode, tp) = imp.find_module(head, searchpath)
-    except ImportError:
-        return {}
-    modules = {name: None}
-    if tp == imp.PKG_DIRECTORY and path:
-        files = os.listdir(path)
-        for sub in files:
-            sub, ext = os.path.splitext(sub)
-            fullname = name + "." + sub
-            if sub != "__init__" and fullname not in modules:
-                modules.update(findPackageContents(fullname, [path]))
-    return modules
-
-def writePyc(code, path):
-    f = open(path, "wb")
-    f.write(MAGIC)
-    f.write("\0" * 4)  # don't bother about a time stamp
-    marshal.dump(code, f)
-    f.close()
-
-def copy(src, dst, mkdirs=0):
-    """Copy a file or a directory."""
-    if mkdirs:
-        makedirs(os.path.dirname(dst))
-    if os.path.isdir(src):
-        shutil.copytree(src, dst, symlinks=1)
-    else:
-        shutil.copy2(src, dst)
-
-def copytodir(src, dstdir):
-    """Copy a file or a directory to an existing directory."""
-    dst = pathjoin(dstdir, os.path.basename(src))
-    copy(src, dst)
-
-def makedirs(dir):
-    """Make all directories leading up to 'dir' including the leaf
-    directory. Don't moan if any path element already exists."""
-    try:
-        os.makedirs(dir)
-    except OSError as why:
-        if why.errno != errno.EEXIST:
-            raise
-
-def symlink(src, dst, mkdirs=0):
-    """Copy a file or a directory."""
-    if not os.path.exists(src):
-        raise IOError("No such file or directory: '%s'" % src)
-    if mkdirs:
-        makedirs(os.path.dirname(dst))
-    os.symlink(os.path.abspath(src), dst)
-
-def pathjoin(*args):
-    """Safe wrapper for os.path.join: asserts that all but the first
-    argument are relative paths."""
-    for seg in args[1:]:
-        assert seg[0] != "/"
-    return os.path.join(*args)
-
-
-cmdline_doc = """\
-Usage:
-  python bundlebuilder.py [options] command
-  python mybuildscript.py [options] command
-
-Commands:
-  build      build the application
-  report     print a report
-
-Options:
-  -b, --builddir=DIR     the build directory; defaults to "build"
-  -n, --name=NAME        application name
-  -r, --resource=FILE    extra file or folder to be copied to Resources
-  -f, --file=SRC:DST     extra file or folder to be copied into the bundle;
-                         DST must be a path relative to the bundle root
-  -e, --executable=FILE  the executable to be used
-  -m, --mainprogram=FILE the Python main program
-  -a, --argv             add a wrapper main program to create sys.argv
-  -p, --plist=FILE       .plist file (default: generate one)
-      --nib=NAME         main nib name
-  -c, --creator=CCCC     4-char creator code (default: '????')
-      --iconfile=FILE    filename of the icon (an .icns file) to be used
-                         as the Finder icon
-      --bundle-id=ID     the CFBundleIdentifier, in reverse-dns format
-                         (eg. org.python.BuildApplet; this is used for
-                         the preferences file name)
-  -l, --link             symlink files/folder instead of copying them
-      --link-exec        symlink the executable instead of copying it
-      --standalone       build a standalone application, which is fully
-                         independent of a Python installation
-      --semi-standalone  build a standalone application, which depends on
-                         an installed Python, yet includes all third-party
-                         modules.
-      --python=FILE      Python to use in #! line in stead of current Python
-      --lib=FILE         shared library or framework to be copied into
-                         the bundle
-  -x, --exclude=MODULE   exclude module (with --(semi-)standalone)
-  -i, --include=MODULE   include module (with --(semi-)standalone)
-      --package=PACKAGE  include a whole package (with --(semi-)standalone)
-      --strip            strip binaries (remove debug info)
-  -v, --verbose          increase verbosity level
-  -q, --quiet            decrease verbosity level
-  -h, --help             print this message
-"""
-
-def usage(msg=None):
-    if msg:
-        print(msg)
-    print(cmdline_doc)
-    sys.exit(1)
-
-def main(builder=None):
-    if builder is None:
-        builder = AppBuilder(verbosity=1)
-
-    shortopts = "b:n:r:f:e:m:c:p:lx:i:hvqa"
-    longopts = ("builddir=", "name=", "resource=", "file=", "executable=",
-        "mainprogram=", "creator=", "nib=", "plist=", "link",
-        "link-exec", "help", "verbose", "quiet", "argv", "standalone",
-        "exclude=", "include=", "package=", "strip", "iconfile=",
-        "lib=", "python=", "semi-standalone", "bundle-id=", "destroot=")
-
-    try:
-        options, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
-    except getopt.error:
-        usage()
-
-    for opt, arg in options:
-        if opt in ('-b', '--builddir'):
-            builder.builddir = arg
-        elif opt in ('-n', '--name'):
-            builder.name = arg
-        elif opt in ('-r', '--resource'):
-            builder.resources.append(os.path.normpath(arg))
-        elif opt in ('-f', '--file'):
-            srcdst = arg.split(':')
-            if len(srcdst) != 2:
-                usage("-f or --file argument must be two paths, "
-                      "separated by a colon")
-            builder.files.append(srcdst)
-        elif opt in ('-e', '--executable'):
-            builder.executable = arg
-        elif opt in ('-m', '--mainprogram'):
-            builder.mainprogram = arg
-        elif opt in ('-a', '--argv'):
-            builder.argv_emulation = 1
-        elif opt in ('-c', '--creator'):
-            builder.creator = arg
-        elif opt == '--bundle-id':
-            builder.bundle_id = arg
-        elif opt == '--iconfile':
-            builder.iconfile = arg
-        elif opt == "--lib":
-            builder.libs.append(os.path.normpath(arg))
-        elif opt == "--nib":
-            builder.nibname = arg
-        elif opt in ('-p', '--plist'):
-            builder.plist = Plist.fromFile(arg)
-        elif opt in ('-l', '--link'):
-            builder.symlink = 1
-        elif opt == '--link-exec':
-            builder.symlink_exec = 1
-        elif opt in ('-h', '--help'):
-            usage()
-        elif opt in ('-v', '--verbose'):
-            builder.verbosity += 1
-        elif opt in ('-q', '--quiet'):
-            builder.verbosity -= 1
-        elif opt == '--standalone':
-            builder.standalone = 1
-        elif opt == '--semi-standalone':
-            builder.semi_standalone = 1
-        elif opt == '--python':
-            builder.python = arg
-        elif opt in ('-x', '--exclude'):
-            builder.excludeModules.append(arg)
-        elif opt in ('-i', '--include'):
-            builder.includeModules.append(arg)
-        elif opt == '--package':
-            builder.includePackages.append(arg)
-        elif opt == '--strip':
-            builder.strip = 1
-        elif opt == '--destroot':
-            builder.destroot = arg
-
-    if len(args) != 1:
-        usage("Must specify one command ('build', 'report' or 'help')")
-    command = args[0]
-
-    if command == "build":
-        builder.setup()
-        builder.build()
-    elif command == "report":
-        builder.setup()
-        builder.report()
-    elif command == "help":
-        usage()
-    else:
-        usage("Unknown command '%s'" % command)
-
-
-def buildapp(**kwargs):
-    builder = AppBuilder(**kwargs)
-    main(builder)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/Makefile.pre.in b/Makefile.pre.in
index f36c11d..43bc818 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -323,6 +323,13 @@
 PGENOBJS=	$(POBJS) $(PGOBJS)
 
 ##########################################################################
+# opcode.h generation
+OPCODE_H_DIR= 	$(srcdir)/Include
+OPCODE_H_SCRIPT= $(srcdir)/Tools/scripts/generate_opcode_h.py
+OPCODE_H=	$(OPCODE_H_DIR)/opcode.h
+OPCODE_H_GEN=	@OPCODEHGEN@  $(OPCODE_H_SCRIPT) $(srcdir)/Lib/opcode.py $(OPCODE_H)
+#
+##########################################################################
 # AST
 AST_H_DIR=	Include
 AST_H=		$(AST_H_DIR)/Python-ast.h
@@ -760,6 +767,9 @@
 	$(MKDIR_P) $(AST_C_DIR)
 	$(ASDLGEN) -c $(AST_C_DIR) $(AST_ASDL)
 
+$(OPCODE_H): $(srcdir)/Lib/opcode.py $(OPCODE_H_SCRIPT)
+	$(OPCODE_H_GEN)
+
 Python/compile.o Python/symtable.o Python/ast.o: $(GRAMMAR_H) $(AST_H)
 
 Python/getplatform.o: $(srcdir)/Python/getplatform.c
@@ -871,7 +881,7 @@
 		$(srcdir)/Include/node.h \
 		$(srcdir)/Include/object.h \
 		$(srcdir)/Include/objimpl.h \
-		$(srcdir)/Include/opcode.h \
+		$(OPCODE_H) \
 		$(srcdir)/Include/osdefs.h \
 		$(srcdir)/Include/patchlevel.h \
 		$(srcdir)/Include/pgen.h \
diff --git a/Misc/ACKS b/Misc/ACKS
index 97d00ea..68701c4 100644
--- a/Misc/ACKS
+++ b/Misc/ACKS
@@ -42,6 +42,7 @@
 Erik Andersén
 Oliver Andrich
 Ross Andrus
+Fabrice Aneche
 Juancarlo Añez
 Chris Angelico
 Jérémy Anger
@@ -470,6 +471,7 @@
 Shelley Gooch
 David Goodger
 Hans de Graaff
+Kim Gräsman
 Nathaniel Gray
 Eddy De Greef
 Grant Griffin
@@ -615,6 +617,7 @@
 Rajagopalasarma Jayakrishnan
 Zbigniew Jędrzejewski-Szmek
 Julien Jehannet
+Muhammad Jehanzeb
 Drew Jenkins
 Flemming Kjær Jensen
 Philip H. Jensen
@@ -634,6 +637,7 @@
 Nicolas Joly
 Brian K. Jones
 Evan Jones
+Glenn Jones
 Jeremy Jones
 Richard Jones
 Irmen de Jong
@@ -849,6 +853,7 @@
 Lucas Maystre
 Arnaud Mazin
 Matt McClure
+Jack McCracken
 Rebecca McCreary
 Kirk McDonald
 Chris McDonough
@@ -1117,6 +1122,7 @@
 Case Roole
 Timothy Roscoe
 Erik Rose
+Josh Rosenberg
 Jim Roskind
 Brian Rosner
 Guido van Rossum
@@ -1245,6 +1251,7 @@
 Evgeny Sologubov
 Cody Somerville
 Edoardo Spadolini
+Geoffrey Spear
 Clay Spence
 Stefan Sperling
 Nicholas Spies
@@ -1354,6 +1361,7 @@
 Daniel Urban
 Michael Urman
 Hector Urtubia
+Lukas Vacek
 Ville Vainio
 Andi Vajda
 Case Van Horsen
@@ -1400,6 +1408,7 @@
 David Watson
 Aaron Watters
 Henrik Weber
+Leon Weber
 Corran Webster
 Glyn Webster
 Phil Webster
diff --git a/Misc/HISTORY b/Misc/HISTORY
index 4280764..78006cd 100644
--- a/Misc/HISTORY
+++ b/Misc/HISTORY
@@ -18,13 +18,13 @@
 
 - Issue #16046: Fix loading sourceless legacy .pyo files.
 
-- Issue #16060: Fix refcounting bug when __trunc__ returns an object
-  whose __int__ gives a non-integer.  Patch by Serhiy Storchaka.
+- Issue #16060: Fix refcounting bug when `__trunc__()` returns an object whose
+  `__int__()` gives a non-integer.  Patch by Serhiy Storchaka.
 
 Extension Modules
 -----------------
 
-- Issue #16012: Fix a regression in pyexpat. The parser's UseForeignDTD()
+- Issue #16012: Fix a regression in pyexpat. The parser's `UseForeignDTD()`
   method doesn't require an argument again.
 
 
@@ -36,26 +36,26 @@
 Core and Builtins
 -----------------
 
-- Issue #15900: Fix reference leak in PyUnicode_TranslateCharmap().
+- Issue #15900: Fix reference leak in `PyUnicode_TranslateCharmap()`.
 
 - Issue #15926: Fix crash after multiple reinitializations of the interpreter.
 
 - Issue #15895: Fix FILE pointer leak in one error branch of
-  PyRun_SimpleFileExFlags() when filename points to a pyc/pyo file, closeit
-  is false an and set_main_loader() fails.
+  `PyRun_SimpleFileExFlags()` when filename points to a pyc/pyo file, closeit is
+  false and set_main_loader() fails.
 
 - Fixes for a few crash and memory leak regressions found by Coverity.
 
 Library
 -------
 
-- Issue #15882: Change _decimal to accept any coefficient tuple when
-  constructing infinities. This is done for backwards compatibility
-  with decimal.py: Infinity coefficients are undefined in _decimal
-  (in accordance with the specification).
+- Issue #15882: Change `_decimal` to accept any coefficient tuple when
+  constructing infinities. This is done for backwards compatibility with
+  decimal.py: Infinity coefficients are undefined in _decimal (in accordance
+  with the specification).
 
-- Issue #15925: Fix a regression in email.util where the parsedate() and
-  parsedate_tz() functions did not return None anymore when the argument could
+- Issue #15925: Fix a regression in `email.util` where the `parsedate()` and
+  `parsedate_tz()` functions did not return None anymore when the argument could
   not be parsed.
 
 Extension Modules
@@ -67,7 +67,7 @@
 - Issue #15977: Fix memory leak in Modules/_ssl.c when the function
   _set_npn_protocols() is called multiple times, thanks to Daniel Sommermann.
 
-- Issue #15969: faulthandler module: rename dump_tracebacks_later() to
+- Issue #15969: `faulthandler` module: rename dump_tracebacks_later() to
   dump_traceback_later() and cancel_dump_tracebacks_later() to
   cancel_dump_traceback_later().
 
@@ -83,35 +83,37 @@
 -----------------
 
 - Issue #13992: The trashcan mechanism is now thread-safe.  This eliminates
-  sporadic crashes in multi-thread programs when several long deallocator
-  chains ran concurrently and involved subclasses of built-in container
-  types.
+  sporadic crashes in multi-thread programs when several long deallocator chains
+  ran concurrently and involved subclasses of built-in container types.
 
-- Issue #15784: Modify OSError.__str__() to better distinguish between
-  errno error numbers and Windows error numbers.
+- Issue #15784: Modify `OSError`.__str__() to better distinguish between errno
+  error numbers and Windows error numbers.
 
 - Issue #15781: Fix two small race conditions in import's module locking.
 
 Library
 -------
 
-- Issue #15847: Fix a regression in argparse, which did not accept tuples
-  as argument lists anymore.
+- Issue #17158: Add 'symbols' to help() welcome message; clarify
+  'modules spam' messages.
 
-- Issue #15828: Restore support for C extensions in imp.load_module().
+- Issue #15847: Fix a regression in argparse, which did not accept tuples as
+  argument lists anymore.
 
-- Issue #15340: Fix importing the random module when /dev/urandom cannot
-  be opened.  This was a regression caused by the hash randomization patch.
+- Issue #15828: Restore support for C extensions in `imp.load_module()`.
 
-- Issue #10650: Deprecate the watchexp parameter of the Decimal.quantize()
+- Issue #15340: Fix importing the random module when ``/dev/urandom`` cannot be
+  opened.  This was a regression caused by the hash randomization patch.
+
+- Issue #10650: Deprecate the watchexp parameter of the `Decimal.quantize()`
   method.
 
-- Issue #15785: Modify window.get_wch() API of the curses module: return
-  a character for most keys, and an integer for special keys, instead of
-  always returning an integer. So it is now possible to distinguish special
-  keys like keypad keys.
+- Issue #15785: Modify `window.get_wch()` API of the curses module: return a
+  character for most keys, and an integer for special keys, instead of always
+  returning an integer. So it is now possible to distinguish special keys like
+  keypad keys.
 
-- Issue #14223: Fix window.addch() of the curses module for special characters
+- Issue #14223: Fix `window.addch()` of the curses module for special characters
   like curses.ACS_HLINE: the Python function addch(int) and addch(bytes) is now
   calling the C function waddch()/mvwaddch() (as it was done in Python 3.2),
   instead of wadd_wch()/mvwadd_wch(). The Python function addch(str) is still
@@ -127,10 +129,10 @@
 Documentation
 -------------
 
-- Issue #15814: The memoryview enhancements in 3.3.0 accidentally permitted
-  the hashing of multi-dimensional memorviews and memoryviews with multi-byte
-  item formats. The intended restrictions have now been documented - they
-  will be correctly enforced in 3.3.1
+- Issue #15814: The memoryview enhancements in 3.3.0 accidentally permitted the
+  hashing of multi-dimensional memoryviews and memoryviews with multi-byte item
+  formats. The intended restrictions have now been documented - they will be
+  correctly enforced in 3.3.1.
 
 
 What's New in Python 3.3.0 Release Candidate 1?
@@ -144,131 +146,123 @@
 - Issue #15573: memoryview comparisons are now performed by value with full
   support for any valid struct module format definition.
 
-- Issue #15316: When an item in the fromlist for __import__ doesn't exist,
+- Issue #15316: When an item in the fromlist for `__import__()` doesn't exist,
   don't raise an error, but if an exception is raised as part of an import do
   let that propagate.
 
-- Issue #15778: ensure that str(ImportError(msg)) returns a str
-  even when msg isn't a str.
+- Issue #15778: Ensure that ``str(ImportError(msg))`` returns a str even when
+  msg isn't a str.
 
-- Issue #2051: Source file permission bits are once again correctly
-  copied to the cached bytecode file. (The migration to importlib
-  reintroduced this problem because these was no regression test. A test
-  has been added as part of this patch)
+- Issue #2051: Source file permission bits are once again correctly copied to
+  the cached bytecode file. (The migration to importlib reintroduced this
+  problem because there was no regression test. A test has been added as part of
+  this patch)
 
-- Issue #15761: Fix crash when PYTHONEXECUTABLE is set on Mac OS X.
+- Issue #15761: Fix crash when ``PYTHONEXECUTABLE`` is set on Mac OS X.
 
-- Issue #15726: Fix incorrect bounds checking in PyState_FindModule.
-  Patch by Robin Schreiber.
+- Issue #15726: Fix incorrect bounds checking in PyState_FindModule.  Patch by
+  Robin Schreiber.
 
-- Issue #15604: Update uses of PyObject_IsTrue() to check for and handle
+- Issue #15604: Update uses of `PyObject_IsTrue()` to check for and handle
   errors correctly.  Patch by Serhiy Storchaka.
 
-- Issue #14846: importlib.FileFinder now handles the case where the
-  directory being searched is removed after a previous import attempt
+- Issue #14846: `importlib.FileFinder` now handles the case where the directory
+  being searched is removed after a previous import attempt.
 
 Library
 -------
 
-- Issue #13370: Ensure that ctypes works on Mac OS X when Python is
-  compiled using the clang compiler
+- Issue #13370: Ensure that ctypes works on Mac OS X when Python is compiled
+  using the clang compiler.
 
-- Issue #13072: The array module's 'u' format code is now deprecated and
-  will be removed in Python 4.0.
+- Issue #13072: The array module's 'u' format code is now deprecated and will be
+  removed in Python 4.0.
 
 - Issue #15544: Fix Decimal.__float__ to work with payload-carrying NaNs.
 
-- Issue #15249: BytesGenerator now correctly mangles From lines (when
+- Issue #15776: Allow pyvenv to work in existing directory with --clean.
+
+- Issue #15249: email's BytesGenerator now correctly mangles From lines (when
   requested) even if the body contains undecodable bytes.
 
 - Issue #15777: Fix a refleak in _posixsubprocess.
 
-- Issue ##665194: Update email.utils.localtime to use datetime.astimezone and
+- Issue #665194: Update `email.utils.localtime` to use datetime.astimezone and
   correctly handle historic changes in UTC offsets.
 
 - Issue #15199: Fix JavaScript's default MIME type to application/javascript.
   Patch by Bohuslav Kabrda.
 
-- Issue #12643: code.InteractiveConsole now respects sys.excepthook when
-  displaying exceptions (Patch by Aaron Iles)
+- Issue #12643: `code.InteractiveConsole` now respects `sys.excepthook` when
+  displaying exceptions.  Patch by Aaron Iles.
 
-- Issue #13579: string.Formatter now understands the 'a' conversion specifier.
+- Issue #13579: `string.Formatter` now understands the 'a' conversion specifier.
 
-- Issue #15793: Stack corruption in ssl.RAND_egd().
-  Patch by Serhiy Storchaka.
-
-- Issue #15595: Fix subprocess.Popen(universal_newlines=True)
-  for certain locales (utf-16 and utf-32 family). Patch by Chris Jerdonek.
+- Issue #15595: Fix ``subprocess.Popen(universal_newlines=True)`` for certain
+  locales (utf-16 and utf-32 family). Patch by Chris Jerdonek.
 
 - Issue #15477: In cmath and math modules, add workaround for platforms whose
   system-supplied log1p function doesn't respect signs of zeros.
 
-- Issue #15715: importlib.__import__() will silence an ImportError when the use
-  of fromlist leads to a failed import.
+- Issue #15715: `importlib.__import__()` will silence an ImportError when the
+  use of fromlist leads to a failed import.
 
-- Issue #14669: Fix pickling of connections and sockets on MacOSX
-  by sending/receiving an acknowledgment after file descriptor transfer.
-  TestPicklingConnection has been reenabled for MacOSX.
+- Issue #14669: Fix pickling of connections and sockets on Mac OS X by
+  sending/receiving an acknowledgment after file descriptor transfer.
+  TestPicklingConnection has been reenabled for Mac OS X.
 
 - Issue #11062: Fix adding a message from file to Babyl mailbox.
 
-- Issue #15646: Prevent equivalent of a fork bomb when using
-  multiprocessing on Windows without the "if __name__ == '__main__'"
-  idiom.
+- Issue #15646: Prevent equivalent of a fork bomb when using `multiprocessing`
+  on Windows without the ``if __name__ == '__main__'`` idiom.
 
-- Issue #15678: Fix IDLE menus when started from OS X command line
-  (3.3.0b2 regression).
+IDLE
+----
 
-C API
------
-
-Extension Modules
------------------
-
-Tools/Demos
------------
+- Issue #15678: Fix IDLE menus when started from OS X command line (3.3.0b2
+  regression).
 
 Documentation
 -------------
 
-- Issue #14674: Add a discussion of the json module's standard compliance.
+- Touched up the Python 2 to 3 porting guide.
+
+- Issue #14674: Add a discussion of the `json` module's standard compliance.
   Patch by Chris Rebert.
 
 - Create a 'Concurrent Execution' section in the docs, and split up the
   'Optional Operating System Services' section to use a more user-centric
-  classification scheme (splitting them across the new CE section, IPC and
-  text processing). Operating system limitatons can be reflected with
-  the Sphinx ``:platform:`` tag, it doesn't make sense as part of the Table of
-  Contents.
+  classification scheme (splitting them across the new CE section, IPC and text
+  processing). Operating system limitations can be reflected with the Sphinx
+  ``:platform:`` tag; it doesn't make sense as part of the Table of Contents.
 
-- Issue #4966: Bring the sequence docs up to date for the Py3k transition
-  and the many language enhancements since they were original written
+- Issue #4966: Bring the sequence docs up to date for the Py3k transition and
+  the many language enhancements since they were originally written.
 
 - The "path importer" misnomer has been replaced with Eric Snow's
-  more-awkward-but-at-least-not-wrong suggestion of "path based finder" in
-  the import system reference docs
+  more-awkward-but-at-least-not-wrong suggestion of "path based finder" in the
+  import system reference docs.
 
-- Issue #15640: Document importlib.abc.Finder as deprecated.
+- Issue #15640: Document `importlib.abc.Finder` as deprecated.
 
-- Issue #15630: Add an example for "continue" stmt in the tutorial. Patch by
+- Issue #15630: Add an example for "continue" stmt in the tutorial.  Patch by
   Daniel Ellis.
 
 Tests
 -----
 
 - Issue #15747: ZFS always returns EOPNOTSUPP when attempting to set the
-  UF_IMMUTABLE flag (via either chflags or lchflags); refactor affected
-  tests in test_posix.py to account for this.
+  UF_IMMUTABLE flag (via either chflags or lchflags); refactor affected tests in
+  test_posix.py to account for this.
 
-- Issue #15285: Refactor the approach for testing connect timeouts using
-  two external hosts that have been configured specifically for this type
-  of test.
+- Issue #15285: Refactor the approach for testing connect timeouts using two
+  external hosts that have been configured specifically for this type of test.
 
-- Issue #15743: Remove the deprecated method usage in urllib tests. Patch by
+- Issue #15743: Remove the deprecated method usage in `urllib` tests. Patch by
   Jeff Knupp.
 
-- Issue #15615: Add some tests for the json module's handling of invalid
-  input data.  Patch by Kushal Das.
+- Issue #15615: Add some tests for the `json` module's handling of invalid input
+  data.  Patch by Kushal Das.
 
 Build
 -----
@@ -277,11 +271,11 @@
 
 - Pick up 32-bit launcher from PGO directory on 64-bit PGO build.
 
-- Drop PC\python_nt.h as it's not used. Add input dependency on custom
+- Drop ``PC\python_nt.h`` as it's not used.  Add input dependency on custom
   build step.
 
-- Issue #15511: Drop explicit dependency on pythonxy.lib from _decimal
-  amd64 configuration.
+- Issue #15511: Drop explicit dependency on pythonxy.lib from _decimal amd64
+  configuration.
 
 - Add missing PGI/PGO configurations for pywlauncher.
 
@@ -296,15 +290,15 @@
 Core and Builtins
 -----------------
 
-- Issue #15568: Fix the return value of "yield from" when StopIteration is
+- Issue #15568: Fix the return value of ``yield from`` when StopIteration is
   raised by a custom iterator.
 
-- Issue #13119: sys.stdout and sys.stderr are now using "\r\n" newline on
+- Issue #13119: `sys.stdout` and `sys.stderr` are now using "\r\n" newline on
   Windows, as Python 2.
 
 - Issue #15534: Fix the fast-search function for non-ASCII Unicode strings.
 
-- Issue #15508: Fix the docstring for __import__ to have the proper default
+- Issue #15508: Fix the docstring for `__import__()` to have the proper default
   value of 0 for 'level' and to not mention negative levels since they are not
   supported.
 
@@ -317,17 +311,17 @@
   byte code files) equal between 32-bit and 64-bit systems.
 
 - Issue #1692335: Move initial exception args assignment to
-  "BaseException.__new__" to help pickling of naive subclasses.
+  `BaseException.__new__()` to help pickling of naive subclasses.
 
-- Issue #12834: Fix PyBuffer_ToContiguous() for non-contiguous arrays.
+- Issue #12834: Fix `PyBuffer_ToContiguous()` for non-contiguous arrays.
 
-- Issue #15456: Fix code __sizeof__ after #12399 change.  Patch by Serhiy
+- Issue #15456: Fix code `__sizeof__()` after #12399 change.  Patch by Serhiy
   Storchaka.
 
 - Issue #15404: Refleak in PyMethodObject repr.
 
-- Issue #15394: An issue in PyModule_Create that caused references to be leaked
-  on some error paths has been fixed.  Patch by Julia Lawall.
+- Issue #15394: An issue in `PyModule_Create()` that caused references to be
+  leaked on some error paths has been fixed.  Patch by Julia Lawall.
 
 - Issue #15368: An issue that caused bytecode generation to be non-deterministic
   has been fixed.
@@ -335,7 +329,7 @@
 - Issue #15202: Consistently use the name "follow_symlinks" for new parameters
   in os and shutil functions.
 
-- Issue #15314: __main__.__loader__ is now set correctly during interpreter
+- Issue #15314: ``__main__.__loader__`` is now set correctly during interpreter
   startup.
 
 - Issue #15111: When a module imported using 'from import' has an ImportError
@@ -350,57 +344,62 @@
 - Issue #15110: Fix the tracebacks generated by "import xxx" to not show the
   importlib stack frames.
 
+- Issue #16369: Global PyTypeObjects not initialized with PyType_Ready(...).
+
 - Issue #15020: The program name used to search for Python's path is now
   "python3" under Unix, not "python".
 
-- Issue #15033: Fix the exit status bug when modules invoked using -m swith,
+- Issue #15897: zipimport.c doesn't check return value of fseek().
+  Patch by Felipe Cruz.
+
+- Issue #15033: Fix the exit status bug when modules invoked using -m switch,
   return the proper failure return value (1). Patch contributed by Jeff Knupp.
 
-- Issue #15229: An OSError subclass whose __init__ doesn't call back
+- Issue #15229: An `OSError` subclass whose __init__ doesn't call back
   OSError.__init__ could produce incomplete instances, leading to crashes when
   calling str() on them.
 
-- Issue 15307: Virtual environments now use symlinks with framework builds on
+- Issue #15307: Virtual environments now use symlinks with framework builds on
   Mac OS X, like other POSIX builds.
 
 Library
 -------
 
-- Issue #15424: Add a __sizeof__ implementation for array objects.  Patch by
+- Issue #14590: configparser now correctly strips inline comments when delimiter
+  occurs earlier without preceding space.
+
+- Issue #15424: Add a `__sizeof__()` implementation for array objects.  Patch by
   Ludwig Hähne.
 
 - Issue #15576: Allow extension modules to act as a package's __init__ module.
 
-- Issue #15502: Have importlib.invalidate_caches() work on sys.meta_path instead
-  of sys.path_importer_cache.
+- Issue #15502: Have `importlib.invalidate_caches()` work on `sys.meta_path`
+  instead of `sys.path_importer_cache`.
 
 - Issue #15163: Pydoc shouldn't list __loader__ as module data.
 
 - Issue #15471: Do not use mutable objects as defaults for
-  importlib.__import__().
+  `importlib.__import__()`.
 
 - Issue #15559: To avoid a problematic failure mode when passed to the bytes
-  constructor, objects in the ipaddress module no longer implement __index__
-  (they still implement __int__ as appropriate)
+  constructor, objects in the ipaddress module no longer implement `__index__()`
+  (they still implement `__int__()` as appropriate).
 
 - Issue #15546: Fix handling of pathological input data in the peek() and
   read1() methods of the BZ2File, GzipFile and LZMAFile classes.
 
-- Issue #13052: Fix IDLE crashing when replace string in Search/Replace dialog
-  ended with '\'. Patch by Roger Serwy.
+- Issue #12655: Instead of requiring a custom type, `os.sched_getaffinity()` and
+  `os.sched_setaffinity()` now use regular sets of integers to represent the
+  CPUs a process is restricted to.
 
-- Issue #12655: Instead of requiring a custom type, os.sched_getaffinity and
-  os.sched_setaffinity now use regular sets of integers to represent the CPUs a
-  process is restricted to.
-
-- Issue #15538: Fix compilation of the getnameinfo() / getaddrinfo() emulation
-  code.  Patch by Philipp Hagemeister.
+- Issue #15538: Fix compilation of the `socket.getnameinfo()` /
+  `socket.getaddrinfo()` emulation code.  Patch by Philipp Hagemeister.
 
 - Issue #15519: Properly expose WindowsRegistryFinder in importlib (and use the
-  correct term for it). Original patch by Eric Snow.
+  correct term for it).  Original patch by Eric Snow.
 
 - Issue #15502: Bring the importlib ABCs into line with the current state of the
-  import protocols given PEP 420. Original patch by Eric Snow.
+  import protocols given PEP 420.  Original patch by Eric Snow.
 
 - Issue #15499: Launching a webbrowser in Unix used to sleep for a few seconds.
   Original patch by Anton Barkovsky.
@@ -408,37 +407,36 @@
 - Issue #15463: The faulthandler module truncates strings to 500 characters,
   instead of 100, to be able to display long file paths.
 
-- Issue #6056: Make multiprocessing use setblocking(True) on the sockets it
+- Issue #6056: Make `multiprocessing` use setblocking(True) on the sockets it
   uses.  Original patch by J Derek Wilson.
 
 - Issue #15364: Fix sysconfig.get_config_var('srcdir') to be an absolute path.
 
-- Issue #15041: Update "see also" list in tkinter documentation.
+- Issue #15413: `os.times()` had disappeared under Windows.
 
-- Issue #15413: os.times() had disappeared under Windows.
-
-- Issue #15402: An issue in the struct module that caused sys.getsizeof to
+- Issue #15402: An issue in the struct module that caused `sys.getsizeof()` to
   return incorrect results for struct.Struct instances has been fixed.  Initial
   patch by Serhiy Storchaka.
 
-- Issue #15232: When mangle_from is True, email.Generator now correctly mangles
-  lines that start with 'From ' that occur in a MIME preamble or epilogue.
+- Issue #15232: When mangle_from is True, `email.Generator` now correctly
+  mangles lines that start with 'From ' that occur in a MIME preamble or
+  epilogue.
 
 - Issue #15094: Incorrectly placed #endif in _tkinter.c.  Patch by Serhiy
   Storchaka.
 
-- Issue #13922: argparse no longer incorrectly strips '--'s that appear after
+- Issue #13922: `argparse` no longer incorrectly strips '--'s that appear after
   the first one.
 
-- Issue #12353: argparse now correctly handles null argument values.
+- Issue #12353: `argparse` now correctly handles null argument values.
 
 - Issue #10017, issue #14998: Fix TypeError using pprint on dictionaries with
   user-defined types as keys or other unorderable keys.
 
-- Issue #15397: inspect.getmodulename() is now based directly on importlib via a
-  new importlib.machinery.all_suffixes() API.
+- Issue #15397: `inspect.getmodulename()` is now based directly on importlib via
+  a new `importlib.machinery.all_suffixes()` API.
 
-- Issue #14635: telnetlib will use poll() rather than select() when possible to
+- Issue #14635: `telnetlib` will use poll() rather than select() when possible to
   avoid failing due to the select() file descriptor limit.
 
 - Issue #15180: Clarify posixpath.join() error message when mixing str & bytes.
@@ -455,7 +453,7 @@
 - Issue #15233: Python now guarantees that callables registered with the atexit
   module will be called in a deterministic order.
 
-- Issue #15238: shutil.copystat now copies Linux "extended attributes".
+- Issue #15238: `shutil.copystat()` now copies Linux "extended attributes".
 
 - Issue #15230: runpy.run_path now correctly sets __package__ as described in
   the documentation.
@@ -465,42 +463,42 @@
 - Issue #15294: Fix a regression in pkgutil.extend_path()'s handling of nested
   namespace packages.
 
-- Issue #15056: imp.cache_from_source() and source_from_cache() raise
-  NotImplementedError when sys.implementation.cache_tag is set to None.
+- Issue #15056: `imp.cache_from_source()` and `imp.source_from_cache()` raise
+  NotImplementedError when `sys.implementation.cache_tag` is set to None.
 
-- Issue #15256: Grammatical mistake in exception raised by imp.find_module().
+- Issue #15256: Grammatical mistake in exception raised by `imp.find_module()`.
 
-- Issue #5931: wsgiref environ variable SERVER_SOFTWARE will specify an
+- Issue #5931: `wsgiref` environ variable SERVER_SOFTWARE will specify an
   implementation specific term like CPython, Jython instead of generic "Python".
 
 - Issue #13248: Remove obsolete argument "max_buffer_size" of BufferedWriter and
   BufferedRWPair, from the io module.
 
-- Issue #13248: Remove obsolete argument "version" of argparse.ArgumentParser.
+- Issue #13248: Remove obsolete argument "version" of `argparse.ArgumentParser`.
 
 - Issue #14814: Implement more consistent ordering and sorting behaviour for
   ipaddress objects.
 
-- Issue #14814: ipaddress network objects correctly return NotImplemented when
+- Issue #14814: `ipaddress` network objects correctly return NotImplemented when
   compared to arbitrary objects instead of raising TypeError.
 
 - Issue #14990: Correctly fail with SyntaxError on invalid encoding declaration.
 
-- Issue #14814: ipaddress now provides more informative error messages when
+- Issue #14814: `ipaddress` now provides more informative error messages when
   constructing instances directly (changes permitted during beta due to
   provisional API status).
 
-- Issue #15247: FileIO now raises an error when given a file descriptor pointing
-  to a directory.
+- Issue #15247: `io.FileIO` now raises an error when given a file descriptor
+  pointing to a directory.
 
 - Issue #15261: Stop os.stat(fd) crashing on Windows when fd not open.
 
-- Issue #15166: Implement imp.get_tag() using sys.implementation.cache_tag.
+- Issue #15166: Implement `imp.get_tag()` using `sys.implementation.cache_tag`.
 
-- Issue #15210: Catch KeyError when importlib.__init__ can't find
+- Issue #15210: Catch KeyError when `importlib.__init__()` can't find
   _frozen_importlib in sys.modules, not ImportError.
 
-- Issue #15030: importlib.abc.PyPycLoader now supports the new source size
+- Issue #15030: `importlib.abc.PyPycLoader` now supports the new source size
   header field in .pyc files.
 
 - Issue #5346: Preserve permissions of mbox, MMDF and Babyl mailbox files on
@@ -513,7 +511,7 @@
   renamed over the old file when flush() is called on an mbox, MMDF or Babyl
   mailbox.
 
-- Issue 10924: Fixed crypt.mksalt() to use a RNG that is suitable for
+- Issue #10924: Fixed `crypt.mksalt()` to use a RNG that is suitable for
   cryptographic purpose.
 
 - Issue #15184: Ensure consistent results of OS X configuration tailoring for
@@ -524,10 +522,10 @@
 C API
 -----
 
-- Issue #15610: PyImport_ImportModuleEx() now uses a 'level' of 0 instead of -1.
+- Issue #15610: `PyImport_ImportModuleEx()` now uses a 'level' of 0 instead of -1.
 
-- Issues #15169, #14599: Strip out the C implementation of
-  imp.source_from_cache() used by PyImport_ExecCodeModuleWithPathnames() and
+- Issue #15169, issue #14599: Strip out the C implementation of
+  `imp.source_from_cache()` used by PyImport_ExecCodeModuleWithPathnames() and
   used the Python code instead. Leads to PyImport_ExecCodeModuleObject() to not
   try to infer the source path from the bytecode path as
   PyImport_ExecCodeModuleWithPathnames() does.
@@ -535,14 +533,17 @@
 Extension Modules
 -----------------
 
-- Issue #15676: Now "mmap" check for empty files before doing the
-  offset check.  Patch by Steven Willis.
-
-- Issue #6493: An issue in ctypes on Windows that caused structure bitfields
-  of type ctypes.c_uint32 and width 32 to incorrectly be set has been fixed.
+- Issue #6493: An issue in ctypes on Windows that caused structure bitfields of
+  type `ctypes.c_uint32` and width 32 to incorrectly be set has been fixed.
 
 - Issue #15194: Update libffi to the 3.0.11 release.
 
+IDLE
+----
+
+- Issue #13052: Fix IDLE crashing when replace string in Search/Replace dialog
+  ended with ``\``.  Patch by Roger Serwy.
+
 Tools/Demos
 -----------
 
@@ -562,8 +563,10 @@
 Documentation
 -------------
 
-- Issue #15444: Use proper spelling for non-ASCII contributor names.  Patch
-  by Serhiy Storchaka.
+- Issue #15041: Update "see also" list in tkinter documentation.
+
+- Issue #15444: Use proper spelling for non-ASCII contributor names.  Patch by
+  Serhiy Storchaka.
 
 - Issue #15295: Reorganize and rewrite the documentation on the import system.
 
@@ -578,25 +581,29 @@
   "changed" since they will no longer work with modules directly imported by
   import itself.
 
-- Issue #13557: Clarify effect of giving two different namespaces to exec or
-  execfile().
+- Issue #13557: Clarify effect of giving two different namespaces to `exec()` or
+  `execfile()`.
 
-- Issue #15250: Document that filecmp.dircmp compares files shallowly. Patch
+- Issue #15250: Document that `filecmp.dircmp()` compares files shallowly. Patch
   contributed by Chris Jerdonek.
 
+- Issue #15442: Expose the default list of directories ignored by
+  `filecmp.dircmp()` as a module attribute, and expand the list to more modern
+  values.
+
 Tests
 -----
 
-- Issue #15467: Move helpers for __sizeof__ tests into test_support.  Patch by
-  Serhiy Storchaka.
+- Issue #15467: Move helpers for `__sizeof__()` tests into test_support.  Patch
+  by Serhiy Storchaka.
 
 - Issue #15320: Make iterating the list of tests thread-safe when running tests
   in multiprocess mode. Patch by Chris Jerdonek.
 
-- Issue #15168: Move importlib.test to test.test_importlib.
+- Issue #15168: Move `importlib.test` to `test.test_importlib`.
 
 - Issue #15091: Reactivate a test on UNIX which was failing thanks to a
-  forgotten importlib.invalidate_caches() call.
+  forgotten `importlib.invalidate_caches()` call.
 
 - Issue #15230: Adopted a more systematic approach in the runpy tests.
 
@@ -629,6 +636,8 @@
 
 - Issue #14018: Fix OS X Tcl/Tk framework checking when using OS X SDKs.
 
+- Issue #16256: OS X installer now sets correct permissions for doc directory.
+
 - Issue #15431: Add _freeze_importlib project to regenerate importlib.h on
   Windows. Patch by Kristján Valur Jónsson.
 
@@ -664,14 +673,9 @@
 
 - Issue #11626: Add _SizeT functions to stable ABI.
 
-- Issue #15146: Add PyType_FromSpecWithBases. Patch by Robin Schreiber.
-
 - Issue #15142: Fix reference leak when deallocating instances of types
   created using PyType_FromSpec().
 
-- Issue #15042: Add PyState_AddModule and PyState_RemoveModule. Add version
-  guard for Py_LIMITED_API additions. Patch by Robin Schreiber.
-
 - Issue #10053: Don't close FDs when FileIO.__init__ fails. Loosely based on
   the work by Hirokazu Yamamoto.
 
@@ -699,9 +703,6 @@
 Library
 -------
 
-- Issue #9803: Don't close IDLE on saving if breakpoint is open.
-  Patch by Roger Serwy.
-
 - Issue #12288: Consider '0' and '0.0' as valid initialvalue
   for tkinter SimpleDialog.
 
@@ -720,14 +721,8 @@
 - Issue #15514: Correct __sizeof__ support for cpu_set.
   Patch by Serhiy Storchaka.
 
-- Issue #15187: Bugfix: remove temporary directories test_shutil was leaving
-  behind.
-
 - Issue #15177: Added dir_fd parameter to os.fwalk().
 
-- Issue #15176: Clarified behavior, documentation, and implementation
-  of os.listdir().
-
 - Issue #15061: Re-implemented hmac.compare_digest() in C to prevent further
   timing analysis and to support all buffer protocol aware objects as well as
   ASCII only str instances safely.
@@ -827,10 +822,6 @@
 - Issue #15006: Allow equality comparison between naive and aware
   time or datetime objects.
 
-- Issue #14982: Document that pkgutil's iteration functions require the
-  non-standard iter_modules() method to be defined by an importer (something
-  the importlib importers do not define).
-
 - Issue #15036: Mailbox no longer throws an error if a flush is done
   between operations when removing or changing multiple items in mbox,
   MMDF, or Babyl mailboxes.
@@ -898,9 +889,6 @@
 
 - Issue #14969: Better handling of exception chaining in contextlib.ExitStack
 
-- Issue #14962: Update text coloring in IDLE shell window after changing
-  options.  Patch by Roger Serwy.
-
 - Issue #14963: Convert contextlib.ExitStack.__exit__ to use an iterative
   algorithm (Patch by Alon Horev)
 
@@ -913,6 +901,11 @@
 C-API
 -----
 
+- Issue #15146: Add PyType_FromSpecWithBases. Patch by Robin Schreiber.
+
+- Issue #15042: Add PyState_AddModule and PyState_RemoveModule. Add version
+  guard for Py_LIMITED_API additions. Patch by Robin Schreiber.
+
 - Issue #13783: Inadvertent additions to the public C API in the PEP 380
   implementation have either been removed or marked as private interfaces.
 
@@ -921,9 +914,25 @@
 
 - Issue #15000: Support the "unique" x32 architecture in _posixsubprocess.c.
 
+IDLE
+----
+
+- Issue #9803: Don't close IDLE on saving if breakpoint is open.
+  Patch by Roger Serwy.
+
+- Issue #14962: Update text coloring in IDLE shell window after changing
+  options.  Patch by Roger Serwy.
+
 Documentation
 -------------
 
+- Issue #15176: Clarified behavior, documentation, and implementation
+  of os.listdir().
+
+- Issue #14982: Document that pkgutil's iteration functions require the
+  non-standard iter_modules() method to be defined by an importer (something
+  the importlib importers do not define).
+
 - Issue #15081: Document PyState_FindModule.
   Patch by Robin Schreiber.
 
@@ -932,6 +941,9 @@
 Tests
 -----
 
+- Issue #15187: Bugfix: remove temporary directories test_shutil was leaving
+  behind.
+
 - Issue #14769: test_capi now has SkipitemTest, which cleverly checks
   for "parity" between PyArg_ParseTuple() and the Python/getargs.c static
   function skipitem() for all possible "format units".
@@ -1020,34 +1032,18 @@
 - Issue #14700: Fix two broken and undefined-behaviour-inducing overflow checks
   in old-style string formatting.
 
-- Issue #14705: The PyArg_Parse() family of functions now support the 'p' format
-  unit, which accepts a "boolean predicate" argument.  It converts any Python
-  value into an integer--0 if it is "false", and 1 otherwise.
-
 Library
 -------
 
 - Issue #14690: Use monotonic clock instead of system clock in the sched,
   subprocess and trace modules.
 
-- Issue #14958: Change IDLE systax highlighting to recognize all string and
-  byte literals supported in Python 3.3.
-
-- Issue #10997: Prevent a duplicate entry in IDLE's "Recent Files" menu.
-
 - Issue #14443: Tell rpmbuild to use the correct version of Python in
   bdist_rpm. Initial patch by Ross Lagerwall.
 
-- Issue #14929: Stop Idle 3.x from closing on Unicode decode errors when
-  grepping. Patch by Roger Serwy.
-
 - Issue #12515: email now registers a defect if it gets to EOF while parsing
   a MIME part without seeing the closing MIME boundary.
 
-- Issue #12510: Attempting to get invalid tooltip no longer closes Idle.
-  Other tooltipss have been corrected or improved and the number of tests
-  has been tripled. Original patch by Roger Serwy.
-
 - Issue #1672568: email now always decodes base64 payloads, adding padding and
   ignoring non-base64-alphabet characters if needed, and registering defects
   for any such problems.
@@ -1081,9 +1077,6 @@
 - Issue #14548: Make multiprocessing finalizers check pid before
   running to cope with possibility of gc running just after fork.
 
-- Issue #14863: Update the documentation of os.fdopen() to reflect the
-  fact that it's only a thin wrapper around open() anymore.
-
 - Issue #14036: Add an additional check to validate that port in urlparse does
   not go in illegal range and returns None.
 
@@ -1210,6 +1203,21 @@
 - Issue #14127 and #10148: shutil.copystat now preserves exact mtime and atime
   on filesystems providing nanosecond resolution.
 
+IDLE
+----
+
+- Issue #14958: Change IDLE syntax highlighting to recognize all string and
+  byte literals supported in Python 3.3.
+
+- Issue #10997: Prevent a duplicate entry in IDLE's "Recent Files" menu.
+
+- Issue #14929: Stop IDLE 3.x from closing on Unicode decode errors when
+  grepping. Patch by Roger Serwy.
+
+- Issue #12510: Attempting to get invalid tooltip no longer closes IDLE.
+  Other tooltips have been corrected or improved and the number of tests
+  has been tripled. Original patch by Roger Serwy.
+
 Tools/Demos
 -----------
 
@@ -1228,9 +1236,19 @@
 
 - Issue #13210: Windows build now uses VS2010, ported from VS2008.
 
+C-API
+-----
+
+- Issue #14705: The PyArg_Parse() family of functions now support the 'p' format
+  unit, which accepts a "boolean predicate" argument.  It converts any Python
+  value into an integer--0 if it is "false", and 1 otherwise.
+
 Documentation
 -------------
 
+- Issue #14863: Update the documentation of os.fdopen() to reflect the
+  fact that it's only a thin wrapper around open() anymore.
+
 - Issue #14588: The language reference now accurately documents the Python 3
   class definition process. Patch by Nick Coghlan.
 
@@ -1279,9 +1297,6 @@
 - Issue #14339: Speed improvements to bin, oct and hex functions.  Patch by
   Serhiy Storchaka.
 
-- Issue #14098: New functions PyErr_GetExcInfo and PyErr_SetExcInfo.
-  Patch by Stefan Behnel.
-
 - Issue #14385: It is now possible to use a custom type for the __builtins__
   namespace, instead of a dict. It can be used for sandboxing for example.
   Raise also a NameError instead of ImportError if __build_class__ name if not
@@ -1431,12 +1446,6 @@
 
 - Don't Py_DECREF NULL variable in io.IncrementalNewlineDecoder.
 
-- Issue #8515: Set __file__ when run file in IDLE.
-  Initial patch by Bruce Frederiksen.
-
-- Issue #14496: Fix wrong name in idlelib/tabbedpages.py.
-  Patch by Popa Claudiu.
-
 - Issue #3033: Add displayof parameter to tkinter font. Patch by Guilherme Polo.
 
 - Issue #14482: Raise a ValueError, not a NameError, when trying to create
@@ -1472,6 +1481,15 @@
 - Issue #14355: Regrtest now supports the standard unittest test loading, and
   will use it if a test file contains no `test_main` method.
 
+IDLE
+----
+
+- Issue #8515: Set __file__ when run file in IDLE.
+  Initial patch by Bruce Frederiksen.
+
+- Issue #14496: Fix wrong name in idlelib/tabbedpages.py.
+  Patch by Popa Claudiu.
+
 Tools / Demos
 -------------
 
@@ -1481,6 +1499,12 @@
 - Issue #13165: stringbench is now available in the Tools/stringbench folder.
   It used to live in its own SVN project.
 
+C-API
+-----
+
+- Issue #14098: New functions PyErr_GetExcInfo and PyErr_SetExcInfo.
+  Patch by Stefan Behnel.
+
 
 What's New in Python 3.3.0 Alpha 2?
 ===================================
@@ -1532,16 +1556,9 @@
 
 - Issue #5136: deprecate old, unused functions from tkinter.
 
-- Issue #14409: IDLE now properly executes commands in the Shell window
-  when it cannot read the normal config files on startup and
-  has to use the built-in default key bindings.
-  There was previously a bug in one of the defaults.
-
 - Issue #14416: syslog now defines the LOG_ODELAY and LOG_AUTHPRIV constants
   if they are defined in <syslog.h>.
 
-- IDLE can be launched as python -m idlelib
-
 - Issue #14295: Add unittest.mock
 
 - Issue #7652: Add --with-system-libmpdec option to configure for linking
@@ -1567,9 +1584,6 @@
   up the decimal module. Performance gains of the new C implementation are
   between 10x and 100x, depending on the application.
 
-- Issue #3573: IDLE hangs when passing invalid command line args
-  (directory(ies) instead of file(s)) (Patch by Guilherme Polo)
-
 - Issue #14269: SMTPD now conforms to the RFC and requires a HELO command
   before MAIL, RCPT, or DATA.
 
@@ -1601,8 +1615,6 @@
   denial of service due to hash collisions.  Patch by David Malcolm with some
   modifications by the expat project.
 
-- Issue #14200: Idle shell crash on printing non-BMP unicode character.
-
 - Issue #12818: format address no longer needlessly \ escapes ()s in names when
   the name ends up being quoted.
 
@@ -1618,8 +1630,6 @@
 
 - Issue #989712: Support using Tk without a mainloop.
 
-- Issue #5219: Prevent event handler cascade in IDLE.
-
 - Issue #3835: Refuse to use unthreaded Tcl in threaded Python.
 
 - Issue #2843: Add new Tk API to Tkinter.
@@ -1848,10 +1858,6 @@
   on POSIX systems supporting anonymous memory mappings.  Patch by
   Charles-François Natali.
 
-- Issue #13452: PyUnicode_EncodeDecimal() doesn't support error handlers
-  different than "strict" anymore. The caller was unable to compute the
-  size of the output buffer: it depends on the error handler.
-
 - PEP 3155 / issue #13448: Qualified name for classes and functions.
 
 - Issue #13436: Fix a bogus error message when an AST object was passed
@@ -1942,10 +1948,6 @@
 
 - PEP 3151 / issue #12555: reworking the OS and IO exception hierarchy.
 
-- Issue #13560: Add PyUnicode_DecodeLocale(), PyUnicode_DecodeLocaleAndSize()
-  and PyUnicode_EncodeLocale() functions to the C API to decode/encode from/to
-  the current locale encoding.
-
 - Add internal API for static strings (_Py_identifier et al.).
 
 - Issue #13063: the Windows error ERROR_NO_DATA (numbered 232 and described
@@ -2028,7 +2030,7 @@
   deallocator calls one of the methods on the type (e.g. when subclassing
   IOBase).  Diagnosis and patch by Davide Rizzo.
 
-- Issue #9611, #9015: FileIO.read() clamps the length to INT_MAX on Windows.
+- Issue #9611, Issue #9015: FileIO.read() clamps the length to INT_MAX on Windows.
 
 - Issue #9642: Uniformize the tests on the availability of the mbcs codec, add
   a new HAVE_MBCS define.
@@ -2191,17 +2193,11 @@
   PyUnicode_AsUTF8String() and PyUnicode_AsEncodedString(unicode, "utf-8",
   NULL).
 
-- Issue #10831: PyUnicode_FromFormat() supports %li, %lli and %zi formats.
-
 - Issue #10829: Refactor PyUnicode_FromFormat(), use the same function to parse
   the format string in the 3 steps, fix crashes on invalid format strings.
 
 - Issue #13007: whichdb should recognize gdbm 1.9 magic numbers.
 
-- Issue #11246: Fix PyUnicode_FromFormat("%V") to decode the byte string from
-  UTF-8 (with replace error handler) instead of ISO-8859-1 (in strict mode).
-  Patch written by Ray Allen.
-
 - Issue #11286: Raise a ValueError from calling PyMemoryView_FromBuffer with
   a buffer struct having a NULL data pointer.
 
@@ -2211,9 +2207,6 @@
 - Issue #11828: startswith and endswith now accept None as slice index.
   Patch by Torsten Becker.
 
-- Issue #10830: Fix PyUnicode_FromFormatV("%c") for non-BMP characters on
-  narrow build.
-
 - Issue #11168: Remove filename debug variable from PyEval_EvalFrameEx().
   It encoded the Unicode filename to UTF-8, but the encoding fails on
   undecodable filename (on surrogate characters) which raises an unexpected
@@ -2255,15 +2248,9 @@
   are dead or dying.  Moreover, the implementation is now O(1) rather than
   O(n).
 
-- Issue #13125: Silence spurious test_lib2to3 output when in non-verbose mode.
-  Patch by Mikhail Novikov.
-
 - Issue #11841: Fix comparison bug with 'rc' versions in packaging.version.
   Patch by Filip GruszczyƄski.
 
-- Issue #13447: Add a test file to host regression tests for bugs in the
-  scripts found in the Tools directory.
-
 - Issue #6884: Fix long-standing bugs with MANIFEST.in parsing in distutils
   on Windows.  Also fixed in packaging.
 
@@ -2319,9 +2306,6 @@
   authenticating (since the result may change, according to RFC 4643).
   Patch by Hynek Schlawack.
 
-- Issue #13989: Document that GzipFile does not support text mode, and give a
-  more helpful error message when opened with an invalid mode string.
-
 - Issue #13590: On OS X 10.7 and 10.6 with Xcode 4.2, building
   Distutils-based packages with C extension modules may fail because
   Apple has removed gcc-4.2, the version used to build python.org
@@ -2338,10 +2322,6 @@
 - Issue #13960: HTMLParser is now able to handle broken comments when
   strict=False.
 
-- Issue #13921: Undocument and clean up sqlite3.OptimizedUnicode,
-  which is obsolete in Python 3.x. It's now aliased to str for
-  backwards compatibility.
-
 - When '' is a path (e.g. in sys.path), make sure __file__ uses the current
   working directory instead of '' in importlib.
 
@@ -2363,11 +2343,6 @@
 - Issue #10811: Fix recursive usage of cursors. Instead of crashing,
   raise a ProgrammingError now.
 
-- Issue #10881: Fix test_site failure with OS X framework builds.
-
-- Issue #964437: Make IDLE help window non-modal.
-  Patch by Guilherme Polo and Roger Serwy.
-
 - Issue #13734: Add os.fwalk(), a directory walking function yielding file
   descriptors.
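 
   A minimal usage sketch (illustrative only; dir_fd-aware calls such as
   os.stat(..., dir_fd=...) require platform support):
 
       import os
 
       # os.fwalk() walks like os.walk() but also yields an open file
       # descriptor for each directory visited.
       for dirpath, dirnames, filenames, dirfd in os.fwalk("."):
           for name in filenames:
               st = os.stat(name, dir_fd=dirfd)   # stat relative to dirfd
               print(dirpath, name, st.st_size)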
 
@@ -2377,16 +2352,8 @@
 
 - Issue #11805: package_data in setup.cfg should allow more than one value.
 
-- Issue #13933: IDLE auto-complete did not work with some imported
-  module, like hashlib.  (Patch by Roger Serwy)
-
-- Issue #13901: Prevent test_distutils failures on OS X with --enable-shared.
-
 - Issue #13676: Handle strings with embedded zeros correctly in sqlite3.
 
-- Issue #13506: Add '' to path for IDLE Shell when started and restarted with Restart Shell.
-  Original patches by Marco Scataglini and Roger Serwy.
-
 - Issue #8828: Add new function os.replace(), for cross-platform renaming
   with overwriting.
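 
   A minimal usage sketch (the file names are hypothetical):
 
       import os
 
       with open("data.tmp", "w") as f:
           f.write("new contents\n")
 
       # Unlike os.rename() on Windows, an existing destination is
       # silently overwritten instead of raising an error.
       os.replace("data.tmp", "data.txt")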
 
@@ -2407,12 +2374,6 @@
   OSError if localtime() failed. time.clock() now raises a RuntimeError if the
   processor time used is not available or its value cannot be represented
 
-- Issue #13862: Fix spurious failure in test_zlib due to runtime/compile time
-  minor versions not matching.
-
-- Issue #12804: Fix test_socket and test_urllib2net failures when running tests
-  on a system without internet access.
-
 - Issue #13772: In os.symlink() under Windows, do not try to guess the link
   target's type (file or directory).  The detection was buggy and made the
   call non-atomic (therefore prone to race conditions).
@@ -2439,9 +2400,6 @@
 - Issue #13642: Unquote before b64encoding user:password during Basic
   Authentication. Patch contributed by Joonas Kuorilehto.
 
-- Issue #13726: Fix the ambiguous -S flag in regrtest. It is -o/--slow for slow
-  tests.
-
 - Issue #12364: Fix a hang in concurrent.futures.ProcessPoolExecutor.
   The hang would occur when retrieving the result of a scheduled future after
   the executor had been shut down.
@@ -2524,10 +2482,6 @@
 - Issue #13591: A bug in importlib has been fixed that caused import_module
   to load a module twice.
 
-- Issue #4625: If IDLE cannot write to its recent file or breakpoint files,
-  display a message popup and continue rather than crash.  Original patch by
-  Roger Serwy.
-
 - Issue #13449 sched.scheduler.run() method has a new "blocking" parameter which
   when set to False makes run() execute the scheduled events due to expire
   soonest (if any) and then return.  Patch by Giampaolo Rodolà.
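 
   A minimal sketch of the new parameter (illustrative only):
 
       import sched
       import time
 
       s = sched.scheduler(time.time, time.sleep)
       s.enter(0, 1, print, ("due now",))
       s.enter(60, 1, print, ("much later",))
 
       # With blocking=False, run() fires only the events already due and
       # returns immediately instead of sleeping until the next deadline.
       s.run(blocking=False)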
@@ -2544,12 +2498,9 @@
   'importlib.abc.PyPycLoader', 'nntplib.NNTP.xgtitle', 'nntplib.NNTP.xpath',
   and private attributes of 'smtpd.SMTPChannel'.
 
-- Issue #5905, #13560: time.strftime() is now using the current locale
+- Issue #5905, Issue #13560: time.strftime() is now using the current locale
   encoding, instead of UTF-8, if the wcsftime() function is not available.
 
-- Issue #8641: Update IDLE 3 syntax coloring to recognize b".." and not u"..".
-  Patch by Tal Einat.
-
 - Issue #13464: Add a readinto() method to http.client.HTTPResponse.  Patch
   by Jon Kuhn.
 
@@ -2661,9 +2612,6 @@
 - Issue #10817: Fix urlretrieve function to raise ContentTooShortError even
   when reporthook is None. Patch by Jyrki Pulliainen.
 
-- Issue #13296: Fix IDLE to clear compile __future__ flags on shell restart.
-  (Patch by Roger Serwy)
-
 - Fix the xmlrpc.client user agent to return something similar to
   urllib.request user agent: "Python-xmlrpc/3.3".
 
@@ -2766,10 +2714,6 @@
 - Issue #13034: When decoding some SSL certificates, the subjectAltName
   extension could be unreported.
 
-- Issue #9871: Prevent IDLE 3 crash when given byte stings
-  with invalid hex escape sequences, like b'\x0'.
-  (Original patch by Claudiu Popa.)
-
 - Issue #12306: Expose the runtime version of the zlib C library as a constant,
   ZLIB_RUNTIME_VERSION, in the zlib module. Patch by Torsten Landschoff.
 
@@ -2798,8 +2742,6 @@
 
 - Issue #12878: Expose a __dict__ attribute on io.IOBase and its subclasses.
 
-- Issue #12636: IDLE reads the coding cookie when executing a Python script.
-
 - Issue #12494: On error, call(), check_call(), check_output() and
   getstatusoutput() functions of the subprocess module now kill the process,
   read its status (to avoid zombies) and close pipes.
@@ -2869,9 +2811,6 @@
 
 - Issue #10087: Fix the html output format of the calendar module.
 
-- Issue #12540: Prevent zombie IDLE processes on Windows due to changes
-  in os.kill().
-
 - Issue #13121: add support for inplace math operators to collections.Counter.
 
 - Add support for unary plus and unary minus to collections.Counter.
@@ -2903,7 +2842,7 @@
   Condition, etc.) used to be factory functions returning instances of hidden
   classes (_Event, _Condition, etc.), because (if Guido recalls correctly) this
   code pre-dates the ability to subclass extension types.  It is now possible
-  to inherit from these classes without having to import the private
+  to inherit from these classes, without having to import the private
   underscored names like multiprocessing did.
 
 - Issue #9723: Add shlex.quote functions, to escape filenames and command
@@ -2917,14 +2856,8 @@
 - Issue #12607: In subprocess, fix issue where if stdin, stdout or stderr is
   given as a low fd, it gets overwritten.
 
-- Issue #12590: IDLE editor window now always displays the first line
-  when opening a long file.  With Tk 8.5, the first line was hidden.
-
 - Issue #12576: Fix urlopen behavior on sites which do not send (or obfuscates)
-  Connection:close header.
-
-- Issue #12102: Document that buffered files must be flushed before being used
-  with mmap. Patch by Steffen Daode Nurpmeso.
+  ``Connection: close`` header.
 
 - Issue #12560: Build libpython.so on OpenBSD. Patch by Stefan Sperling.
 
@@ -3179,7 +3112,7 @@
 - Issue #12175: FileIO.readall() now raises a ValueError instead of an IOError
   if the file is closed.
 
-- Issue #11109: New service_action method for BaseServer, used by ForkingMixIn
+- Issue #11109: New service_action method for BaseServer, used by ForkingMixin
   class for cleanup. Initial Patch by Justin Warkentin.
 
 - Issue #12045: Avoid duplicate execution of command in
@@ -3224,9 +3157,6 @@
   passing a ``context`` argument pointing to an ssl.SSLContext instance.
   Patch by Kasun Herath.
 
-- Issue #11088: don't crash when using F5 to run a script in IDLE on MacOSX
-  with Tk 8.5.
-
 - Issue #9516: avoid errors in sysconfig when MACOSX_DEPLOYMENT_TARGET
   is set in shell.
 
@@ -3246,10 +3176,6 @@
 - Issue #9971: Write an optimized implementation of BufferedReader.readinto().
   Patch by John O'Connor.
 
-- Issue #1028: Tk returns invalid Unicode null in %A: UnicodeDecodeError.
-  With Tk < 8.5 _tkinter.c:PythonCmd() raised UnicodeDecodeError, caused
-  IDLE to exit.  Converted to valid Unicode null in PythonCmd().
-
 - Issue #11799: urllib.request Authentication Handlers will raise a ValueError
   when presented with an unsupported Authentication Scheme. Patch contributed
   by Yuval Greenfield.
@@ -3486,12 +3412,12 @@
 
 - Issue #7639: Fix short file name generation in bdist_msi
 
-- Issue #11659: Fix ResourceWarning in test_subprocess introduced by #11459.
-  Patch by Ben Hayden.
-
 - Issue #11635: Don't use polling in worker threads and processes launched by
   concurrent.futures.
 
+- Issue #5845: Automatically read readline configuration to enable completion
+  in interactive mode.
+
 - Issue #6811: Allow importlib to change a code object's co_filename attribute
   to match the path to where the source code currently is, not where the code
   object originally came from.
@@ -3526,7 +3452,7 @@
 
 - Issue #11127: Raise a TypeError when trying to pickle a socket object.
 
-- Issue #11563: Connection:close header is sent by requests using URLOpener
+- Issue #11563: ``Connection: close`` header is sent by requests using URLOpener
   class which helps in closing of sockets after connection is over. Patch
   contributions by Jeff McNeil and Nadeem Vawda.
 
@@ -3541,8 +3467,6 @@
 - Issue #10979: unittest stdout buffering now works with class and module
   setup and teardown.
 
-- Issue #11577: fix ResourceWarning triggered by improved binhex test coverage
-
 - Issue #11243: fix the parameter querying methods of Message to work if
   the headers contain un-encoded non-ASCII data.
 
@@ -3575,9 +3499,6 @@
 - Issue #11554: Fixed support for Japanese codecs; previously the body output
   encoding was not done if euc-jp or shift-jis was specified as the charset.
 
-- Issue #11509: Significantly increase test coverage of fileinput.
-  Patch by Denver Coneybeare at PyCon 2011 Sprints.
-
 - Issue #11407: `TestCase.run` returns the result object used or created.
   Contributed by Janathan Hartley.
 
@@ -3700,11 +3621,6 @@
 
 - Issue #9348: Raise an early error if argparse nargs and metavar don't match.
 
-- Issue #8982: Improve the documentation for the argparse Namespace object.
-
-- Issue #9343: Document that argparse parent parsers must be configured before
-  their children.
-
 - Issue #9026: Fix order of argparse sub-commands in help messages.
 
 - Issue #9347: Fix formatting for tuples in argparse type= error messages.
@@ -3757,10 +3673,61 @@
 
 - Issue #11495: OSF support is eliminated. It was deprecated in Python 3.2.
 
-
 IDLE
 ----
 
+- Issue #14409: IDLE now properly executes commands in the Shell window
+  when it cannot read the normal config files on startup and
+  has to use the built-in default key bindings.
+  There was previously a bug in one of the defaults.
+
+- IDLE can be launched as python -m idlelib
+
+- Issue #3573: IDLE no longer hangs when passed invalid command line arguments
+  (directories instead of files).  (Patch by Guilherme Polo)
+
+- Issue #14200: IDLE shell crash on printing non-BMP unicode character.
+
+- Issue #5219: Prevent event handler cascade in IDLE.
+
+- Issue #964437: Make IDLE help window non-modal.
+  Patch by Guilherme Polo and Roger Serwy.
+
+- Issue #13933: IDLE auto-complete did not work with some imported
+  modules, such as hashlib.  (Patch by Roger Serwy)
+
+- Issue #13506: Add '' to path for IDLE Shell when started and restarted
+  with Restart Shell.  Original patches by Marco Scataglini and Roger Serwy.
+
+- Issue #4625: If IDLE cannot write to its recent file or breakpoint files,
+  display a message popup and continue rather than crash.  Original patch by
+  Roger Serwy.
+
+- Issue #8641: Update IDLE 3 syntax coloring to recognize b".." and not u"..".
+  Patch by Tal Einat.
+
+- Issue #13296: Fix IDLE to clear compile __future__ flags on shell restart.
+  (Patch by Roger Serwy)
+
+- Issue #9871: Prevent IDLE 3 crash when given byte strings
+  with invalid hex escape sequences, like b'\x0'.
+  (Original patch by Claudiu Popa.)
+
+- Issue #12636: IDLE reads the coding cookie when executing a Python script.
+
+- Issue #12540: Prevent zombie IDLE processes on Windows due to changes
+  in os.kill().
+
+- Issue #12590: IDLE editor window now always displays the first line
+  when opening a long file.  With Tk 8.5, the first line was hidden.
+
+- Issue #11088: don't crash when using F5 to run a script in IDLE on MacOSX
+  with Tk 8.5.
+
+- Issue #1028: Tk returns invalid Unicode null in %A: UnicodeDecodeError.
+  With Tk < 8.5, _tkinter.c:PythonCmd() raised UnicodeDecodeError, causing
+  IDLE to exit.  Converted to valid Unicode null in PythonCmd().
+
 - Issue #11718: IDLE's open module dialog couldn't find the __init__.py
   file in a package.
 
@@ -3790,6 +3757,10 @@
 Extension Modules
 -----------------
 
+- Issue #16847: Fixed improper use of _PyUnicode_CheckConsistency() in
+  non-pydebug builds. Several extension modules now compile cleanly when
+  assert()s are enabled in standard builds (-DDEBUG flag).
+
 - Issue #13840: The error message produced by ctypes.create_string_buffer
   when given a Unicode string has been fixed.
 
@@ -3852,6 +3823,33 @@
 Tests
 -----
 
+- Issue #13125: Silence spurious test_lib2to3 output when in non-verbose mode.
+  Patch by Mikhail Novikov.
+
+- Issue #13447: Add a test file to host regression tests for bugs in the
+  scripts found in the Tools directory.
+
+- Issue #10881: Fix test_site failure with OS X framework builds.
+
+- Issue #13901: Prevent test_distutils failures on OS X with --enable-shared.
+
+- Issue #13862: Fix spurious failure in test_zlib due to runtime/compile time
+  minor versions not matching.
+
+- Issue #12804: Fix test_socket and test_urllib2net failures when running tests
+  on a system without internet access.
+
+- Issue #13726: Fix the ambiguous -S flag in regrtest. It is -o/--slow for slow
+  tests.
+
+- Issue #11659: Fix ResourceWarning in test_subprocess introduced by #11459.
+  Patch by Ben Hayden.
+
+- Issue #11577: fix ResourceWarning triggered by improved binhex test coverage
+
+- Issue #11509: Significantly increase test coverage of fileinput.
+  Patch by Denver Coneybeare at PyCon 2011 Sprints.
+
 - Issue #11689: Fix a variable scoping error in an sqlite3 test
 
 - Issue #13786: Remove unimplemented 'trace' long option from regrtest.py.
@@ -4030,7 +4028,7 @@
 - Issue #11505: improves test coverage of string.py. Patch by Alicia
   Arlen
 
-- Issue #11490: test_subprocess:test_leaking_fds_on_error no longer gives a
+- Issue #11490: test_subprocess.test_leaking_fds_on_error no longer gives a
   false positive if the last directory in the path is inaccessible.
 
 - Issue #11223: Fix test_threadsignals to fail, not hang, when the
@@ -4054,6 +4052,23 @@
 C-API
 -----
 
+- Issue #13452: PyUnicode_EncodeDecimal() doesn't support error handlers
+  different than "strict" anymore. The caller was unable to compute the
+  size of the output buffer: it depends on the error handler.
+
+- Issue #13560: Add PyUnicode_DecodeLocale(), PyUnicode_DecodeLocaleAndSize()
+  and PyUnicode_EncodeLocale() functions to the C API to decode/encode from/to
+  the current locale encoding.
+
+- Issue #10831: PyUnicode_FromFormat() supports %li, %lli and %zi formats.
+
+- Issue #11246: Fix PyUnicode_FromFormat("%V") to decode the byte string from
+  UTF-8 (with replace error handler) instead of ISO-8859-1 (in strict mode).
+  Patch written by Ray Allen.
+
+- Issue #10830: Fix PyUnicode_FromFormatV("%c") for non-BMP characters on
+  narrow build.
+
 - Add PyObject_GenericGetDict and PyObject_GeneriSetDict. They are generic
   implementations for the getter and setter of a ``__dict__`` descriptor of C
   types.
@@ -4079,6 +4094,24 @@
 Documentation
 -------------
 
+- Issue #13989: Document that GzipFile does not support text mode, and give a
+  more helpful error message when opened with an invalid mode string.
+
+- Issue #13921: Undocument and clean up sqlite3.OptimizedUnicode,
+  which is obsolete in Python 3.x. It's now aliased to str for
+  backwards compatibility.
+
+- Issue #12102: Document that buffered files must be flushed before being used
+  with mmap. Patch by Steffen Daode Nurpmeso.
+
+- Issue #8982: Improve the documentation for the argparse Namespace object.
+
+- Issue #9343: Document that argparse parent parsers must be configured before
+  their children.
+
+- Issue #13498: Clarify docs of os.makedirs()'s exist_ok argument.  Done with
+  great native-speaker help from R. David Murray.
+
 - Issues #13491 and #13995: Fix many errors in sqlite3 documentation.
   Initial patch for #13491 by Johannes Vogel.
 
@@ -9652,7 +9685,7 @@
 
 - Issue #1210: Fixed imaplib and its documentation.
 
-- Issue #4233: Changed semantic of ``_fileio.FileIO``'s ``close()`` 
+- Issue #4233: Changed semantic of ``_fileio.FileIO``'s ``close()``
   method on file objects with closefd=False. The file descriptor is still
   kept open but the file object behaves like a closed file. The ``FileIO``
   object also got a new readonly attribute ``closefd``.
@@ -9796,13 +9829,13 @@
   cyclic garbage collection.
 
 - Issue #3668: Fix a memory leak with the "s*" argument parser in
-  PyArg_ParseTuple and friends, which occurred when the argument for "s*" 
+  PyArg_ParseTuple and friends, which occurred when the argument for "s*"
   was correctly parsed but parsing of subsequent arguments failed.
 
 - Issue #3611: An exception __context__ could be cleared in a complex pattern
   involving a __del__ method re-raising an exception.
 
-- Issue #2534: speed up isinstance() and issubclass() by 50-70%, so as to 
+- Issue #2534: speed up isinstance() and issubclass() by 50-70%, so as to
   match Python 2.5 speed despite the __instancecheck__ / __subclasscheck__
   mechanism. In the process, fix a bug where isinstance() and issubclass(),
   when given a tuple of classes as second argument, were looking up
@@ -9880,7 +9913,7 @@
 
 - The deprecation warnings for the camelCase threading API names were removed.
 
-- Issue #3110: multiprocessing fails to compiel on solaris 10 due to missing 
+- Issue #3110: multiprocessing fails to compile on Solaris 10 due to missing
   SEM_VALUE_MAX.
 
 Extension Modules
@@ -21593,7 +21626,7 @@
 
 - Improved BeOS support.
 
-- Support dynamic loading of shared libraries on NetBSD platforms that 
+- Support dynamic loading of shared libraries on NetBSD platforms that
 use ELF (i.e., MIPS and Alpha systems).
 
 Configuration/build changes
@@ -21810,7 +21843,7 @@
 higher-level classes in code.py.
 
 - turtle.py is a new module for simple turtle graphics.  I'm still
-working on it; let me know if you use this to teach Python to children 
+working on it; let me know if you use this to teach Python to children
 or other novices without prior programming experience.
 
 Obsoleted library modules
@@ -21942,7 +21975,7 @@
 Changes to tools
 ----------------
 
-- New, improved version of Barry Warsaw's Misc/python-mode.el (editing 
+- New, improved version of Barry Warsaw's Misc/python-mode.el (editing
 support for Emacs).
 
 - tabnanny.py: added a -q ('quiet') option to tabnanny, which causes
@@ -22217,7 +22250,7 @@
 -----------------
 
 - Install zlib.dll in the DLLs directory instead of in the win32
-system directory, to avoid conflicts with other applications that have 
+system directory, to avoid conflicts with other applications that have
 their own zlib.dll.
 
 Test Suite
@@ -22297,7 +22330,7 @@
 so that a symlink to a symlink can work.
 
 - Added a hack so that when you type 'quit' or 'exit' at the
-interpreter, you get a friendly explanation of how to press Ctrl-D (or 
+interpreter, you get a friendly explanation of how to press Ctrl-D (or
 Ctrl-Z) to exit.
 
 - New and improved Misc/python-mode.el (Python mode for Emacs).
@@ -22470,13 +22503,13 @@
 range.  Also, randint(a, b) is now redefined as randrange(a, b+1),
 adding extra range and type checking to its arguments!
 
-- Add some semi-thread-safety to random.gauss() (it used to be able to 
+- Add some semi-thread-safety to random.gauss() (it used to be able to
 crash when invoked from separate threads; now the worst it can do is
 give a duplicate result occasionally).
 
 - Some restructuring and generalization done to cmd.py.
 
-- Major upgrade to ConfigParser.py; converted to using 're', added new 
+- Major upgrade to ConfigParser.py; converted to using 're', added new
 exceptions, support underscore in section header and option name.  No
 longer add 'name' option to every section; instead, add '__name__'.
 
@@ -22673,7 +22706,7 @@
 -----------------
 
 - The registry key used is now "1.5" instead of "1.5.x" -- so future
-versions of 1.5 and Mark Hammond's win32all installer don't need to be 
+versions of 1.5 and Mark Hammond's win32all installer don't need to be
 resynchronized.
 
 Windows Tools
@@ -22740,7 +22773,7 @@
 dynamically add one or many entries to the table of built-in modules.
 
 - New macro Py_InitModule3(name, methods, doc) which calls
-Py_InitModule4() with appropriate arguments.  (The -4 variant requires 
+Py_InitModule4() with appropriate arguments.  (The -4 variant requires
 you to pass an obscure version number constant which is always the same.)
 
 - New APIs PySys_WriteStdout() and PySys_WriteStderr() to write to
@@ -22812,7 +22845,7 @@
 Syntax change
 -------------
 
-- The raise statement can now be used without arguments, to re-raise 
+- The raise statement can now be used without arguments, to re-raise
 a previously set exception.  This should be used after catching an
 exception with an except clause only, either in the except clause or
 later in the same function.
@@ -22871,7 +22904,7 @@
 
 	Demo/tkinter/guido/paint.py -- Dave Mitchell
 	Demo/sockets/unixserver.py -- Piet van Oostrum
-	
+
 
 - Much better freeze support.  The freeze script can now freeze
 hierarchical module names (with a corresponding change to import.c),
@@ -23010,7 +23043,7 @@
 - New command supported by the ftplib module: rmd(); also fixed some
 minor bugs.
 
-- The profile module now uses a different timer function by default -- 
+- The profile module now uses a different timer function by default --
 time.clock() is generally better than os.times().  This makes it work
 better on Windows NT, too.
 
@@ -23049,14 +23082,14 @@
 - In the multifile module, support the optional second parameter to
 seek() when possible.
 
-- Several fixes to the gopherlib module by Lars Marius Garshol.  Also, 
+- Several fixes to the gopherlib module by Lars Marius Garshol.  Also,
 urlparse now correctly handles Gopher URLs with query strings.
 
 - Fixed a tiny bug in format_exception() in the traceback module.
 Also rewrite tb_lineno() to be compatible with JPython (and not
 disturb the current exception!); by Jim Hugunin.
 
-- The httplib module is more robust when servers send a short response 
+- The httplib module is more robust when servers send a short response
 -- courtesy Tim O'Malley.
 
 Tkinter and friends
@@ -23071,7 +23104,7 @@
 no longer use the default root.
 
 - The interfaces for the bind*() and unbind() widget methods have been
-redesigned; the bind*() methods now return the name of the Tcl command 
+redesigned; the bind*() methods now return the name of the Tcl command
 created for the callback, and this can be passed as a optional
 argument to unbind() in order to delete the command (normally, such
 commands are automatically unbound when the widget is destroyed, but
@@ -23107,7 +23140,7 @@
 dictionary to allow recursive container types to detect recursion in
 their repr(), str() and print implementations.
 
-- New function PyObject_Not(x) calculates (not x) according to Python's 
+- New function PyObject_Not(x) calculates (not x) according to Python's
 standard rules (basically, it negates the outcome PyObject_IsTrue(x).
 
 - New function _PyModule_Clear(), which clears a module's dictionary
@@ -23268,7 +23301,7 @@
 instances with copy.py.  The cPickle.c changes and some pickle.py
 changes are courtesy Jim Fulton.
 
-- Locale support in he "re" (Perl regular expressions) module.  Use 
+- Locale support in the "re" (Perl regular expressions) module.  Use
 the flag re.L (or re.LOCALE) to enable locale-specific matching
 rules for \w and \b.  The in-line syntax for this flag is (?L).
 
@@ -23334,7 +23367,7 @@
 
 - Some improvements to the _tkinter build line suggested by Case Roole.
 
-- A full suite of platform specific files for NetBSD 1.x, submitted by 
+- A full suite of platform specific files for NetBSD 1.x, submitted by
 Anders Andersen.
 
 - New Solaris specific header STROPTS.py.
@@ -23404,7 +23437,7 @@
 if there are lots of duplicates, and otherwise at least as good.
 
 - Added "uue" as an alias for "uuencode" to mimetools.py.  (Hm, the
-uudecode bug where it complaints about trailing garbage is still there 
+uudecode bug where it complains about trailing garbage is still there
 :-( ).
 
 - pickle.py requires integers in text mode to be in decimal notation
@@ -24180,7 +24213,7 @@
 The Python/C API for frames is changed (you shouldn't be using this
 anyway).
 
-- Significant speedup by inlining some common opcodes for common operand 
+- Significant speedup by inlining some common opcodes for common operand
 types (e.g.  i+i, i-i, and list[i]).  Fredrik Lundh.
 
 - Small speedup by reordering the method tables of some common
@@ -24206,34 +24239,34 @@
 printing the documentation now kills fewer trees -- the margins have
 been reduced.
 
-- I have started documenting the Python/C API. Unfortunately this project 
-hasn't been completed yet.  It will be complete before the final release of 
-Python 1.5, though.  At the moment, it's better to read the LaTeX source 
+- I have started documenting the Python/C API. Unfortunately this project
+hasn't been completed yet.  It will be complete before the final release of
+Python 1.5, though.  At the moment, it's better to read the LaTeX source
 than to attempt to run it through LaTeX and print the resulting dvi file.
 
-- The posix module (and hence os.py) now has doc strings!  Thanks to Neil 
-Schemenauer.  I received a few other contributions of doc strings.  In most 
+- The posix module (and hence os.py) now has doc strings!  Thanks to Neil
+Schemenauer.  I received a few other contributions of doc strings.  In most
 other places, doc strings are still wishful thinking...
 
 
 Language changes
 ----------------
 
-- Private variables with leading double underscore are now a permanent 
-feature of the language.  (These were experimental in release 1.4.  I have 
-favorable experience using them; I can't label them "experimental" 
+- Private variables with leading double underscore are now a permanent
+feature of the language.  (These were experimental in release 1.4.  I have
+favorable experience using them; I can't label them "experimental"
 forever.)
 
-- There's new string literal syntax for "raw strings".  Prefixing a string 
-literal with the letter r (or R) disables all escape processing in the 
-string; for example, r'\n' is a two-character string consisting of a 
-backslash followed by the letter n.  This combines with all forms of string 
-quotes; it is actually useful for triple quoted doc strings which might 
-contain references to \n or \t.  An embedded quote prefixed with a 
-backslash does not terminate the string, but the backslash is still 
-included in the string; for example, r'\'' is a two-character string 
-consisting of a backslash and a quote.  (Raw strings are also 
-affectionately known as Robin strings, after their inventor, Robin 
+- There's new string literal syntax for "raw strings".  Prefixing a string
+literal with the letter r (or R) disables all escape processing in the
+string; for example, r'\n' is a two-character string consisting of a
+backslash followed by the letter n.  This combines with all forms of string
+quotes; it is actually useful for triple quoted doc strings which might
+contain references to \n or \t.  An embedded quote prefixed with a
+backslash does not terminate the string, but the backslash is still
+included in the string; for example, r'\'' is a two-character string
+consisting of a backslash and a quote.  (Raw strings are also
+affectionately known as Robin strings, after their inventor, Robin
 Friedrich.)
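+
+A tiny illustrative example of the behaviour described above:
+
+    assert len('\n') == 1      # one newline character
+    assert len(r'\n') == 2     # a backslash followed by the letter n
+    assert r'\'' == "\\'"      # the backslash before the quote is kept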
 
 - There's a simple assert statement, and a new exception
@@ -24262,10 +24295,10 @@
 - The obsolete exception ConflictError (presumably used by the long
 obsolete access statement) has been deleted.
 
-- There's a new function sys.exc_info() which returns the tuple 
+- There's a new function sys.exc_info() which returns the tuple
 (sys.exc_type, sys.exc_value, sys.exc_traceback) in a thread-safe way.
 
-- There's a new variable sys.executable, pointing to the executable file 
+- There's a new variable sys.executable, pointing to the executable file
 for the Python interpreter.
 
 - The sort() methods for lists no longer uses the C library qsort(); I
@@ -24291,11 +24324,11 @@
 returning from a function that caught an exception.
 
 - There's a new "buffer" interface.  Certain objects (e.g. strings and
-arrays) now support the "buffer" protocol.  Buffer objects are acceptable 
-whenever formerly a string was required for a write operation; mutable 
+arrays) now support the "buffer" protocol.  Buffer objects are acceptable
+whenever formerly a string was required for a write operation; mutable
 buffer objects can be the target of a read operation using the call
-f.readinto(buffer).  A cool feature is that regular expression matching now 
-also work on array objects.  Contribution by Jack Jansen.  (Needs 
+f.readinto(buffer).  A cool feature is that regular expression matching now
+also works on array objects.  Contribution by Jack Jansen.  (Needs
 documentation.)
 
 - String interning: dictionary lookups are faster when the lookup
@@ -24587,7 +24620,7 @@
 of message sequence specifiers without invoking a subprocess.  Also
 added a createmessage() method by Lars Wirzenius.
 
-- The StringIO.StringIO class now supports readline(nbytes).  (Lars 
+- The StringIO.StringIO class now supports readline(nbytes).  (Lars
 Wirzenius.)  (Of course, you should be using cStringIO for performance.)
 
 - UserDict.py supports the new dictionary methods as well.
@@ -24635,8 +24668,8 @@
 - Various small fixes to the nntplib.py module that I can't bother to
 document in detail.
 
-- Sjoerd Mullender's mimify.py module now supports base64 encoding and 
-includes functions to handle the funny encoding you sometimes see in mail 
+- Sjoerd Mullender's mimify.py module now supports base64 encoding and
+includes functions to handle the funny encoding you sometimes see in mail
 headers.  It is now documented.
 
 - mailbox.py: Added BabylMailbox.  Improved the way the mailbox is
@@ -24987,23 +25020,23 @@
 NT (the old VC++ 4.2 Makefile is also still supported, but will
 eventually be withdrawn due to its bulkiness).
 
-- See the note on the new module search path in the "Miscellaneous" section 
+- See the note on the new module search path in the "Miscellaneous" section
 above.
 
 - Support for Win32s (the 32-bit Windows API under Windows 3.1) is
 basically withdrawn.  If it still works for you, you're lucky.
 
-- There's a new extension module, msvcrt.c, which provides various 
-low-level operations defined in the Microsoft Visual C++ Runtime Library.  
-These include locking(), setmode(), get_osfhandle(), set_osfhandle(), and 
+- There's a new extension module, msvcrt.c, which provides various
+low-level operations defined in the Microsoft Visual C++ Runtime Library.
+These include locking(), setmode(), get_osfhandle(), set_osfhandle(), and
 console I/O functions like kbhit(), getch() and putch().
 
 - The -u option not only sets the standard I/O streams to unbuffered
 status, but also sets them in binary mode.  (This can also be done
 using msvcrt.setmode(), by the way.)
 
-- The, sys.prefix and sys.exec_prefix variables point to the directory 
-where Python is installed, or to the top of the source tree, if it was run 
+- The sys.prefix and sys.exec_prefix variables point to the directory
+where Python is installed, or to the top of the source tree, if it was run
 from there.
 
 - The various os.path modules (posixpath, ntpath, macpath) now support
@@ -25011,7 +25044,7 @@
 os.path.join(a, b, c) is the same as os.path.join(a, os.path.join(b,
 c)).
 
-- The ntpath module (normally used as os.path) supports ~ to $HOME 
+- The ntpath module (normally used as os.path) supports ~ to $HOME
 expansion in expanduser().
 
 - The freeze tool now works on Windows.
@@ -25309,47 +25342,47 @@
 
 - New module whichdb recognizes dbm, gdbm and bsddb/dbhash files.
 
-- The Doc/Makefile targets have been reorganized somewhat to remove the 
+- The Doc/Makefile targets have been reorganized somewhat to remove the
 insistence on always generating PostScript.
 
 - The texinfo to html filter (Doc/texi2html.py) has been improved somewhat.
 
-- "errors.h" has been renamed to "pyerrors.h" to resolve a long-standing 
+- "errors.h" has been renamed to "pyerrors.h" to resolve a long-standing
 name conflict on the Mac.
 
-- Linking a module compiled with a different setting for Py_TRACE_REFS now 
+- Linking a module compiled with a different setting for Py_TRACE_REFS now
 generates a linker error rather than a core dump.
 
-- The cgi module has a new convenience function print_exception(), which 
-formats a python exception using HTML.  It also fixes a bug in the 
-compatibility code and adds a dubious feature which makes it possible to 
+- The cgi module has a new convenience function print_exception(), which
+formats a python exception using HTML.  It also fixes a bug in the
+compatibility code and adds a dubious feature which makes it possible to
 have two query strings, one in the URL and one in the POST data.
 
-- A subtle change in the unpickling of class instances makes it possible 
-to unpickle in restricted execution mode, where the __dict__ attribute is 
+- A subtle change in the unpickling of class instances makes it possible
+to unpickle in restricted execution mode, where the __dict__ attribute is
 not available (but setattr() is).
 
-- Documentation for os.path.splitext() (== posixpath.splitext()) has been 
+- Documentation for os.path.splitext() (== posixpath.splitext()) has been
 cleared up.  It splits at the *last* dot.
 
 - posixfile locking is now also correctly supported on AIX.
 
-- The tempfile module once again honors an initial setting of tmpdir.  It 
+- The tempfile module once again honors an initial setting of tmpdir.  It
 now works on Windows, too.
 
-- The traceback module has some new functions to extract, format and print 
+- The traceback module has some new functions to extract, format and print
 the active stack.
 
-- Some translation functions in the urllib module have been made a little 
+- Some translation functions in the urllib module have been made a little
 less sluggish.
 
-- The addtag_* methods for Canvas widgets in Tkinter as well as in the 
-separate Canvas class have been fixed so they actually do something 
+- The addtag_* methods for Canvas widgets in Tkinter as well as in the
+separate Canvas class have been fixed so they actually do something
 meaningful.
 
 - A tiny _test() function has been added to Tkinter.py.
 
-- A generic Makefile for dynamically loaded modules is provided in the Misc 
+- A generic Makefile for dynamically loaded modules is provided in the Misc
 subdirectory (Misc/gMakefile).
 
 - A new version of python-mode.el for Emacs is provided.  See
@@ -25357,25 +25390,25 @@
 separate file pyimenu.el is no longer needed, imenu support is folded
 into python-mode.el.
 
-- The configure script can finally correctly find the readline library in a 
-non-standard location.  The LDFLAGS variable is passed on the Makefiles 
+- The configure script can finally correctly find the readline library in a
+non-standard location.  The LDFLAGS variable is passed on the Makefiles
 from the configure script.
 
-- Shared libraries are now installed as programs (i.e. with executable 
+- Shared libraries are now installed as programs (i.e. with executable
 permission).  This is required on HP-UX and won't hurt on other systems.
 
-- The objc.c module is no longer part of the distribution.  Objective-C 
+- The objc.c module is no longer part of the distribution.  Objective-C
 support may become available as contributed software on the ftp site.
 
 - The sybase module is no longer part of the distribution.  A much
 improved sybase module is available as contributed software from the
 ftp site.
 
-- _tkinter is now compatible with Tcl 7.5 / Tk 4.1 patch1 on Windows and 
-Mac (don't use unpatched Tcl/Tk!).  The default line in the Setup.in file 
+- _tkinter is now compatible with Tcl 7.5 / Tk 4.1 patch1 on Windows and
+Mac (don't use unpatched Tcl/Tk!).  The default line in the Setup.in file
 now links with Tcl 7.5 / Tk 4.1 rather than 7.4/4.0.
 
-- In Setup, you can now write "*shared*" instead of "*noconfig*", and you 
+- In Setup, you can now write "*shared*" instead of "*noconfig*", and you
 can use *.so and *.sl as shared libraries.
 
 - Some more fidgeting for AIX shared libraries.
@@ -25384,81 +25417,81 @@
 (Note -- a complete replacement by Niels Mo"ller, called gpmodule, is
 available from the contrib directory on the ftp site.)
 
-- A warning is written to sys.stderr when a __del__ method raises an 
+- A warning is written to sys.stderr when a __del__ method raises an
 exception (formerly, such exceptions were completely ignored).
 
-- The configure script now defines HAVE_OLD_CPP if the C preprocessor is 
+- The configure script now defines HAVE_OLD_CPP if the C preprocessor is
 incapable of ANSI style token concatenation and stringification.
 
-- All source files (except a few platform specific modules) are once again 
+- All source files (except a few platform specific modules) are once again
 compatible with K&R C compilers as well as ANSI compilers.  In particular,
-ANSI-isms have been removed or made conditional in complexobject.c, 
+ANSI-isms have been removed or made conditional in complexobject.c,
 getargs.c and operator.c.
 
-- The abstract object API has three new functions, PyObject_DelItem, 
+- The abstract object API has three new functions, PyObject_DelItem,
 PySequence_DelItem, and PySequence_DelSlice.
 
-- The operator module has new functions delitem and delslice, and the 
-functions "or" and "and" are renamed to "or_" and "and_" (since "or" and 
+- The operator module has new functions delitem and delslice, and the
+functions "or" and "and" are renamed to "or_" and "and_" (since "or" and
 "and" are reserved words).  ("__or__" and "__and__" are unchanged.)
 
-- The environment module is no longer supported; putenv() is now a function 
+- The environment module is no longer supported; putenv() is now a function
 in posixmodule (also under NT).
 
 - Error in filter(<function>, "") has been fixed.
 
 - Unrecognized keyword arguments raise TypeError, not KeyError.
 
-- Better portability, fewer bugs and memory leaks, fewer compiler warnings, 
+- Better portability, fewer bugs and memory leaks, fewer compiler warnings,
 some more documentation.
 
-- Bug in float power boundary case (0.0 to the negative integer power) 
+- Bug in float power boundary case (0.0 to the negative integer power)
 fixed.
 
-- The test of negative number to the float power has been moved from the 
-built-in pow() functin to floatobject.c (so complex numbers can yield the 
+- The test of negative number to the float power has been moved from the
+built-in pow() function to floatobject.c (so complex numbers can yield the
 correct result).
 
-- The bug introduced in beta2 where shared libraries loaded (using 
+- The bug introduced in beta2 where shared libraries loaded (using
 dlopen()) from the current directory would fail, has been fixed.
 
-- Modules imported as shared libraries now also have a __file__ attribute, 
-giving the filename from which they were loaded.  The only modules without 
+- Modules imported as shared libraries now also have a __file__ attribute,
+giving the filename from which they were loaded.  The only modules without
 a __file__ attribute now are built-in modules.
 
-- On the Mac, dynamically loaded modules can end in either ".slb" or 
-".<platform>.slb" where <platform> is either "CFM68K" or "ppc".  The ".slb" 
+- On the Mac, dynamically loaded modules can end in either ".slb" or
+".<platform>.slb" where <platform> is either "CFM68K" or "ppc".  The ".slb"
 extension should only be used for "fat" binaries.
 
-- C API addition: marshal.c now supports 
+- C API addition: marshal.c now supports
 PyMarshal_WriteObjectToString(object).
 
 - C API addition: getargs.c now supports
 PyArg_ParseTupleAndKeywords(args, kwdict, format, kwnames, ...)
 to parse keyword arguments.
 
-- The PC versioning scheme (sys.winver) has changed once again.  the 
-version number is now "<digit>.<digit>.<digit>.<apiversion>", where the 
-first three <digit>s are the Python version (e.g. "1.4.0" for Python 1.4, 
-"1.4.1" for Python 1.4.1 -- the beta level is not included) and 
+- The PC versioning scheme (sys.winver) has changed once again.  The
+version number is now "<digit>.<digit>.<digit>.<apiversion>", where the
+first three <digit>s are the Python version (e.g. "1.4.0" for Python 1.4,
+"1.4.1" for Python 1.4.1 -- the beta level is not included) and
 <apiversion> is the four-digit PYTHON_API_VERSION (currently 1005).
 
 - h2py.py accepts whitespace before the # in CPP directives
 
-- On Solaris 2.5, it should now be possible to use either Posix threads or 
-Solaris threads (XXX: how do you select which is used???).  (Note: the 
-Python pthreads interface doesn't fully support semaphores yet -- anyone 
+- On Solaris 2.5, it should now be possible to use either Posix threads or
+Solaris threads (XXX: how do you select which is used???).  (Note: the
+Python pthreads interface doesn't fully support semaphores yet -- anyone
 care to fix this?)
 
-- Thread support should now work on AIX, using either DCE threads or 
+- Thread support should now work on AIX, using either DCE threads or
 pthreads.
 
 - New file Demo/sockets/unicast.py
 
-- Working Mac port, with CFM68K support, with Tk 4.1 support (though not 
+- Working Mac port, with CFM68K support, with Tk 4.1 support (though not
 both) (XXX)
 
-- New project setup for PC port, now compatible with PythonWin, with 
+- New project setup for PC port, now compatible with PythonWin, with
 _tkinter and NumPy support (XXX)
 
 - New module site.py (XXX)
@@ -25475,7 +25508,7 @@
 
 - string.atoi c.s. now raise an exception for an empty input string.
 
-- At last, it is no longer necessary to define HAVE_CONFIG_H in order to 
+- At last, it is no longer necessary to define HAVE_CONFIG_H in order to
 have config.h included at various places.
 
 - Unrecognized keyword arguments now raise TypeError rather than KeyError.
@@ -25483,25 +25516,25 @@
 - The makesetup script recognizes files with extension .so or .sl as
 (shared) libraries.
 
-- 'access' is no longer a reserved word, and all code related to its 
-implementation is gone (or at least #ifdef'ed out).  This should make 
+- 'access' is no longer a reserved word, and all code related to its
+implementation is gone (or at least #ifdef'ed out).  This should make
 Python a little speedier too!
 
-- Performance enhancements suggested by Sjoerd Mullender.  This includes 
-the introduction of two new optional function pointers in type object, 
-getattro and setattro, which are like getattr and setattr but take a 
+- Performance enhancements suggested by Sjoerd Mullender.  This includes
+the introduction of two new optional function pointers in type object,
+getattro and setattro, which are like getattr and setattr but take a
 string object instead of a C string pointer.
 
-- New operations in string module: lstrip(s) and rstrip(s) strip whitespace 
-only on the left or only on the right, A new optional third argument to 
-split() specifies the maximum number of separators honored (so 
-splitfields(s, sep, n) returns a list of at most n+1 elements).  (Since 
+- New operations in string module: lstrip(s) and rstrip(s) strip whitespace
+only on the left or only on the right.  A new optional third argument to
+split() specifies the maximum number of separators honored (so
+splitfields(s, sep, n) returns a list of at most n+1 elements).  (Since
 1.3, splitfields(s, None) is totally equivalent to split(s).)
-string.capwords() has an optional second argument specifying the 
+string.capwords() has an optional second argument specifying the
 separator (which is passed to split()).
 
-- regsub.split() has the same addition as string.split().  regsub.splitx(s, 
-sep, maxsep) implements the functionality that was regsub.split(s, 1) in 
+- regsub.split() has the same addition as string.split().  regsub.splitx(s,
+sep, maxsep) implements the functionality that was regsub.split(s, 1) in
 1.4beta2 (return a list containing the delimiters as well as the words).
 
 - Final touch for AIX loading, rewritten Misc/AIX-NOTES.
@@ -25545,11 +25578,11 @@
 meaningful value (a few things were botched in beta 1).  Lib/dos_8x3
 is now a standard part of the distribution (alas).
 
-- More improvements to the installation procedure.  Typing "make install" 
-now inserts the version number in the pathnames of almost everything 
-installed, and creates the machine dependent modules (FCNTL.py etc.) if not 
-supplied by the distribution.  (XXX There's still a problem with the latter 
-because the "regen" script requires that Python is installed.  Some manual 
+- More improvements to the installation procedure.  Typing "make install"
+now inserts the version number in the pathnames of almost everything
+installed, and creates the machine dependent modules (FCNTL.py etc.) if not
+supplied by the distribution.  (XXX There's still a problem with the latter
+because the "regen" script requires that Python is installed.  Some manual
 intervention may still be required.) (This has been fixed in 1.4beta3.)
 
 - New modules: errno, operator (XXX).
@@ -25612,8 +25645,8 @@
 
 - Added sys.platform and sys.exec_platform for Bill Janssen.
 
-- Installation has been completely overhauled.  "make install" now installs 
-everything, not just the python binary.  Installation uses the install-sh 
+- Installation has been completely overhauled.  "make install" now installs
+everything, not just the python binary.  Installation uses the install-sh
 script (borrowed from X11) to install each file.
 
 - New functions in the posix module: mkfifo, plock, remove (== unlink),
@@ -25623,59 +25656,59 @@
 
 - Shared library support for FreeBSD.
 
-- The --with-readline option can now be used without a DIRECTORY argument, 
-for systems where libreadline.* is in one of the standard places.  It is 
+- The --with-readline option can now be used without a DIRECTORY argument,
+for systems where libreadline.* is in one of the standard places.  It is
 also possible for it to be a shared library.
 
-- The extension tkinter has been renamed to _tkinter, to avoid confusion 
-with Tkinter.py oncase insensitive file systems.  It now supports Tk 4.1 as 
+- The extension tkinter has been renamed to _tkinter, to avoid confusion
+with Tkinter.py on case-insensitive file systems.  It now supports Tk 4.1 as
 well as 4.0.
 
-- Author's change of address from CWI in Amsterdam, The Netherlands, to 
+- Author's change of address from CWI in Amsterdam, The Netherlands, to
 CNRI in Reston, VA, USA.
 
-- The math.hypot() function is now always available (if it isn't found in 
+- The math.hypot() function is now always available (if it isn't found in
 the C math library, Python provides its own implementation).
 
-- The latex documentation is now compatible with latex2e, thanks to David 
+- The latex documentation is now compatible with latex2e, thanks to David
 Ascher.
 
 - The expression x**y is now equivalent to pow(x, y).
 
 - The indexing expression x[a, b, c] is now equivalent to x[(a, b, c)].
 
-- Complex numbers are now supported.  Imaginary constants are written with 
-a 'j' or 'J' prefix, general complex numbers can be formed by adding a real 
-part to an imaginary part, like 3+4j.  Complex numbers are always stored in 
-floating point form, so this is equivalent to 3.0+4.0j.  It is also 
-possible to create complex numbers with the new built-in function 
-complex(re, [im]).  For the footprint-conscious, complex number support can 
+- Complex numbers are now supported.  Imaginary constants are written with
+a 'j' or 'J' prefix, general complex numbers can be formed by adding a real
+part to an imaginary part, like 3+4j.  Complex numbers are always stored in
+floating point form, so this is equivalent to 3.0+4.0j.  It is also
+possible to create complex numbers with the new built-in function
+complex(re, [im]).  For the footprint-conscious, complex number support can
 be disabled by defining the symbol WITHOUT_COMPLEX.
 
 - New built-in function list() is the long-awaited counterpart of tuple().
 
-- There's a new "cmath" module which provides the same functions as the 
-"math" library but with complex arguments and results.  (There are very 
-good reasons why math.sqrt(-1) still raises an exception -- you have to use 
+- There's a new "cmath" module which provides the same functions as the
+"math" library but with complex arguments and results.  (There are very
+good reasons why math.sqrt(-1) still raises an exception -- you have to use
 cmath.sqrt(-1) to get 1j for an answer.)
 
-- The Python.h header file (which is really the same as allobjects.h except 
-it disables support for old style names) now includes several more files, 
+- The Python.h header file (which is really the same as allobjects.h except
+it disables support for old style names) now includes several more files,
 so you have to have fewer #include statements in the average extension.
 
-- The NDEBUG symbol is no longer used.  Code that used to be dependent on 
-the presence of NDEBUG is now present on the absence of DEBUG.  TRACE_REFS 
-and REF_DEBUG have been renamed to Py_TRACE_REFS and Py_REF_DEBUG, 
-respectively.  At long last, the source actually compiles and links without 
+- The NDEBUG symbol is no longer used.  Code that used to be dependent on
+the presence of NDEBUG is now present on the absence of DEBUG.  TRACE_REFS
+and REF_DEBUG have been renamed to Py_TRACE_REFS and Py_REF_DEBUG,
+respectively.  At long last, the source actually compiles and links without
 errors when this symbol is defined.
 
-- Several symbols that didn't follow the new naming scheme have been 
-renamed (usually by adding to rename2.h) to use a Py or _Py prefix.  There 
-are no external symbols left without a Py or _Py prefix, not even those 
-defined by sources that were incorporated from elsewhere (regexpr.c, 
+- Several symbols that didn't follow the new naming scheme have been
+renamed (usually by adding to rename2.h) to use a Py or _Py prefix.  There
+are no external symbols left without a Py or _Py prefix, not even those
+defined by sources that were incorporated from elsewhere (regexpr.c,
 md5c.c).  (Macros are a different story...)
 
-- There are now typedefs for the structures defined in config.c and 
+- There are now typedefs for the structures defined in config.c and
 frozen.c.
 
 - New PYTHON_API_VERSION value and .pyc file magic number.
@@ -25689,125 +25722,125 @@
 - The binhex and binascii modules now actually work.
 
 - The cgi module has been almost totally rewritten and documented.
-It now supports file upload and a new data type to handle forms more 
+It now supports file upload and a new data type to handle forms more
 flexibly.
 
 - The formatter module (for use with htmllib) has been overhauled (again).
 
 - The ftplib module now supports passive mode and has doc strings.
 
-- In (ideally) all places where binary files are read or written, the file 
-is now correctly opened in binary mode ('rb' or 'wb') so the code will work 
+- In (ideally) all places where binary files are read or written, the file
+is now correctly opened in binary mode ('rb' or 'wb') so the code will work
 on Mac or PC.
 
-- Dummy versions of os.path.expandvars() and expanduser() are now provided 
+- Dummy versions of os.path.expandvars() and expanduser() are now provided
 on non-Unix platforms.
 
-- Module urllib now has two new functions url2pathname and pathname2url 
-which turn local filenames into "file:..." URLs using the same rules as 
-Netscape (why be different).  it also supports urlretrieve() with a 
-pathname parameter, and honors the proxy environment variables (http_proxy 
+- Module urllib now has two new functions url2pathname and pathname2url
+which turn local filenames into "file:..." URLs using the same rules as
+Netscape (why be different).  It also supports urlretrieve() with a
+pathname parameter, and honors the proxy environment variables (http_proxy
 etc.).  The URL parsing has been improved somewhat, too.
 
-- Micro improvements to urlparse.  Added urlparse.urldefrag() which 
+- Micro improvements to urlparse.  Added urlparse.urldefrag() which
 removes a trailing ``#fragment'' if any.
 
 - The mailbox module now supports MH style message delimiters as well.
 
-- The mhlib module contains some new functionality: setcontext() to set the 
-current folder and parsesequence() to parse a sequence as commonly passed 
+- The mhlib module contains some new functionality: setcontext() to set the
+current folder and parsesequence() to parse a sequence as commonly passed
 to MH commands (e.g. 1-10 or last:5).
 
-- New module mimify for conversion to and from MIME format of email 
+- New module mimify for conversion to and from MIME format of email
 messages.
 
-- Module ni now automatically installs itself when first imported -- this 
-is against the normal rule that modules should define classes and functions 
-but not invoke them, but appears more useful in the case that two 
+- Module ni now automatically installs itself when first imported -- this
+is against the normal rule that modules should define classes and functions
+but not invoke them, but appears more useful in the case that two
 different, independent modules want to use ni's features.
 
 - Some small performance enhancements in module pickle.
 
-- Small interface change to the profile.run*() family of functions -- more 
+- Small interface change to the profile.run*() family of functions -- more
 sensible handling of return values.
 
-- The officially registered Mac creator for Python files is 'Pyth'.  This 
+- The officially registered Mac creator for Python files is 'Pyth'.  This
 replaces 'PYTH' which was used before but never registered.
 
 - Added regsub.capwords().  (XXX)
 
-- Added string.capwords(), string.capitalize() and string.translate().  
+- Added string.capwords(), string.capitalize() and string.translate().
 (XXX)
 
-- Fixed an interface bug in the rexec module: it was impossible to pass a 
-hooks instance to the RExec class.  rexec now also supports the dynamic 
-loading of modules from shared libraries.  Some other interfaces have been 
+- Fixed an interface bug in the rexec module: it was impossible to pass a
+hooks instance to the RExec class.  rexec now also supports the dynamic
+loading of modules from shared libraries.  Some other interfaces have been
 added too.
 
-- Module rfc822 now caches the headers in a dictionary for more efficient 
+- Module rfc822 now caches the headers in a dictionary for more efficient
 lookup.
 
-- The sgmllib module now understands a limited number of SGML "shorthands" 
+- The sgmllib module now understands a limited number of SGML "shorthands"
 like <A/.../ for <A>...</A>.  (It's not clear that this was a good idea...)
 
-- The tempfile module actually tries a number of different places to find a 
-usable temporary directory.  (This was prompted by certain Linux 
-installations that appear to be missing a /usr/tmp directory.) [A bug in 
-the implementation that would ignore a pre-existing tmpdir global has been 
+- The tempfile module actually tries a number of different places to find a
+usable temporary directory.  (This was prompted by certain Linux
+installations that appear to be missing a /usr/tmp directory.) [A bug in
+the implementation that would ignore a pre-existing tmpdir global has been
 fixed in beta3.]
 
 - Much improved and enhanced FileDialog module for Tkinter.
 
-- Many small changes to Tkinter, to bring it more in line with Tk 4.0 (as 
+- Many small changes to Tkinter, to bring it more in line with Tk 4.0 (as
 well as Tk 4.1).
 
-- New socket interfaces include ntohs(), ntohl(), htons(), htonl(), and 
-s.dup().  Sockets now work correctly on Windows.  On Windows, the built-in 
-extension is called _socket and a wrapper module win/socket.py provides 
-"makefile()" and "dup()" functionality.  On Windows, the select module 
+- New socket interfaces include ntohs(), ntohl(), htons(), htonl(), and
+s.dup().  Sockets now work correctly on Windows.  On Windows, the built-in
+extension is called _socket and a wrapper module win/socket.py provides
+"makefile()" and "dup()" functionality.  On Windows, the select module
 works only with socket objects.
 
 - Bugs in bsddb module fixed (e.g. missing default argument values).
 
 - The curses extension now includes <ncurses.h> when available.
 
-- The gdbm module now supports opening databases in "fast" mode by 
+- The gdbm module now supports opening databases in "fast" mode by
 specifying 'f' as the second character of the mode string.
 
-- new variables sys.prefix and sys.exec_prefix pass corresponding 
+- New variables sys.prefix and sys.exec_prefix expose the corresponding
 configuration options / Makefile variables to the Python programmer.
 
-- The ``new'' module now supports creating new user-defined classes as well 
+- The ``new'' module now supports creating new user-defined classes as well
 as instances thereof.
 
-- The soundex module now sports get_soundex() to get the soundex value for an 
-arbitrary string (formerly it would only do soundex-based string 
+- The soundex module now sports get_soundex() to get the soundex value for an
+arbitrary string (formerly it would only do soundex-based string
 comparison) as well as doc strings.
 
-- New object type "cobject" to safely wrap void pointers for passing them 
+- New object type "cobject" to safely wrap void pointers for passing them
 between various extension modules.
 
 - More efficient computation of float**smallint.
 
-- The mysterious bug whereby "x.x" (two occurrences of the same 
-one-character name) typed from the commandline would sometimes fail 
+- The mysterious bug whereby "x.x" (two occurrences of the same
+one-character name) typed from the command line would sometimes fail
 mysteriously has been fixed.
 
-- The initialization of the readline function can now be invoked by a C 
+- The initialization of the readline function can now be invoked by a C
 extension through PyOS_ReadlineInit().
 
-- There's now an externally visible pointer PyImport_FrozenModules which 
+- There's now an externally visible pointer PyImport_FrozenModules which
 can be changed by an embedding application.
 
-- The argument parsing functions now support a new format character 'D' to 
+- The argument parsing functions now support a new format character 'D' to
 specify complex numbers.
 
 - Various memory leaks plugged and bugs fixed.
 
-- Improved support for posix threads (now that real implementations are 
+- Improved support for posix threads (now that real implementations are
 beginning to appear).  Still no fully functioning semaphores.
 
-- Some various and sundry improvements and new entries in the Tools 
+- Some various and sundry improvements and new entries in the Tools
 directory.
 
 
@@ -27417,7 +27450,7 @@
 The limit on the size of the *run-time* stack has completely been
 removed -- this means that tuple or list displays can contain any
 number of elements (formerly more than 50 would crash the
-interpreter). 
+interpreter).
 
 
 Changes to existing built-in functions and methods
diff --git a/Misc/NEWS b/Misc/NEWS
index 05230da..83fbeb8 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -2,10 +2,10 @@
 Python News
 +++++++++++
 
-What's New in Python 3.4.2?
-===========================
+What's New in Python 3.5 alpha 1?
+=================================
 
-Release date: XXXX-XX-XX
+Release date: TBA
 
 Core and Builtins
 -----------------
@@ -15,35 +15,8 @@
   time issue noticeable when compiling code with a large number of "and"
   and "or" operators.
 
-Library
--------
-
-- Issue #14710: pkgutil.get_loader() no longer raises an exception when None is
-  found in sys.modules.
-
-- Issue #14710: pkgutil.find_loader() no longer raises an exception when a
-  module doesn't exist.
-
-- Issue #21538: The plistlib module now supports loading of binary plist files
-  when reference or offset size is not a power of two.
-
-Tests
------
-
 - Issue #19925: Added tests for the spwd module. Original patch by Vajrasky Kok.
 
-- Issue #21522: Added Tkinter tests for Listbox.itemconfigure(),
-  PanedWindow.paneconfigure(), and Menu.entryconfigure().
-
-
-What's New in Python 3.4.1?
-===========================
-
-Release date: 2014-05-18
-
-Core and Builtins
------------------
-
 - Issue #21418: Fix a crash in the builtin function super() when called without
   argument and without current frame (ex: embedded Python).
 
@@ -54,55 +27,30 @@
   trash a bad pointer dereference could occur due to a subtle flaw in
   internal iteration logic.
 
-Library
--------
+- Issue #21233: Add new C functions: PyMem_RawCalloc(), PyMem_Calloc(),
+  PyObject_Calloc(), _PyObject_GC_Calloc(). bytes(int) and bytearray(int)
+  now use ``calloc()`` instead of ``malloc()`` for large objects, which is
+  faster and uses less memory (until the bytearray buffer is filled with
+  data).
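
  A minimal sketch of the user-visible behaviour (illustrative only; the change
  is purely in how the memory is obtained)::

     buf = bytearray(16 * 1024 * 1024)     # large buffer, zero-initialized
     assert buf[0] == 0 and buf[-1] == 0   # contents are unchanged; only the
                                           # allocation strategy (calloc) is new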
 
-- Issue #10744: Fix PEP 3118 format strings on ctypes objects with a nontrivial
-  shape.
+- Issue #21377: PyBytes_Concat() now tries to concatenate in-place when the
+  first argument has a reference count of 1.  Patch by Nikolaus Rath.
 
-- Issue #20998: Fixed re.fullmatch() of repeated single character pattern
-  with ignore case.  Original patch by Matthew Barnett.
-
-- Issue #21075: fileinput.FileInput now reads bytes from standard stream if
-  binary mode is specified.  Patch by Sam Kimbrel.
-
-- Issue #21396: Fix TextIOWrapper(..., write_through=True) to not force a
-  flush() on the underlying binary stream.  Patch by akira.
-
-- Issue #21470: Do a better job seeding the random number generator by
-  using enough bytes to span the full state space of the Mersenne Twister.
-
-- Issue #21398: Fix an unicode error in the pydoc pager when the documentation
-  contains characters not encodable to the stdout encoding.
-
-Tests
------
-
-- Issue #17756: Fix test_code test when run from the installed location.
-
-- Issue #17752: Fix distutils tests when run from the installed location.
-
-IDLE
-----
-
-- Issue #18104: Add idlelib/idle_test/htest.py with a few sample tests to begin
-  consolidating and improving human-validated tests of Idle. Change other files
-  as needed to work with htest.  Running the module as __main__ runs all tests.
-
-
-What's New in Python 3.4.1rc1?
-==============================
-
-Release date: 2014-05-05
-
-Core and Builtins
------------------
+- Issue #20355: -W command line options now have higher priority than the
+  PYTHONWARNINGS environment variable.  Patch by Arfrever.
 
 - Issue #21274: Define PATH_MAX for GNU/Hurd in Python/pythonrun.c.
 
+- Issue #20904: Support setting FPU precision on m68k.
+
 - Issue #21209: Fix sending tuples to custom generator objects with the yield
   from syntax.
 
+- Issue #21193: pow(a, b, c) now raises ValueError rather than TypeError when b
+  is negative.  Patch by Josh Rosenberg.
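
  For illustration only: under 3.5 a negative exponent combined with a modulus
  is rejected with ValueError (later releases compute a modular inverse when
  possible)::

     try:
         pow(2, -1, 7)
     except ValueError:
         pass   # 3.5 rejects a negative exponent when a modulus is given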
+
+- PEP 465 and Issue #21176: Add the '@' operator for matrix multiplication.
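
  A minimal sketch of the new operator, which dispatches to __matmul__
  (illustrative only)::

     class Mat:
         def __matmul__(self, other):
             return "Mat @ Mat"

     Mat() @ Mat()   # calls Mat.__matmul__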
+
 - Issue #21134: Fix segfault when str is called on an uninitialized
   UnicodeEncodeError, UnicodeDecodeError, or UnicodeTranslateError object.
 
@@ -117,29 +65,127 @@
 - Issue #20637: Key-sharing now also works for instance dictionaries of
   subclasses.  Patch by Peter Ingebretson.
 
+- Issue #8297: Attributes missing from modules now include the module name
+  in the error text.  Original patch by ysj.ray.
+
+- Issue #19995: %c, %o, %x, and %X now raise TypeError on non-integer input.
+
+- Issue #19655: The ASDL parser - used by the build process to generate code for
+  managing the Python AST in C - was rewritten. The new parser is self-contained
+  and no longer requires carrying along the spark.py parser-generator library;
+  spark.py was removed from the source base.
+
 - Issue #12546: Allow \x00 to be used as a fill character when using str, int,
   float, and complex __format__ methods.
 
+- Issue #20480: Add ipaddress.reverse_pointer. Patch by Leon Weber.
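
  Illustrative use of the new attribute::

     import ipaddress
     ipaddress.ip_address("127.0.0.1").reverse_pointer
     # '1.0.0.127.in-addr.arpa'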
+
 - Issue #13598: Modify string.Formatter to support auto-numbering of
   replacement fields. It now matches the behavior of str.format() in
   this regard. Patches by Phil Elson and Ramchandra Apte.
 
+- Issue #8931: Make alternate formatting ('#') for type 'c' raise an
+  exception. In versions prior to 3.5, '#' with 'c' had no effect. Now
+  specifying it is an error.  Patch by Torsten Landschoff.
+
 Library
 -------
 
+- Issue #20197: Added support for the WebP image type in the imghdr module.
+  Patch by Fabrice Aneche and Claudiu Popa.
+
+- Issue #21513: Speed up some properties of IP addresses (IPv4Address,
+  IPv6Address) such as .is_private or .is_multicast.
+
+- Issue #21538: The plistlib module now supports loading of binary plist files
+  when reference or offset size is not a power of two.
+
+- Issue #21455: Add a default backlog to socket.listen().
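
  Illustration: the backlog argument may now be omitted::

     import socket
     s = socket.socket()
     s.bind(("127.0.0.1", 0))
     s.listen()        # previously listen() required an explicit backlog
     s.close()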
+
+- Issue #21525: Most Tkinter methods which accepted tuples now accept lists too.
+
+- Issue #10744: Fix PEP 3118 format strings on ctypes objects with a nontrivial
+  shape.
+
+- Issue #20826: Optimize ipaddress.collapse_addresses().
+
+- Issue #21487: Optimize ipaddress.summarize_address_range() and
+  ipaddress.{IPv4Network,IPv6Network}.subnets().
+
+- Issue #21486: Optimize parsing of netmasks in ipaddress.IPv4Network and
+  ipaddress.IPv6Network.
+
+- Issue #13916: Disallowed the surrogatepass error handler for non-UTF-*
+  encodings.
+
+- Issue #20998: Fixed re.fullmatch() of a repeated single-character pattern
+  with ignored case.  Original patch by Matthew Barnett.
+
+- Issue #21075: fileinput.FileInput now reads bytes from standard stream if
+  binary mode is specified.  Patch by Sam Kimbrel.
+
+- Issue #19775: Add a samefile() method to pathlib Path objects.  Initial
+  patch by Vajrasky Kok.
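
  A small usage sketch (illustrative only)::

     from pathlib import Path
     Path(".").samefile(".")   # True: both names refer to the same directory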
+
+- Issue #21398: Fix a Unicode error in the pydoc pager when the documentation
+  contains characters not encodable to the stdout encoding.
+
+- Issue #16531: ipaddress.IPv4Network and ipaddress.IPv6Network now accept
+  an (address, netmask) tuple argument, so as to easily construct network
+  objects from existing addresses.
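
  Illustration of the new constructor form; here the second element is a
  prefix length given as an integer::

     import ipaddress
     ipaddress.IPv4Network(("192.0.2.0", 24))
     # IPv4Network('192.0.2.0/24')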
+
+- Issue #21156: importlib.abc.InspectLoader.source_to_code() is now a
+  staticmethod.
+
+- Issue #21424: Simplified and optimized heapq.nlargest() and nsmallest()
+  to make fewer tuple comparisons.
+
+- Issue #21396: Fix TextIOWrapper(..., write_through=True) to not force a
+  flush() on the underlying binary stream.  Patch by akira.
+
+- Issue #18314: os.unlink() now removes junctions on Windows.  Patch by Kim Gräsman.
+
 - Issue #21088: Bugfix for curses.window.addch() regression in 3.4.0.
   In porting to Argument Clinic, the first two arguments were reversed.
 
+- Issue #10650: Remove the non-standard 'watchexp' parameter from the
+  Decimal.quantize() method in the Python version.  It had never been
+  present in the C version.
+
 - Issue #21469:  Reduced the risk of false positives in robotparser by
   checking to make sure that robots.txt has been read or does not exist
   prior to returning True in can_fetch().
 
+- Issue #19414: Have the OrderedDict mark deleted links as unusable.
+  This gives an early failure if the link is deleted during iteration.
+
+- Issue #21421: Add __slots__ to the MappingViews ABC.
+  Patch by Josh Rosenberg.
+
+- Issue #21101: Eliminate double hashing in the C speed-up code for
+  collections.Counter().
+
 - Issue #21321: itertools.islice() now releases the reference to the source
   iterator when the slice is exhausted.  Patch by Anton Afanasyev.
 
+- Issue #21057: TextIOWrapper now allows the underlying binary stream's
+  read() or read1() method to return an arbitrary bytes-like object
+  (such as a memoryview).  Patch by Nikolaus Rath.
+
+- Issue #20951: SSLSocket.send() now raises either SSLWantReadError or
+  SSLWantWriteError on a non-blocking socket if the operation would block.
+  Previously, it would return 0.  Patch by Nikolaus Rath.
+
+- Issue #13248: Removed the previously deprecated asyncore.dispatcher
+  __getattr__ cheap inheritance hack.
+
 - Issue #9815: assertRaises now tries to clear references to local variables
   in the exception's traceback.
 
+- Issue #19940: ssl.cert_time_to_seconds() now interprets the given time
+  string in the UTC timezone (as specified in RFC 5280), not the local
+  timezone.
+
 - Issue #13204: Calling sys.flags.__new__ would crash the interpreter,
   now it raises a TypeError.
 
@@ -163,9 +209,23 @@
 - Issue #12220: minidom now raises a custom ValueError indicating it doesn't
   support spaces in URIs instead of letting a 'split' ValueError bubble up.
 
+- Issue #21068: The ssl.PROTOCOL* constants are now enum members.
+
+- Issue #21262: New method assert_not_called for Mock.
+  It raises AssertionError if the mock has been called.
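
  Usage sketch (illustrative only)::

     from unittest import mock
     m = mock.Mock()
     m.assert_not_called()   # passes; the mock has not been called yet
     m()
     # m.assert_not_called() would now raise AssertionError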
+
+- Issue #21238: New keyword argument `unsafe` to Mock. It raises
+  `AttributeError` if an attribute name starts with "assert" or "assret".
+
+- Issue #20896: ssl.get_server_certificate() now uses PROTOCOL_SSLv23, not
+  PROTOCOL_SSLv3, for maximum compatibility.
+
 - Issue #21239: patch.stopall() didn't work deterministically when the same
   name was patched more than once.
 
+- Issue #21203: Updated fileConfig and dictConfig to remove inconsistencies.
+  Thanks to Jure Koren for the patch.
+
 - Issue #21222: Passing name keyword argument to mock.create_autospec now
   works.
 
@@ -195,17 +255,31 @@
 - Issue #21171: Fixed undocumented filter API of the rot13 codec.
   Patch by Berker Peksag.
 
+- Issue #20539: Improved math.factorial error message for large positive inputs
+  and changed exception type (OverflowError -> ValueError) for large negative
+  inputs.
+
 - Issue #21172: isinstance check relaxed from dict to collections.Mapping.
 
 - Issue #21155: asyncio.EventLoop.create_unix_server() now raises a ValueError
   if path and sock are specified at the same time.
 
+- Issue #21136: Avoid unnecessary normalization of Fractions resulting from
+  power and other operations.  Patch by Raymond Hettinger.
+
+- Issue #17621: Introduce importlib.util.LazyLoader.
+
+- Issue #21076: signal module constants were turned into enums.
+  Patch by Giampaolo Rodola'.
+
+- Issue #20636: Improved the repr of Tkinter widgets.
+
+- Issue #19505: The items, keys, and values views of OrderedDict now support
+  reverse iteration using reversed().
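
  For illustration::

     from collections import OrderedDict
     od = OrderedDict([("a", 1), ("b", 2)])
     list(reversed(od.keys()))     # ['b', 'a']
     list(reversed(od.values()))   # [2, 1]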
+
 - Issue #21149: Improved thread-safety in logging cleanup during interpreter
   shutdown. Thanks to Devin Jeanpierre for the patch.
 
-- Issue #20145: `assertRaisesRegex` and `assertWarnsRegex` now raise a
-  TypeError if the second argument is not a string or compiled regex.
-
 - Issue #21058: Fix a leak of file descriptor in
   :func:`tempfile.NamedTemporaryFile`, close the file descriptor if
   :func:`io.open` fails
@@ -215,6 +289,9 @@
 - Issue #21013: Enhance ssl.create_default_context() when used for server side
   sockets to provide better security by default.
 
+- Issue #20145: `assertRaisesRegex` and `assertWarnsRegex` now raise a
+  TypeError if the second argument is not a string or compiled regex.
+
 - Issue #20633: Replace relative import by absolute import.
 
 - Issue #20980: Stop wrapping exception when using ThreadPool.
@@ -228,6 +305,8 @@
   curve for ECDH key exchange on OpenSSL 1.0.2 and later, and otherwise
   default to "prime256v1".
 
+- Issue #21000: Improve the command-line interface of json.tool.
+
 - Issue #20995: Enhance default ciphers used by the ssl module to enable
   better security and prioritize perfect forward secrecy.
 
@@ -235,6 +314,24 @@
 
 - Issue #21499: Ignore __builtins__ in several test_importlib.test_api tests.
 
+- Issue #20627: xmlrpc.client.ServerProxy is now a context manager.
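
  Illustrative sketch; the URL is a placeholder and no call is made::

     from xmlrpc.client import ServerProxy
     with ServerProxy("http://localhost:8000/") as proxy:
         pass   # the underlying transport is closed when the block exits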
+
+- Issue #19165: The formatter module now raises DeprecationWarning instead of
+  PendingDeprecationWarning.
+
+- Issue #13936: Remove the ability of datetime.time instances to be considered
+  false in boolean contexts.
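
  Illustration of the new behaviour::

     from datetime import time
     bool(time(0, 0))   # True in 3.5; midnight evaluated as false before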
+
+- Issue #18931: selectors module now supports /dev/poll on Solaris.
+  Patch by Giampaolo Rodola'.
+
+- Issue #19977: When the ``LC_CTYPE`` locale is the POSIX locale (``C`` locale),
+  :py:data:`sys.stdin` and :py:data:`sys.stdout` now use the
+  ``surrogateescape`` error handler, instead of the ``strict`` error handler.
+
+- Issue #20574: Implement an incremental decoder for the cp65001 codec (Windows code
+  page 65001, Microsoft UTF-8).
+
 - Issue #20879: Delay the initialization of encoding and decoding tables for
   base32, ascii85 and base85 codecs in the base64 module, and delay the
   initialization of the unquote_to_bytes() table of the urllib.parse module, to
@@ -269,6 +366,14 @@
 - Issue #19748: On AIX, time.mktime() now raises an OverflowError for years
   outside the range [1902; 2037].
 
+- Issue #19573: inspect.signature: Use enum for parameter kind constants.
+
+- Issue #20726: inspect.signature: Make Signature and Parameter picklable.
+
+- Issue #17373: Add inspect.Signature.from_callable method.
+
+- Issue #20378: Improve repr of inspect.Signature and inspect.Parameter.
+
 - Issue #20816: Fix inspect.getcallargs() to raise correct TypeError for
   missing keyword-only arguments. Patch by Jeremiah Lowin.
 
@@ -285,6 +390,11 @@
   positional-or-keyword arguments passed as keyword arguments become
   keyword-only.
 
+- Issue #20334: inspect.Signature and inspect.Parameter are now hashable.
+
+- Issue #15916: doctest.DocTestSuite returns an empty unittest.TestSuite instead
+  of raising ValueError if it finds no tests.
+
 - Issue #21209: Fix asyncio.tasks.CoroWrapper to workaround a bug
   in yield-from implementation in CPythons prior to 3.4.1.
 
@@ -300,6 +410,8 @@
 Extension Modules
 -----------------
 
+- Issue #21407: _decimal: The module now supports function signatures.
+
 - Issue #21276: posixmodule: Don't define USE_XATTRS on KFreeBSD and the Hurd.
 - Issue #21226: Set up modules properly in PyImport_ExecCodeModuleObject
   (and friends).
@@ -307,6 +419,10 @@
 IDLE
 ----
 
+- Issue #18104: Add idlelib/idle_test/htest.py with a few sample tests to begin
+  consolidating and improving human-validated tests of Idle. Change other files
+  as needed to work with htest.  Running the module as __main__ runs all tests.
+
 - Issue #21139: Change default paragraph width to 72, the PEP 8 recommendation.
 
 - Issue #21284: Paragraph reformat test passes after user changes reformat width.
@@ -317,8 +433,21 @@
 Build
 -----
 
+- Issue #21141: The Windows build process no longer attempts to find Perl,
+  instead relying on OpenSSL source being configured and ready to build.  The
+  ``PCbuild\build_ssl.py`` script has been re-written and re-named to
+  ``PCbuild\prepare_ssl.py``, and takes care of configuring OpenSSL source
+  for both 32 and 64 bit platforms.  OpenSSL sources obtained from
+  svn.python.org will always be pre-configured and ready to build.
+
+- Issue #21037: Add a build option to enable AddressSanitizer support.
+
 - The Windows build now includes OpenSSL 1.0.1g
 
+- Issue #19962: The Windows build process now creates "python.bat" in the
+  root of the source tree, which passes all arguments through to the most
+  recently built interpreter.
+
 - Issue #21285: Refactor and fix curses configure check to always search
   in a ncursesw directory.
 
@@ -326,9 +455,17 @@
   include directories if they aren't already being searched. This avoids
   an explicit runtime library dependency.
 
+- Issue #17861: Tools/scripts/generate_opcode_h.py automatically regenerates
+  Include/opcode.h from Lib/opcode.py whenever the latter changes.
+
 - Issue #20644: OS X installer build support for documentation build changes
   in 3.4.1: assume externally supplied sphinx-build is available in /usr/bin.
 
+- Issue #20022: Eliminate use of deprecated bundlebuilder in OS X builds.
+
+- Issue #15968: Incorporated Tcl, Tk, and Tix builds into the Windows build
+  solution.
+
 C API
 -----
 - Issue #20942: PyImport_ImportFrozenModuleObject() no longer sets __file__ to
@@ -341,6 +478,9 @@
 - Issue #17386: Expanded functionality of the ``Doc/make.bat`` script to make
   it much more comparable to ``Doc/Makefile``.
 
+- Issue #21312: Update the thread_foobar.h template file to include newer
+  threading APIs.  Patch by Jack McCracken.
+
 - Issue #21043: Remove the recommendation for specific CA organizations and to
   mention the ability to load the OS certificates.
 
@@ -358,6 +498,13 @@
 Tests
 -----
 
+- Issue #21522: Added Tkinter tests for Listbox.itemconfigure(),
+  PanedWindow.paneconfigure(), and Menu.entryconfigure().
+
+- Issue #17756: Fix test_code test when run from the installed location.
+
+- Issue #17752: Fix distutils tests when run from the installed location.
+
 - Issue #18604: Consolidated checks for GUI availability.  All platforms now
   at least check whether Tk can be instantiated when the GUI resource is
   requested.
@@ -375,6 +522,8 @@
 
 - Issue #21097: Move test_namespace_pkgs into test_importlib.
 
+- Issue #21503: Use test_both() consistently in test_importlib.
+
 - Issue #20939: Avoid various network test failures due to new
   redirect of http://www.python.org/ to https://www.python.org:
   use http://www.example.com instead.
@@ -4675,4137 +4824,4 @@
 - Issue #18569: The installer now adds .py to the PATHEXT variable when extensions
   are registered. Patch by Paul Moore.
 
-
-What's New in Python 3.3.0?
-===========================
-
-*Release date: 29-Sep-2012*
-
-Core and Builtins
------------------
-
-- Issue #16046: Fix loading sourceless legacy .pyo files.
-
-- Issue #16060: Fix refcounting bug when `__trunc__()` returns an object whose
-  `__int__()` gives a non-integer.  Patch by Serhiy Storchaka.
-
-Extension Modules
------------------
-
-- Issue #16012: Fix a regression in pyexpat. The parser's `UseForeignDTD()`
-  method doesn't require an argument again.
-
-
-What's New in Python 3.3.0 Release Candidate 3?
-===============================================
-
-*Release date: 23-Sep-2012*
-
-Core and Builtins
------------------
-
-- Issue #15900: Fix reference leak in `PyUnicode_TranslateCharmap()`.
-
-- Issue #15926: Fix crash after multiple reinitializations of the interpreter.
-
-- Issue #15895: Fix FILE pointer leak in one error branch of
-  `PyRun_SimpleFileExFlags()` when filename points to a pyc/pyo file, closeit is
-  false an and set_main_loader() fails.
-
-- Fixes for a few crash and memory leak regressions found by Coverity.
-
-Library
--------
-
-- Issue #15882: Change `_decimal` to accept any coefficient tuple when
-  constructing infinities. This is done for backwards compatibility with
-  decimal.py: Infinity coefficients are undefined in _decimal (in accordance
-  with the specification).
-
-- Issue #15925: Fix a regression in `email.util` where the `parsedate()` and
-  `parsedate_tz()` functions did not return None anymore when the argument could
-  not be parsed.
-
-Extension Modules
------------------
-
-- Issue #15973: Fix a segmentation fault when comparing datetime timezone
-  objects.
-
-- Issue #15977: Fix memory leak in Modules/_ssl.c when the function
-  _set_npn_protocols() is called multiple times, thanks to Daniel Sommermann.
-
-- Issue #15969: `faulthandler` module: rename dump_tracebacks_later() to
-  dump_traceback_later() and cancel_dump_tracebacks_later() to
-  cancel_dump_traceback_later().
-
-- _decimal module: use only C 89 style comments.
-
-
-What's New in Python 3.3.0 Release Candidate 2?
-===============================================
-
-*Release date: 09-Sep-2012*
-
-Core and Builtins
------------------
-
-- Issue #13992: The trashcan mechanism is now thread-safe.  This eliminates
-  sporadic crashes in multi-thread programs when several long deallocator chains
-  ran concurrently and involved subclasses of built-in container types.
-
-- Issue #15784: Modify `OSError`.__str__() to better distinguish between errno
-  error numbers and Windows error numbers.
-
-- Issue #15781: Fix two small race conditions in import's module locking.
-
-Library
--------
-
-- Issue #17158: Add 'symbols' to help() welcome message; clarify
-  'modules spam' messages.
-
-- Issue #15847: Fix a regression in argparse, which did not accept tuples as
-  argument lists anymore.
-
-- Issue #15828: Restore support for C extensions in `imp.load_module()`.
-
-- Issue #15340: Fix importing the random module when ``/dev/urandom`` cannot be
-  opened.  This was a regression caused by the hash randomization patch.
-
-- Issue #10650: Deprecate the watchexp parameter of the `Decimal.quantize()`
-  method.
-
-- Issue #15785: Modify `window.get_wch()` API of the curses module: return a
-  character for most keys, and an integer for special keys, instead of always
-  returning an integer. So it is now possible to distinguish special keys like
-  keypad keys.
-
-- Issue #14223: Fix `window.addch()` of the curses module for special characters
-  like curses.ACS_HLINE: the Python function addch(int) and addch(bytes) is now
-  calling the C function waddch()/mvwaddch() (as it was done in Python 3.2),
-  instead of wadd_wch()/mvwadd_wch(). The Python function addch(str) is still
-  calling the C function wadd_wch()/mvwadd_wch() if the Python curses is linked
-  to libncursesw.
-
-Build
------
-
-- Issue #15822: Really ensure 2to3 grammar pickles are properly installed
-  (replaces fixes for Issue #15645).
-
-Documentation
--------------
-
-- Issue #15814: The memoryview enhancements in 3.3.0 accidentally permitted the
-  hashing of multi-dimensional memorviews and memoryviews with multi-byte item
-  formats. The intended restrictions have now been documented - they will be
-  correctly enforced in 3.3.1.
-
-
-What's New in Python 3.3.0 Release Candidate 1?
-===============================================
-
-*Release date: 25-Aug-2012*
-
-Core and Builtins
------------------
-
-- Issue #15573: memoryview comparisons are now performed by value with full
-  support for any valid struct module format definition.
-
-- Issue #15316: When an item in the fromlist for `__import__()` doesn't exist,
-  don't raise an error, but if an exception is raised as part of an import do
-  let that propagate.
-
-- Issue #15778: Ensure that ``str(ImportError(msg))`` returns a str even when
-  msg isn't a str.
-
-- Issue #2051: Source file permission bits are once again correctly copied to
-  the cached bytecode file. (The migration to importlib reintroduced this
-  problem because these was no regression test. A test has been added as part of
-  this patch)
-
-- Issue #15761: Fix crash when ``PYTHONEXECUTABLE`` is set on Mac OS X.
-
-- Issue #15726: Fix incorrect bounds checking in PyState_FindModule.  Patch by
-  Robin Schreiber.
-
-- Issue #15604: Update uses of `PyObject_IsTrue()` to check for and handle
-  errors correctly.  Patch by Serhiy Storchaka.
-
-- Issue #14846: `importlib.FileFinder` now handles the case where the directory
-  being searched is removed after a previous import attempt.
-
-Library
--------
-
-- Issue #13370: Ensure that ctypes works on Mac OS X when Python is compiled
-  using the clang compiler.
-
-- Issue #13072: The array module's 'u' format code is now deprecated and will be
-  removed in Python 4.0.
-
-- Issue #15544: Fix Decimal.__float__ to work with payload-carrying NaNs.
-
-- Issue #15776: Allow pyvenv to work in existing directory with --clean.
-
-- Issue #15249: email's BytesGenerator now correctly mangles From lines (when
-  requested) even if the body contains undecodable bytes.
-
-- Issue #15777: Fix a refleak in _posixsubprocess.
-
-- Issue ##665194: Update `email.utils.localtime` to use datetime.astimezone and
-  correctly handle historic changes in UTC offsets.
-
-- Issue #15199: Fix JavaScript's default MIME type to application/javascript.
-  Patch by Bohuslav Kabrda.
-
-- Issue #12643: `code.InteractiveConsole` now respects `sys.excepthook` when
-  displaying exceptions.  Patch by Aaron Iles.
-
-- Issue #13579: `string.Formatter` now understands the 'a' conversion specifier.
-
-- Issue #15595: Fix ``subprocess.Popen(universal_newlines=True)`` for certain
-  locales (utf-16 and utf-32 family). Patch by Chris Jerdonek.
-
-- Issue #15477: In cmath and math modules, add workaround for platforms whose
-  system-supplied log1p function doesn't respect signs of zeros.
-
-- Issue #15715: `importlib.__import__()` will silence an ImportError when the
-  use of fromlist leads to a failed import.
-
-- Issue #14669: Fix pickling of connections and sockets on Mac OS X by
-  sending/receiving an acknowledgment after file descriptor transfer.
-  TestPicklingConnection has been reenabled for Mac OS X.
-
-- Issue #11062: Fix adding a message from file to Babyl mailbox.
-
-- Issue #15646: Prevent equivalent of a fork bomb when using `multiprocessing`
-  on Windows without the ``if __name__ == '__main__'`` idiom.
-
-IDLE
-----
-
-- Issue #15678: Fix IDLE menus when started from OS X command line (3.3.0b2
-  regression).
-
-Documentation
--------------
-
-- Touched up the Python 2 to 3 porting guide.
-
-- Issue #14674: Add a discussion of the `json` module's standard compliance.
-  Patch by Chris Rebert.
-
-- Create a 'Concurrent Execution' section in the docs, and split up the
-  'Optional Operating System Services' section to use a more user-centric
-  classification scheme (splitting them across the new CE section, IPC and text
-  processing). Operating system limitations can be reflected with the Sphinx
-  ``:platform:`` tag, it doesn't make sense as part of the Table of Contents.
-
-- Issue #4966: Bring the sequence docs up to date for the Py3k transition and
-  the many language enhancements since they were original written.
-
-- The "path importer" misnomer has been replaced with Eric Snow's
-  more-awkward-but-at-least-not-wrong suggestion of "path based finder" in the
-  import system reference docs.
-
-- Issue #15640: Document `importlib.abc.Finder` as deprecated.
-
-- Issue #15630: Add an example for "continue" stmt in the tutorial.  Patch by
-  Daniel Ellis.
-
-Tests
------
-
-- Issue #15747: ZFS always returns EOPNOTSUPP when attempting to set the
-  UF_IMMUTABLE flag (via either chflags or lchflags); refactor affected tests in
-  test_posix.py to account for this.
-
-- Issue #15285: Refactor the approach for testing connect timeouts using two
-  external hosts that have been configured specifically for this type of test.
-
-- Issue #15743: Remove the deprecated method usage in `urllib` tests. Patch by
-  Jeff Knupp.
-
-- Issue #15615: Add some tests for the `json` module's handling of invalid input
-  data.  Patch by Kushal Das.
-
-Build
------
-
-- Output lib files for PGO build into PGO directory.
-
-- Pick up 32-bit launcher from PGO directory on 64-bit PGO build.
-
-- Drop ``PC\python_nt.h`` as it's not used.  Add input dependency on custom
-  build step.
-
-- Issue #15511: Drop explicit dependency on pythonxy.lib from _decimal amd64
-  configuration.
-
-- Add missing PGI/PGO configurations for pywlauncher.
-
-- Issue #15645: Ensure 2to3 grammar pickles are properly installed.
-
-
-What's New in Python 3.3.0 Beta 2?
-==================================
-
-*Release date: 12-Aug-2012*
-
-Core and Builtins
------------------
-
-- Issue #15568: Fix the return value of ``yield from`` when StopIteration is
-  raised by a custom iterator.
-
-- Issue #13119: `sys.stdout` and `sys.stderr` are now using "\r\n" newline on
-  Windows, as Python 2.
-
-- Issue #15534: Fix the fast-search function for non-ASCII Unicode strings.
-
-- Issue #15508: Fix the docstring for `__import__()` to have the proper default
-  value of 0 for 'level' and to not mention negative levels since they are not
-  supported.
-
-- Issue #15425: Eliminated traceback noise from more situations involving
-  importlib.
-
-- Issue #14578: Support modules registered in the Windows registry again.
-
-- Issue #15466: Stop using TYPE_INT64 in marshal, to make importlib.h (and other
-  byte code files) equal between 32-bit and 64-bit systems.
-
-- Issue #1692335: Move initial exception args assignment to
-  `BaseException.__new__()` to help pickling of naive subclasses.
-
-- Issue #12834: Fix `PyBuffer_ToContiguous()` for non-contiguous arrays.
-
-- Issue #15456: Fix code `__sizeof__()` after #12399 change.  Patch by Serhiy
-  Storchaka.
-
-- Issue #15404: Refleak in PyMethodObject repr.
-
-- Issue #15394: An issue in `PyModule_Create()` that caused references to be
-  leaked on some error paths has been fixed.  Patch by Julia Lawall.
-
-- Issue #15368: An issue that caused bytecode generation to be non-deterministic
-  has been fixed.
-
-- Issue #15202: Consistently use the name "follow_symlinks" for new parameters
-  in os and shutil functions.
-
-- Issue #15314: ``__main__.__loader__`` is now set correctly during interpreter
-  startup.
-
-- Issue #15111: When a module imported using 'from import' has an ImportError
-  inside itself, don't mask that fact behind a generic ImportError for the
-  module itself.
-
-- Issue #15293: Add GC support to the AST base node type.
-
-- Issue #15291: Fix a memory leak where AST nodes where not properly
-  deallocated.
-
-- Issue #15110: Fix the tracebacks generated by "import xxx" to not show the
-  importlib stack frames.
-
-- Issue #16369: Global PyTypeObjects not initialized with PyType_Ready(...).
-
-- Issue #15020: The program name used to search for Python's path is now
-  "python3" under Unix, not "python".
-
-- Issue #15897: zipimport.c doesn't check return value of fseek().
-  Patch by Felipe Cruz.
-
-- Issue #15033: Fix the exit status bug when modules invoked using -m switch,
-  return the proper failure return value (1). Patch contributed by Jeff Knupp.
-
-- Issue #15229: An `OSError` subclass whose __init__ doesn't call back
-  OSError.__init__ could produce incomplete instances, leading to crashes when
-  calling str() on them.
-
-- Issue #15307: Virtual environments now use symlinks with framework builds on
-  Mac OS X, like other POSIX builds.
-
-Library
--------
-
-- Issue #14590: configparser now correctly strips inline comments when delimiter
-  occurs earlier without preceding space.
-
-- Issue #15424: Add a `__sizeof__()` implementation for array objects.  Patch by
-  Ludwig Hähne.
-
-- Issue #15576: Allow extension modules to act as a package's __init__ module.
-
-- Issue #15502: Have `importlib.invalidate_caches()` work on `sys.meta_path`
-  instead of `sys.path_importer_cache`.
-
-- Issue #15163: Pydoc shouldn't list __loader__ as module data.
-
-- Issue #15471: Do not use mutable objects as defaults for
-  `importlib.__import__()`.
-
-- Issue #15559: To avoid a problematic failure mode when passed to the bytes
-  constructor, objects in the ipaddress module no longer implement `__index__()`
-  (they still implement `__int__()` as appropriate).
-
-- Issue #15546: Fix handling of pathological input data in the peek() and
-  read1() methods of the BZ2File, GzipFile and LZMAFile classes.
-
-- Issue #12655: Instead of requiring a custom type, `os.sched_getaffinity()` and
-  `os.sched_setaffinity()` now use regular sets of integers to represent the
-  CPUs a process is restricted to.
-
-- Issue #15538: Fix compilation of the `socket.getnameinfo()` /
-  `socket.getaddrinfo()` emulation code.  Patch by Philipp Hagemeister.
-
-- Issue #15519: Properly expose WindowsRegistryFinder in importlib (and use the
-  correct term for it).  Original patch by Eric Snow.
-
-- Issue #15502: Bring the importlib ABCs into line with the current state of the
-  import protocols given PEP 420.  Original patch by Eric Snow.
-
-- Issue #15499: Launching a webbrowser in Unix used to sleep for a few seconds.
-  Original patch by Anton Barkovsky.
-
-- Issue #15463: The faulthandler module truncates strings to 500 characters,
-  instead of 100, to be able to display long file paths.
-
-- Issue #6056: Make `multiprocessing` use setblocking(True) on the sockets it
-  uses.  Original patch by J Derek Wilson.
-
-- Issue #15364: Fix sysconfig.get_config_var('srcdir') to be an absolute path.
-
-- Issue #15413: `os.times()` had disappeared under Windows.
-
-- Issue #15402: An issue in the struct module that caused `sys.getsizeof()` to
-  return incorrect results for struct.Struct instances has been fixed.  Initial
-  patch by Serhiy Storchaka.
-
-- Issue #15232: When mangle_from is True, `email.Generator` now correctly
-  mangles lines that start with 'From ' that occur in a MIME preamble or
-  epilogue.
-
-- Issue #15094: Incorrectly placed #endif in _tkinter.c.  Patch by Serhiy
-  Storchaka.
-
-- Issue #13922: `argparse` no longer incorrectly strips '--'s that appear after
-  the first one.
-
-- Issue #12353: `argparse` now correctly handles null argument values.
-
-- Issue #10017, issue #14998: Fix TypeError using pprint on dictionaries with
-  user-defined types as keys or other unorderable keys.
-
-- Issue #15397: `inspect.getmodulename()` is now based directly on importlib via
-  a new `importlib.machinery.all_suffixes()` API.
-
-- Issue #14635: `telnetlib` will use poll() rather than select() when possible to
-  avoid failing due to the select() file descriptor limit.
-
-- Issue #15180: Clarify posixpath.join() error message when mixing str & bytes.
-
-- Issue #15343: pkgutil now includes an iter_importer_modules implementation for
-  importlib.machinery.FileFinder (similar to the way it already handled
-  zipimport.zipimporter).
-
-- Issue #15314: runpy now sets __main__.__loader__ correctly.
-
-- Issue #15357: The import emulation in pkgutil is now deprecated. pkgutil uses
-  importlib internally rather than the emulation.
-
-- Issue #15233: Python now guarantees that callables registered with the atexit
-  module will be called in a deterministic order.
-
-- Issue #15238: `shutil.copystat()` now copies Linux "extended attributes".
-
-- Issue #15230: runpy.run_path now correctly sets __package__ as described in
-  the documentation.
-
-- Issue #15315: Support VS 2010 in distutils cygwincompiler.
-
-- Issue #15294: Fix a regression in pkgutil.extend_path()'s handling of nested
-  namespace packages.
-
-- Issue #15056: `imp.cache_from_source()` and `imp.source_from_cache()` raise
-  NotImplementedError when `sys.implementation.cache_tag` is set to None.
-
-- Issue #15256: Grammatical mistake in exception raised by `imp.find_module()`.
-
-- Issue #5931: `wsgiref` environ variable SERVER_SOFTWARE will specify an
-  implementation specific term like CPython, Jython instead of generic "Python".
-
-- Issue #13248: Remove obsolete argument "max_buffer_size" of BufferedWriter and
-  BufferedRWPair, from the io module.
-
-- Issue #13248: Remove obsolete argument "version" of `argparse.ArgumentParser`.
-
-- Issue #14814: Implement more consistent ordering and sorting behaviour for
-  ipaddress objects.
-
-- Issue #14814: `ipaddress` network objects correctly return NotImplemented when
-  compared to arbitrary objects instead of raising TypeError.
-
-- Issue #14990: Correctly fail with SyntaxError on invalid encoding declaration.
-
-- Issue #14814: `ipaddress` now provides more informative error messages when
-  constructing instances directly (changes permitted during beta due to
-  provisional API status).
-
-- Issue #15247: `io.FileIO` now raises an error when given a file descriptor
-  pointing to a directory.
-
-- Issue #15261: Stop os.stat(fd) crashing on Windows when fd not open.
-
-- Issue #15166: Implement `imp.get_tag()` using `sys.implementation.cache_tag`.
-
-- Issue #15210: Catch KeyError when `importlib.__init__()` can't find
-  _frozen_importlib in sys.modules, not ImportError.
-
-- Issue #15030: `importlib.abc.PyPycLoader` now supports the new source size
-  header field in .pyc files.
-
-- Issue #5346: Preserve permissions of mbox, MMDF and Babyl mailbox files on
-  flush().
-
-- Issue #10571: Fix the "--sign" option of distutils' upload command.  Patch by
-  Jakub Wilk.
-
-- Issue #9559: If messages were only added, a new file is no longer created and
-  renamed over the old file when flush() is called on an mbox, MMDF or Babyl
-  mailbox.
-
-- Issue #10924: Fixed `crypt.mksalt()` to use a RNG that is suitable for
-  cryptographic purpose.
-
-- Issue #15184: Ensure consistent results of OS X configuration tailoring for
-  universal builds by factoring out common OS X-specific customizations from
-  sysconfig, distutils.sysconfig, distutils.util, and distutils.unixccompiler
-  into a new module _osx_support.
-
-C API
------
-
-- Issue #15610: `PyImport_ImportModuleEx()` now uses a 'level' of 0 instead of -1.
-
-- Issue #15169, issue #14599: Strip out the C implementation of
-  `imp.source_from_cache()` used by PyImport_ExecCodeModuleWithPathnames() and
-  used the Python code instead. Leads to PyImport_ExecCodeModuleObject() to not
-  try to infer the source path from the bytecode path as
-  PyImport_ExecCodeModuleWithPathnames() does.
-
-Extension Modules
------------------
-
-- Issue #6493: An issue in ctypes on Windows that caused structure bitfields of
-  type `ctypes.c_uint32` and width 32 to incorrectly be set has been fixed.
-
-- Issue #15194: Update libffi to the 3.0.11 release.
-
-IDLE
-----
-
-- Issue #13052: Fix IDLE crashing when replace string in Search/Replace dialog
-  ended with ``\``.  Patch by Roger Serwy.
-
-Tools/Demos
------------
-
-- Issue #15458: python-config gets a new option --configdir to print the $LIBPL
-  value.
-
-- Move importlib.test.benchmark to Tools/importbench.
-
-- Issue #12605: The gdb hooks for debugging CPython (within Tools/gdb) have been
-  enhanced to show information on more C frames relevant to CPython within the
-  "py-bt" and "py-bt-full" commands:
-
-  * C frames that are waiting on the GIL
-  * C frames that are garbage-collecting
-  * C frames that are due to the invocation of a PyCFunction
-
-Documentation
--------------
-
-- Issue #15041: Update "see also" list in tkinter documentation.
-
-- Issue #15444: Use proper spelling for non-ASCII contributor names.  Patch by
-  Serhiy Storchaka.
-
-- Issue #15295: Reorganize and rewrite the documentation on the import system.
-
-- Issue #15230: Clearly document some of the limitations of the runpy module and
-  nudge readers towards importlib when appropriate.
-
-- Issue #15053: Copy Python 3.3 import lock change notice to all relevant
-  functions in imp instead of just at the top of the relevant section.
-
-- Issue #15288: Link to the term "loader" in notes in pkgutil about how things
-  won't work as expected in Python 3.3 and mark the requisite functions as
-  "changed" since they will no longer work with modules directly imported by
-  import itself.
-
-- Issue #13557: Clarify effect of giving two different namespaces to `exec()` or
-  `execfile()`.
-
-- Issue #15250: Document that `filecmp.dircmp()` compares files shallowly. Patch
-  contributed by Chris Jerdonek.
-
-- Issue #15442: Expose the default list of directories ignored by
-  `filecmp.dircmp()` as a module attribute, and expand the list to more modern
-  values.
-
-Tests
------
-
-- Issue #15467: Move helpers for `__sizeof__()` tests into test_support.  Patch
-  by Serhiy Storchaka.
-
-- Issue #15320: Make iterating the list of tests thread-safe when running tests
-  in multiprocess mode. Patch by Chris Jerdonek.
-
-- Issue #15168: Move `importlib.test` to `test.test_importlib`.
-
-- Issue #15091: Reactivate a test on UNIX which was failing thanks to a
-  forgotten `importlib.invalidate_caches()` call.
-
-- Issue #15230: Adopted a more systematic approach in the runpy tests.
-
-- Issue #15300: Ensure the temporary test working directories are in the same
-  parent folder when running tests in multiprocess mode from a Python build.
-  Patch by Chris Jerdonek.
-
-- Issue #15284: Skip {send,recv}msg tests in test_socket when IPv6 is not
-  enabled. Patch by Brian Brazil.
-
-- Issue #15277: Fix a resource leak in support.py when IPv6 is disabled.  Patch
-  by Brian Brazil.
-
-Build
------
-
-- Issue #11715: Fix multiarch detection without having Debian development tools
-  (dpkg-dev) installed.
-
-- Issue #15037: Build OS X installers with local copy of ncurses 5.9 libraries
-  to avoid curses.unget_wch bug present in older versions of ncurses such as
-  those shipped with OS X.
-
-- Issue #15560: Fix building _sqlite3 extension on OS X with an SDK.  Also, for
-  OS X installers, ensure consistent sqlite3 behavior and feature availability
-  by building a local copy of libsqlite3 rather than depending on the wide range
-  of versions supplied with various OS X releases.
-
-- Issue #8847: Disable COMDAT folding in Windows PGO builds.
-
-- Issue #14018: Fix OS X Tcl/Tk framework checking when using OS X SDKs.
-
-- Issue #16256: OS X installer now sets correct permissions for doc directory.
-
-- Issue #15431: Add _freeze_importlib project to regenerate importlib.h on
-  Windows. Patch by Kristján Valur Jónsson.
-
-- Issue #14197: For OS X framework builds, ensure links to the shared library
-  are created with the proper ABI suffix.
-
-- Issue #14330: For cross builds, don't use host python, use host search paths
-  for host compiler.
-
-- Issue #15235: Allow Berkley DB versions up to 5.3 to build the dbm module.
-
-- Issue #15268: Search curses.h in /usr/include/ncursesw.
-
-
-What's New in Python 3.3.0 Beta 1?
-==================================
-
-*Release date: 27-Jun-2012*
-
-Core and Builtins
------------------
-
-- Fix a (most likely) very rare memory leak when calling main() and not being
-  able to decode a command-line argument.
-
-- Issue #14815: Use Py_ssize_t instead of long for the object hash, to
-  preserve all 64 bits of hash on Win64.
-
-- Issue #12268: File readline, readlines and read() or readall() methods
-  no longer lose data when an underlying read system call is interrupted.
-  IOError is no longer raised due to a read system call returning EINTR
-  from within these methods.
-
-- Issue #11626: Add _SizeT functions to stable ABI.
-
-- Issue #15142: Fix reference leak when deallocating instances of types
-  created using PyType_FromSpec().
-
-- Issue #10053: Don't close FDs when FileIO.__init__ fails. Loosely based on
-  the work by Hirokazu Yamamoto.
-
-- Issue #15096: Removed support for ur'' as the raw notation isn't
-  compatible with Python 2.x's raw unicode strings.
-
-- Issue #13783: Generator objects now use the identifier APIs internally
-
-- Issue #14874: Restore charmap decoding speed to pre-PEP 393 levels.
-  Patch by Serhiy Storchaka.
-
-- Issue #15026: utf-16 encoding is now significantly faster (up to 10x).
-  Patch by Serhiy Storchaka.
-
-- Issue #11022: open() and io.TextIOWrapper are now calling
-  locale.getpreferredencoding(False) instead of locale.getpreferredencoding()
-  in text mode if the encoding is not specified. Don't change temporary the
-  locale encoding using locale.setlocale(), use the current locale encoding
-  instead of the user preferred encoding.
-
-- Issue #14673: Add Eric Snow's sys.implementation implementation.
-
-- Issue #15038: Optimize python Locks on Windows.
-
-Library
--------
-
-- Issue #12288: Consider '0' and '0.0' as valid initialvalue
-  for tkinter SimpleDialog.
-
-- Issue #15512: Add a __sizeof__ implementation for parser.
-  Patch by Serhiy Storchaka.
-
-- Issue #15469: Add a __sizeof__ implementation for deque objects.
-  Patch by Serhiy Storchaka.
-
-- Issue #15489: Add a __sizeof__ implementation for BytesIO objects.
-  Patch by Serhiy Storchaka.
-
-- Issue #15487: Add a __sizeof__ implementation for buffered I/O objects.
-  Patch by Serhiy Storchaka.
-
-- Issue #15514: Correct __sizeof__ support for cpu_set.
-  Patch by Serhiy Storchaka.
-
-- Issue #15177: Added dir_fd parameter to os.fwalk().
-
-- Issue #15061: Re-implemented hmac.compare_digest() in C to prevent further
-  timing analysis and to support all buffer protocol aware objects as well as
-  ASCII only str instances safely.
-
-- Issue #15164: Change return value of platform.uname() from a
-  plain tuple to a collections.namedtuple.
-
-- Support Mageia Linux in the platform module.
-
-- Issue #11678: Support Arch linux in the platform module.
-
-- Issue #15118: Change return value of os.uname() and os.times() from
-  plain tuples to immutable iterable objects with named attributes
-  (structseq objects).
-
-- Speed up _decimal by another 10-15% by caching the thread local context
-  that was last accessed. In the pi benchmark (64-bit platform, prec=9),
-  _decimal is now only 1.5x slower than float.
-
-- Remove the packaging module, which is not ready for prime time.
-
-- Issue #15154: Add "dir_fd" parameter to os.rmdir, remove "rmdir"
-  parameter from os.remove / os.unlink.
-
-- Issue #4489: Add a shutil.rmtree that isn't susceptible to symlink attacks.
-  It is used automatically on platforms supporting the necessary os.openat()
-  and os.unlinkat() functions. Main code by Martin von Löwis.
-
-- Issue #15156: HTMLParser now uses the new "html.entities.html5" dictionary.
-
-- Issue #11113: add a new "html5" dictionary containing the named character
-  references defined by the HTML5 standard and the equivalent Unicode
-  character(s) to the html.entities module.
-
-- Issue #15114: the strict mode of HTMLParser and the HTMLParseError exception
-  are deprecated now that the parser is able to parse invalid markup.
-
-- Issue #3665: \u and \U escapes are now supported in unicode regular
-  expressions.  Patch by Serhiy Storchaka.
-
-- Issue #15153: Added inspect.getgeneratorlocals to simplify white box
-  testing of generator state updates
-
-- Issue #13062: Added inspect.getclosurevars to simplify testing stateful
-  closures
-
-- Issue #11024: Fixes and additional tests for Time2Internaldate.
-
-- Issue #14626: Large refactoring of functions / parameters in the os module.
-  Many functions now support "dir_fd" and "follow_symlinks" parameters;
-  some also support accepting an open file descriptor in place of a path
-  string.  Added os.support_* collections as LBYL helpers.  Removed many
-  functions only previously seen in 3.3 alpha releases (often starting with
-  "f" or "l", or ending with "at").  Originally suggested by Serhiy Storchaka;
-  implemented by Larry Hastings.
-
-- Issue #15008: Implement PEP 362 "Signature Objects".
-  Patch by Yury Selivanov.
-
-- Issue: #15138: base64.urlsafe_{en,de}code() are now 3-4x faster.
-
-- Issue #444582: Add shutil.which, for finding programs on the system path.
-  Original patch by Erik Demaine, with later iterations by Jan Killian
-  and Brian Curtin.
-
-- Issue #14837: SSL errors now have ``library`` and ``reason`` attributes
-  describing precisely what happened and in which OpenSSL submodule.  The
-  str() of a SSLError is also enhanced accordingly.
-
-- Issue #9527: datetime.astimezone() method will now supply a class
-  timezone instance corresponding to the system local timezone when
-  called with no arguments.
-
-- Issue #14653: email.utils.mktime_tz() no longer relies on system
-  mktime() when timezone offest is supplied.
-
-- Issue #14684: zlib.compressobj() and zlib.decompressobj() now support the use
-  of predefined compression dictionaries. Original patch by Sam Rushing.
-
-- Fix GzipFile's handling of filenames given as bytes objects.
-
-- Issue #14772: Return destination values from some shutil functions.
-
-- Issue #15064: Implement context manager protocol for multiprocessing types
-
-- Issue #15101: Make pool finalizer avoid joining current thread.
-
-- Issue #14657: The frozen instance of importlib used for bootstrap is now
-  also the module imported as importlib._bootstrap.
-
-- Issue #14055: Add __sizeof__ support to _elementtree.
-
-- Issue #15054: A bug in tokenize.tokenize that caused string literals
-  with 'b' prefixes to be incorrectly tokenized has been fixed.
-  Patch by Serhiy Storchaka.
-
-- Issue #15006: Allow equality comparison between naive and aware
-  time or datetime objects.
-
-- Issue #15036: Mailbox no longer throws an error if a flush is done
-  between operations when removing or changing multiple items in mbox,
-  MMDF, or Babyl mailboxes.
-
-- Issue #14059: Implement multiprocessing.Barrier.
-
-- Issue #15061: The inappropriately named hmac.secure_compare has been
-  renamed to hmac.compare_digest, restricted to operating on bytes inputs
-  only and had its documentation updated to more accurately reflect both its
-  intent and its limitations
-
-- Issue #13841: Make child processes exit using sys.exit() on Windows.
-
-- Issue #14936: curses_panel was converted to PEP 3121 and PEP 384 API.
-  Patch by Robin Schreiber.
-
-- Issue #1667546: On platforms supporting tm_zone and tm_gmtoff fields
-  in struct tm, time.struct_time objects returned by time.gmtime(),
-  time.localtime() and time.strptime() functions now have tm_zone and
-  tm_gmtoff attributes.  Original patch by Paul Boddie.
-
-- Rename adjusted attribute to adjustable in time.get_clock_info() result.
-
-- Issue #3518: Remove references to non-existent BaseManager.from_address()
-  method.
-
-- Issue #13857: Added textwrap.indent() function (initial patch by Ezra
-  Berch)
-
-- Issue #2736: Added datetime.timestamp() method.
-
-- Issue #13854: Make multiprocessing properly handle non-integer
-  non-string argument to SystemExit.
-
-- Issue #12157: Make pool.map() handle empty iterables correctly.  Initial
-  patch by mouad.
-
-- Issue #11823: disassembly now shows argument counts on calls with keyword args.
-
-- Issue #14711: os.stat_float_times() has been deprecated.
-
-- LZMAFile now accepts the modes "rb"/"wb"/"ab" as synonyms of "r"/"w"/"a".
-
-- The bz2 and lzma modules now each contain an open() function, allowing
-  compressed files to readily be opened in text mode as well as binary mode.
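-
-  A minimal sketch of opening compressed files in text mode (the file names
-  are hypothetical)::
-
-      import bz2
-      import lzma
-
-      with bz2.open("notes.txt.bz2", "wt", encoding="utf-8") as f:
-          f.write("compressed text\n")
-
-      with lzma.open("notes.txt.xz", "wt", encoding="utf-8") as f:
-          f.write("compressed text\n")
-
-      with bz2.open("notes.txt.bz2", "rt", encoding="utf-8") as f:
-          print(f.read())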
-
-- BZ2File.__init__() and LZMAFile.__init__() now accept a file object as their
-  first argument, rather than requiring a separate "fileobj" argument.
-
-- gzip.open() now accepts file objects as well as filenames.
-
-- Issue #14992: os.makedirs(path, exist_ok=True) would raise an OSError
-  when the path existed and had the S_ISGID mode bit set when it was
-  not explicitly asked for.  This no longer raises an exception, as mkdir
-  cannot control whether the OS sets that bit for it or not.
-
-- Issue #14989: Make the CGI-enabling option of http.server available via the
-  command line.
-
-- Issue #14987: Add a missing import statement to inspect.
-
-- Issue #1079: email.header.decode_header now correctly parses all the examples
-  in RFC2047.  There is a necessary visible behavior change: the leading and/or
-  trailing whitespace on ASCII parts is now preserved.
-
-- Issue #14969: Better handling of exception chaining in contextlib.ExitStack
-
-- Issue #14963: Convert contextlib.ExitStack.__exit__ to use an iterative
-  algorithm (Patch by Alon Horev)
-
-- Issue #14785: Add sys._debugmallocstats() to help debug low-level memory
-  allocation issues
-
-- Issue #14443: Ensure that .py files are byte-compiled with the correct Python
-  executable within bdist_rpm even on older versions of RPM
-
-C-API
------
-
-- Issue #15146: Add PyType_FromSpecWithBases. Patch by Robin Schreiber.
-
-- Issue #15042: Add PyState_AddModule and PyState_RemoveModule. Add version
-  guard for Py_LIMITED_API additions. Patch by Robin Schreiber.
-
-- Issue #13783: Inadvertent additions to the public C API in the PEP 380
-  implementation have either been removed or marked as private interfaces.
-
-Extension Modules
------------------
-
-- Issue #15000: Support the "unique" x32 architecture in _posixsubprocess.c.
-
-IDLE
-----
-
-- Issue #9803: Don't close IDLE on saving if breakpoint is open.
-  Patch by Roger Serwy.
-
-- Issue #14962: Update text coloring in IDLE shell window after changing
-  options.  Patch by Roger Serwy.
-
-Documentation
--------------
-
-- Issue #15176: Clarified behavior, documentation, and implementation
-  of os.listdir().
-
-- Issue #14982: Document that pkgutil's iteration functions require the
-  non-standard iter_modules() method to be defined by an importer (something
-  the importlib importers do not define).
-
-- Issue #15081: Document PyState_FindModule.
-  Patch by Robin Schreiber.
-
-- Issue #14814: Added first draft of ipaddress module API reference
-
-Tests
------
-
-- Issue #15187: Bugfix: remove temporary directories test_shutil was leaving
-  behind.
-
-- Issue #14769: test_capi now has SkipitemTest, which cleverly checks
-  for "parity" between PyArg_ParseTuple() and the Python/getargs.c static
-  function skipitem() for all possible "format units".
-
-- test_nntplib now tolerates being run from behind NNTP gateways that add
-  "X-Antivirus" headers to articles
-
-- Issue #15043: test_gdb is now skipped entirely if gdb security settings
-  block loading of the gdb hooks
-
-- Issue #14963: Add test cases for exception handling behaviour
-  in contextlib.ExitStack (Initial patch by Alon Horev)
-
-Build
------
-
-- Issue #13590: Improve support for OS X Xcode 4:
-    * Try to avoid building Python or extension modules with problematic
-      llvm-gcc compiler.
-    * Since Xcode 4 removes ppc support, extension module builds now
-      check for ppc compiler support and automatically remove ppc and
-      ppc64 archs when not available.
-    * Since Xcode 4 no longer installs SDKs in default locations,
-      extension module builds now revert to using installed headers
-      and libs if the SDK used to build the interpreter is not
-      available.
-    * Update ./configure to use better defaults for universal builds;
-      in particular, --enable-universalsdk=yes uses the Xcode default
-      SDK and --with-universal-archs now defaults to "intel" if ppc
-      not available.
-
-- Issue #14225: Fix Unicode support for curses (#12567) on OS X
-
-- Issue #14928: Fix importlib bootstrap issues by using a custom executable
-  (Modules/_freeze_importlib) to build Python/importlib.h.
-
-
-What's New in Python 3.3.0 Alpha 4?
-===================================
-
-*Release date: 31-May-2012*
-
-Core and Builtins
------------------
-
-- Issue #14835: Make plistlib output empty arrays & dicts like OS X.
-  Patch by Sidney San Martín.
-
-- Issue #14744: Use the new _PyUnicodeWriter internal API to speed up
-  str%args and str.format(args).
-
-- Issue #14930: Make memoryview objects weakrefable.
-
-- Issue #14775: Fix a potential quadratic dict build-up due to the garbage
-  collector repeatedly trying to untrack dicts.
-
-- Issue #14857: fix regression in references to PEP 3135 implicit __class__
-  closure variable (Reopens issue #12370)
-
-- Issue #14712 (PEP 405): Virtual environments. Implemented by Vinay Sajip.
-
-- Issue #14660 (PEP 420): Namespace packages. Implemented by Eric Smith.
-
-- Issue #14494: Fix __future__.py and its documentation to note that
-  absolute imports are the default behavior in 3.0 instead of 2.7.
-  Patch by Sven Marnach.
-
-- Issue #9260: A finer-grained import lock.  Most of the import sequence
-  now uses per-module locks rather than the global import lock, eliminating
-  well-known issues with threads and imports.
-
-- Issue #14624: UTF-16 decoding is now 3x to 4x faster on various inputs.
-  Patch by Serhiy Storchaka.
-
-- asdl_seq and asdl_int_seq are now Py_ssize_t sized.
-
-- Issue #14133 (PEP 415): Implement suppression of __context__ display with an
-  attribute on BaseException. This replaces the original mechanism of PEP 409.
-
-- Issue #14417: Mutating a dict during lookup now restarts the lookup instead
-  of raising a RuntimeError (undoes issue #14205).
-
-- Issue #14738: Speed-up UTF-8 decoding on non-ASCII data.  Patch by Serhiy
-  Storchaka.
-
-- Issue #14700: Fix two broken and undefined-behaviour-inducing overflow checks
-  in old-style string formatting.
-
-Library
--------
-
-- Issue #14690: Use monotonic clock instead of system clock in the sched,
-  subprocess and trace modules.
-
-- Issue #14443: Tell rpmbuild to use the correct version of Python in
-  bdist_rpm. Initial patch by Ross Lagerwall.
-
-- Issue #12515: email now registers a defect if it gets to EOF while parsing
-  a MIME part without seeing the closing MIME boundary.
-
-- Issue #1672568: email now always decodes base64 payloads, adding padding and
-  ignoring non-base64-alphabet characters if needed, and registering defects
-  for any such problems.
-
-- Issue #14925: email now registers a defect when the parser decides that there
-  is a missing header/body separator line.  MalformedHeaderDefect, which the
-  existing code would never actually generate, is deprecated.
-
-- Issue #10365: File open dialog now works instead of crashing even when
-  the parent window is closed before the dialog. Patch by Roger Serwy.
-
-- Issue #8739: Updated smtpd to support RFC 5321, and added support for the
-  RFC 1870 SIZE extension.
-
-- Issue #665194: Added a localtime function to email.utils to provide an
-  aware local datetime for use in setting Date headers.
-
-- Issue #12586: Added new provisional policies that implement convenient
-  unicode support for email headers.  See What's New for details.
-
-- Issue #14731: Refactored email Policy framework to support full backward
-  compatibility with Python 3.2 by default yet allow for the introduction of
-  new features through new policies.  Note that Policy.must_be_7bit is renamed
-  to cte_type.
-
-- Issue #14876: Use user-selected font for highlight configuration.
-
-- Issue #14920: Fix the help(urllib.parse) failure on terminals with the C
-  locale by using only ASCII characters in the help text.
-
-- Issue #14548: Make multiprocessing finalizers check pid before
-  running to cope with possibility of gc running just after fork.
-
-- Issue #14036: Add an additional check in urlparse to validate that the port
-  is not in an illegal range, returning None if it is.
-
-- Issue #14862: Add missing names to os.__all__
-
-- Issue #14875: Use float('inf') instead of float('1e66666') in the json module.
-
-- Issue #13585: Added contextlib.ExitStack
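-
-  A small sketch of the typical use case (the file names are hypothetical
-  and must exist for the snippet to run)::
-
-      from contextlib import ExitStack
-
-      filenames = ["a.txt", "b.txt"]
-      with ExitStack() as stack:
-          files = [stack.enter_context(open(name)) for name in filenames]
-          # Every file is closed when the with block exits, even if opening
-          # one of them fails or an error occurs inside the block.
-          for f in files:
-              print(f.readline())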
-
-- PEP 3144, Issue #14814: Added the ipaddress module
-
-- Issue #14426: Correct the Date format in Expires attribute of Set-Cookie
-  Header in Cookie.py.
-
-- Issue #14588: The types module now provides new_class() and prepare_class()
-  functions to support PEP 3115 compliant dynamic class creation. Patch by
-  Daniel Urban and Nick Coghlan.
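-
-  A minimal sketch of dynamic class creation with the new helpers (the class
-  and attribute names are made up)::
-
-      import types
-
-      class Base:
-          def hello(self):
-              return "hello from " + type(self).__name__
-
-      def fill_namespace(ns):
-          ns["answer"] = 42
-
-      Generated = types.new_class("Generated", (Base,),
-                                  exec_body=fill_namespace)
-      obj = Generated()
-      print(obj.hello(), Generated.answer)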
-
-- Issue #13152: Allow specifying a custom tabsize for expanding tabs in
-  textwrap. Patch by John Feuerstein.
-
-- Issue #14721: Send the correct 'Content-length: 0' header when the body is an
-  empty string ''. Initial Patch contributed by Arve Knudsen.
-
-- Issue #14072: Fix parsing of 'tel' URIs in urlparse by making the check for
-  ports stricter.
-
-- Issue #9374: Generic parsing of query and fragment portions of URLs for any
-  scheme, as supported by both RFC 3986 and RFC 2396.
-
-- Issue #14798: Fix the functions in pyclbr to raise an ImportError
-  when the first part of a dotted name is not a package. Patch by
-  Xavier de Gaye.
-
-- Issue #12098: multiprocessing on Windows now starts child processes
-  using the same sys.flags as the current process.  Initial patch by
-  Sergey Mezentsev.
-
-- Issue #13031: Small speed-up for tarfile when unzipping tarfiles.
-  Patch by Justin Peel.
-
-- Issue #14780: urllib.request.urlopen() now has a ``cadefault`` argument
-  to use the default certificate store.  Initial patch by James Oakley.
-
-- Issue #14829: Fix bisect and range() indexing with large indices
-  (>= 2 ** 32) under 64-bit Windows.
-
-- Issue #14732: The _csv module now uses PEP 3121 module initialization.
-  Patch by Robin Schreiber.
-
-- Issue #14809: Add HTTP status codes introduced by RFC 6585 to http.server
-  and http.client. Patch by EungJun Yi.
-
-- Issue #14777: tkinter may return undecoded UTF-8 bytes as a string when
-  accessing the Tk clipboard.  Modify clipboard_get() to first request type
-  UTF8_STRING when no specific type is requested in an X11 windowing
-  environment, falling back to the current default type STRING if that fails.
-  Original patch by Thomas Kluyver.
-
-- Issue #14773: Fix os.fwalk() failing on dangling symlinks.
-
-- Issue #12541: Be lenient with quotes around the Realm field of HTTP Basic
-  Authentication in urllib.request.
-
-- Issue #14807: move undocumented tarfile.filemode() to stat.filemode() and add
-  doc entry. Add tarfile.filemode alias with deprecation warning.
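-
-  For example::
-
-      import os
-      import stat
-
-      # Convert an st_mode integer into a string such as 'drwxr-xr-x'.
-      print(stat.filemode(os.stat(".").st_mode))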
-
-- Issue #13815: TarFile.extractfile() now returns io.BufferedReader objects.
-
-- Issue #14532: Add a secure_compare() helper to the hmac module, to mitigate
-  timing attacks. Patch by Jon Oberheide.
-
-- Add importlib.util.resolve_name().
-
-- Issue #14366: Support lzma compression in zip files.
-  Patch by Serhiy Storchaka.
-
-- Issue #13959: Introduce importlib.find_loader() and document
-  imp.find_module/load_module as deprecated.
-
-- Issue #14082: shutil.copy2() now copies extended attributes, if possible.
-  Patch by Hynek Schlawack.
-
-- Issue #13959: Make importlib.abc.FileLoader.load_module()/get_filename() and
-  importlib.machinery.ExtensionFileLoader.load_module() have their single
-  argument be optional. Allows for the replacement (and thus deprecation) of
-  imp.load_source()/load_package()/load_compiled().
-
-- Issue #13959: imp.get_suffixes() has been deprecated in favour of the new
-  attributes on importlib.machinery: SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
-  OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES, and EXTENSION_SUFFIXES. This
-  led to an indirect deprecation of inspect.getmoduleinfo().
-
-- Issue #14662: Prevent shutil failures on OS X when destination does not
-  support chflag operations.  Patch by Hynek Schlawack.
-
-- Issue #14157: Fix time.strptime failing without a year on February 29th.
-  Patch by Hynek Schlawack.
-
-- Issue #14753: Make multiprocessing's handling of negative timeouts
-  the same as it was in Python 3.2.
-
-- Issue #14583: Fix importlib bug when a package's __init__.py would first
-  import one of its modules then raise an error.
-
-- Issue #14741: Fix missing support for Ellipsis ('...') in parser module.
-
-- Issue #14697: Fix missing support for set displays and set comprehensions in
-  parser module.
-
-- Issue #14701: Fix missing support for 'raise ... from' in parser module.
-
-- Add support for timeouts to the acquire() methods of
-  multiprocessing's lock/semaphore/condition proxies.
-
-- Issue #13989: Add support for text mode to gzip.open().
-
-- Issue #14127: The os.stat() result object now provides three additional
-  fields: st_ctime_ns, st_mtime_ns, and st_atime_ns, providing those times as an
-  integer with nanosecond resolution.  The functions os.utime(), os.lutimes(),
-  and os.futimes() now accept a new parameter, ns, which accepts mtime and atime
-  as integers with nanosecond resolution.
-
-- Issue #14127 and #10148: shutil.copystat now preserves exact mtime and atime
-  on filesystems providing nanosecond resolution.
-
-IDLE
-----
-
-- Issue #14958: Change IDLE syntax highlighting to recognize all string and
-  byte literals supported in Python 3.3.
-
-- Issue #10997: Prevent a duplicate entry in IDLE's "Recent Files" menu.
-
-- Issue #14929: Stop IDLE 3.x from closing on Unicode decode errors when
-  grepping. Patch by Roger Serwy.
-
-- Issue #12510: Attempting to get an invalid tooltip no longer closes IDLE.
-  Other tooltips have been corrected or improved and the number of tests
-  has been tripled. Original patch by Roger Serwy.
-
-Tools/Demos
------------
-
-- Issue #14695: Bring Tools/parser/unparse.py support up to date with
-  the Python 3.3 Grammar.
-
-Build
------
-
-- Issue #14472: Update .gitignore. Patch by Matej Cepl.
-
-- Upgrade Windows library versions: bzip2 1.0.6, OpenSSL 1.0.1c.
-
-- Issue #14693: Under non-Windows platforms, hashlib's fallback modules are
-  always compiled, even if OpenSSL is present at build time.
-
-- Issue #13210: Windows build now uses VS2010, ported from VS2008.
-
-C-API
------
-
-- Issue #14705: The PyArg_Parse() family of functions now support the 'p' format
-  unit, which accepts a "boolean predicate" argument.  It converts any Python
-  value into an integer--0 if it is "false", and 1 otherwise.
-
-Documentation
--------------
-
-- Issue #14863: Update the documentation of os.fdopen() to reflect the
-  fact that it's only a thin wrapper around open() anymore.
-
-- Issue #14588: The language reference now accurately documents the Python 3
-  class definition process. Patch by Nick Coghlan.
-
-- Issue #14943: Correct a default argument value for winreg.OpenKey
-  and correctly list the argument names in the function's explanation.
-
-
-What's New in Python 3.3.0 Alpha 3?
-===================================
-
-*Release date: 01-May-2012*
-
-Core and Builtins
------------------
-
-- Issue #14699: Fix calling the classmethod descriptor directly.
-
-- Issue #14433: Prevent msvcrt crash in interactive prompt when stdin is closed.
-
-- Issue #14521: Make result of float('nan') and float('-nan') more consistent
-  across platforms.
-
-- Issue #14646: __import__() sets __loader__ if the loader did not.
-
-- Issue #14605: No longer have implicit entries in sys.meta_path. If
-  sys.meta_path is found to be empty, raise ImportWarning.
-
-- Issue #14605: No longer have implicit entries in sys.path_hooks. If
-  sys.path_hooks is found to be empty, a warning will be raised. None is now
-  inserted into sys.path_importer_cache if no finder was discovered. This also
-  means imp.NullImporter is no longer implicitly used.
-
-- Issue #13903: Implement PEP 412. Individual dictionary instances can now share
-  their keys with other dictionaries. Classes take advantage of this to share
-  their instance dictionary keys for improved memory and performance.
-
-- Issue #11603 (again): Setting __repr__ to __str__ now raises a RuntimeError
-  when repr() or str() is called on such an object.
-
-- Issue #14658: Fix binding a special method to a builtin implementation of a
-  special method with a different name.
-
-- Issue #14630: Fix a memory access bug for instances of a subclass of int
-  with value 0.
-
-- Issue #14339: Speed improvements to bin, oct and hex functions.  Patch by
-  Serhiy Storchaka.
-
-- Issue #14385: It is now possible to use a custom type for the __builtins__
-  namespace, instead of a dict. It can be used for sandboxing, for example.
-  Also raise a NameError instead of an ImportError if the __build_class__ name
-  is not found in __builtins__.
-
-- Issue #12599: Be more strict in accepting None compared to a false-like
-  object for importlib.util.module_for_loader and
-  importlib.machinery.PathFinder.
-
-- Issue #14612: Fix jumping around with blocks by setting f_lineno.
-
-- Issue #14592: Attempting a relative import w/o __package__ or __name__ set in
-  globals raises a KeyError.
-
-- Issue #14607: Fix keyword-only arguments which started with ``__``.
-
-- Issue #10854: The ImportError raised when an extension module on Windows
-  fails to import now uses the new path and name attributes from
-  Issue #1559549.
-
-- Issue #13889: Check and (if necessary) set FPU control word before calling
-  any of the dtoa.c string <-> float conversion functions, on MSVC builds of
-  Python.  This fixes issues when embedding Python in a Delphi app.
-
-- __import__() now matches PEP 328 and the documentation by defaulting 'level'
-  to 0 instead of -1 and removing support for negative values.
-
-- Issue #2377: Make importlib the implementation of __import__().
-
-- Issue #1559549: ImportError now has 'name' and 'path' attributes that are set
-  using keyword arguments to its constructor. They are currently not set by
-  import as they are meant for use by importlib.
-
-- Issue #14474: Save and restore exception state in thread.start_new_thread()
-  while writing the error message if the thread leaves an unhandled exception.
-
-- Issue #13019: Fix potential reference leaks in bytearray.extend().  Patch
-  by Suman Saha.
-
-Library
--------
-
-- Issue #14768: Fix os.path.expanduser('~/a') not working correctly when HOME
-  is '/'.
-
-- Issue #14371: Support bzip2 in zipfile module.  Patch by Serhiy Storchaka.
-
-- Issue #13183: Fix pdb skipping frames after hitting a breakpoint and running
-  step.  Patch by Xavier de Gaye.
-
-- Issue #14696: Fix parser module to understand 'nonlocal' declarations.
-
-- Issue #10941: Fix imaplib.Internaldate2tuple to produce correct result near
-  the DST transition.  Patch by Joe Peterson.
-
-- Issue #9154: Fix parser module to understand function annotations.
-
-- Issue #6085: In http.server.py, SimpleHTTPServer.address_string returns the
-  client IP address instead of the client hostname. Patch by Charles-François
-  Natali.
-
-- Issue #14309: Deprecate time.clock(), use time.perf_counter() or
-  time.process_time() instead.
-
-- Issue #14428: Implement PEP 418. Add time.get_clock_info(),
-  time.perf_counter() and time.process_time() functions, and rename
-  time.steady() to time.monotonic().
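-
-  A short sketch of the new clock APIs (time.monotonic() may be unavailable
-  on some platforms in this release)::
-
-      import time
-
-      start = time.perf_counter()
-      time.sleep(0.1)
-      print("elapsed:", time.perf_counter() - start)
-
-      info = time.get_clock_info("monotonic")
-      print(info.implementation, info.resolution, info.adjustable)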
-
-- Issue #14646: importlib.util.module_for_loader() now sets __loader__ and
-  __package__ (when possible).
-
-- Issue #14664: It is now possible to use @unittest.skip{If,Unless} on a
-  test class that doesn't inherit from TestCase (i.e. a mixin).
-
-- Issue #4892: multiprocessing Connections can now be transferred over
-  multiprocessing Connections.  Patch by Richard Oudkerk (sbt).
-
-- Issue #14160: TarFile.extractfile() failed to resolve symbolic links when
-  the links were not located in an archive subdirectory.
-
-- Issue #14638: pydoc now treats non-string __name__ values as if they
-  were missing, instead of raising an error.
-
-- Issue #13684: Fix httplib tunnel issue of infinite loops for certain sites
-  which send EOF without trailing \r\n.
-
-- Issue #14605: Add importlib.abc.FileLoader, importlib.machinery.(FileFinder,
-  SourceFileLoader, SourcelessFileLoader, ExtensionFileLoader).
-
-- Issue #13959: imp.cache_from_source()/source_from_cache() now follow
-  os.path.join()/split() semantics for path manipulation instead of its prior,
-  custom semantics of carrying the right-most path separator forward in path
-  joining.
-
-- Issue #2193: Allow ":" character in Cookie NAME values.
-
-- Issue #14629: tokenize.detect_encoding() will specify the filename in the
-  SyntaxError exception if found at readline.__self__.name.
-
-- Issue #14629: Raise SyntaxError in tokenize.detect_encoding() if the
-  first two lines have non-UTF-8 characters without an encoding declaration.
-
-- Issue #14308: Fix an exception when a "dummy" thread is in the threading
-  module's active list after a fork().
-
-- Issue #11750: The Windows API functions scattered in the _subprocess and
-  _multiprocessing.win32 modules now live in a single module "_winapi".
-  Patch by sbt.
-
-- Issue #14087: multiprocessing: add Condition.wait_for(). Patch by sbt.
-
-- Issue #14538: HTMLParser can now correctly parse start tags that contain
-  a bare '/'.
-
-- Issue #14452: SysLogHandler no longer inserts a UTF-8 BOM into the message.
-
-- Issue #14386: Expose the dict_proxy internal type as types.MappingProxyType.
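-
-  A small sketch of the read-only mapping view (the dict contents are
-  arbitrary)::
-
-      import types
-
-      data = {"host": "localhost", "port": 8080}
-      view = types.MappingProxyType(data)
-      print(view["port"])            # reads go through to the underlying dict
-      try:
-          view["port"] = 80          # writes are rejected
-      except TypeError as exc:
-          print("read-only:", exc)
-      data["port"] = 80              # changes to the original dict stay visible
-      print(view["port"])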
-
-- Issue #13959: Make imp.reload() always use a module's __loader__ to perform
-  the reload.
-
-- Issue #13959: Add imp.py and rename the built-in module to _imp, allowing for
-  re-implementing parts of the module in pure Python.
-
-- Issue #13496: Fix potential overflow in bisect.bisect algorithm when applied
-  to a collection of size > sys.maxsize / 2.
-
-- Have importlib take advantage of ImportError's new 'name' and 'path'
-  attributes.
-
-- Issue #14399: zipfile now recognizes that the archive has been modified even
-  if only the comment is changed.  In addition, the TypeError that results from
-  trying to set a non-binary value as a comment is now raised at the time
-  the comment is set rather than at the time the zipfile is written.
-
-- trace.CoverageResults.is_ignored_filename() now ignores any name that starts
-  with "<" and ends with ">" instead of special-casing "<string>" and
-  "<doctest ".
-
-- Issue #12537: The mailbox module no longer depends on knowledge of internal
-  implementation details of the email package Message object.
-
-- Issue #7978: socketserver now restarts the select() call when EINTR is
-  returned.  This avoids crashing the server loop when a signal is received.
-  Patch by Jerzy Kozera.
-
-- Issue #14522: Avoid duplicating socket handles in multiprocessing.connection.
-  Patch by sbt.
-
-- Don't Py_DECREF NULL variable in io.IncrementalNewlineDecoder.
-
-- Issue #3033: Add displayof parameter to tkinter font. Patch by Guilherme Polo.
-
-- Issue #14482: Raise a ValueError, not a NameError, when trying to create
-  a multiprocessing Client or Listener with an AF_UNIX type address under
-  Windows.  Patch by Popa Claudiu.
-
-- Issue #802310: Always generate unique tkinter font names if not directly
-  passed.
-
-- Issue #14151: Raise a ValueError, not a NameError, when trying to create
-  a multiprocessing Client or Listener with an AF_PIPE type address under
-  non-Windows platforms.  Patch by Popa Claudiu.
-
-- Issue #14493: Use gvfs-open or xdg-open in webbrowser.
-
-Build
------
-
-- "make touch" will now touch generated files that are checked into Mercurial,
-  after a "hg update" which failed to bring the timestamps into the right order.
-
-Tests
------
-
-- Issue #14026: In test_cmd_line_script, check that sys.argv is populated
-  correctly for the various invocation approaches (Patch by Jason Yeo)
-
-- Issue #14032: Fix incorrect variable name in test_cmd_line_script debugging
-  message (Patch by Jason Yeo)
-
-- Issue #14589: Update certificate chain for sha256.tbs-internet.com, fixing
-  a test failure in test_ssl.
-
-- Issue #14355: Regrtest now supports the standard unittest test loading, and
-  will use it if a test file contains no `test_main` method.
-
-IDLE
-----
-
-- Issue #8515: Set __file__ when running a file in IDLE.
-  Initial patch by Bruce Frederiksen.
-
-- Issue #14496: Fix wrong name in idlelib/tabbedpages.py.
-  Patch by Popa Claudiu.
-
-Tools / Demos
--------------
-
-- Issue #3561: The Windows installer now has an option, off by default, for
-  placing the Python installation into the system "Path" environment variable.
-
-- Issue #13165: stringbench is now available in the Tools/stringbench folder.
-  It used to live in its own SVN project.
-
-C-API
------
-
-- Issue #14098: New functions PyErr_GetExcInfo and PyErr_SetExcInfo.
-  Patch by Stefan Behnel.
-
-
-What's New in Python 3.3.0 Alpha 2?
-===================================
-
-*Release date: 01-Apr-2012*
-
-Core and Builtins
------------------
-
-- Issue #1683368: object.__new__ and object.__init__ raise a TypeError if they
-  are passed arguments and their complementary method is not overridden.
-
-- Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as
-  the module name that was not interned.
-
-- Issue #14331: Use significantly less stack space when importing modules by
-  allocating path buffers on the heap instead of the stack.
-
-- Issue #14334: Prevent a segfault in type.__getattribute__ when it was not
-  passed strings.
-
-- Issue #1469629: Allow cycles through an object's __dict__ slot to be
-  collected. (For example if ``x.__dict__ is x``).
-
-- Issue #14205: dict lookup raises a RuntimeError if the dict is modified
-  during a lookup.
-
-- Issue #14220: When a generator is delegating to another iterator with the
-  yield from syntax, it needs to have its ``gi_running`` flag set to True.
-
-- Issue #14435: Remove dedicated block allocator from floatobject.c and rely
-  on the PyObject_Malloc() api like all other objects.
-
-- Issue #14471: Fix a possible buffer overrun in the winreg module.
-
-- Issue #14288: Allow the serialization of builtin iterators
-
-Library
--------
-
-- Issue #14300: Under Windows, sockets created using socket.dup() now allow
-  overlapped I/O.  Patch by sbt.
-
-- Issue #13872: socket.detach() now marks the socket closed (as mirrored
-  in the socket repr()).  Patch by Matt Joiner.
-
-- Issue #14406: Fix a race condition when using ``concurrent.futures.wait(
-  return_when=ALL_COMPLETED)``.  Patch by Matt Joiner.
-
-- Issue #5136: deprecate old, unused functions from tkinter.
-
-- Issue #14416: syslog now defines the LOG_ODELAY and LOG_AUTHPRIV constants
-  if they are defined in <syslog.h>.
-
-- Issue #14295: Add unittest.mock
-
-- Issue #7652: Add --with-system-libmpdec option to configure for linking
-  the _decimal module against an installed libmpdec.
-
-- Issue #14380: MIMEText now defaults to utf-8 when passed non-ASCII unicode
-  with no charset specified.
-
-- Issue #10340: asyncore - properly handle EINVAL in dispatcher constructor on
-  OSX; avoid calling handle_connect in case of a disconnected socket which
-  was not meant to connect.
-
-- Issue #14204: The ssl module now has support for the Next Protocol
-  Negotiation extension, if available in the underlying OpenSSL library.
-  Patch by Colin Marc.
-
-- Issue #3035: Unused functions from tkinter are marked as pending deprecated.
-
-- Issue #12757: Fix the skipping of doctests when python is run with -OO so
-  that it works in unittest's verbose mode as well as non-verbose mode.
-
-- Issue #7652: Integrate the decimal floating point libmpdec library to speed
-  up the decimal module. Performance gains of the new C implementation are
-  between 10x and 100x, depending on the application.
-
-- Issue #14269: SMTPD now conforms to the RFC and requires a HELO command
-  before MAIL, RCPT, or DATA.
-
-- Issue #13694: asynchronous connect in asyncore.dispatcher does not set addr
-  attribute.
-
-- Issue #14344: fixed the repr of email.policy objects.
-
-- Issue #11686: Added missing entries to email package __all__ lists
-  (mostly the new Bytes classes).
-
-- Issue #14335: multiprocessing's custom Pickler subclass now inherits from
-  the C-accelerated implementation.  Patch by sbt.
-
-- Issue #10484: Fix the CGIHTTPServer's PATH_INFO handling problem.
-
-- Issue #11199: Fix urllib hanging on particular FTP URLs.
-
-- Improve the memory utilization and speed of functools.lru_cache.
-
-- Issue #14222: Use the new time.steady() function instead of time.time() for
-  timeouts in the queue and threading modules so they are not affected by
-  system time updates.
-
-- Issue #13248: Remove lib2to3.pytree.Base.get_prefix/set_prefix.
-
-- Issue #14234: CVE-2012-0876: Randomize hashes of xml attributes in the hash
-  table internal to the pyexpat module's copy of the expat library to avoid a
-  denial of service due to hash collisions.  Patch by David Malcolm with some
-  modifications by the expat project.
-
-- Issue #12818: format address no longer needlessly \ escapes ()s in names when
-  the name ends up being quoted.
-
-- Issue #14062: BytesGenerator now correctly folds Header objects,
-  including using linesep when folding.
-
-- Issue #13839: When invoked on the command-line, the pstats module now
-  accepts several filenames of profile stat files and merges them all.
-  Patch by Matt Joiner.
-
-- Issue #14291: Email now defaults to utf-8 for non-ASCII unicode headers
-  instead of raising an error.  This fixes a regression relative to 2.7.
-
-- Issue #989712: Support using Tk without a mainloop.
-
-- Issue #3835: Refuse to use unthreaded Tcl in threaded Python.
-
-- Issue #2843: Add new Tk API to Tkinter.
-
-- Issue #14184: Increase the default stack size for secondary threads on
-  Mac OS X to avoid interpreter crashes when using threads on 10.7.
-
-- Issue #14180: datetime.date.fromtimestamp(),
-  datetime.datetime.fromtimestamp() and datetime.datetime.utcfromtimestamp()
-  now raise an OSError instead of ValueError if localtime() or gmtime() failed.
-
-- Issue #14180: time.ctime(), time.gmtime(), time.localtime(),
-  datetime.date.fromtimestamp(), datetime.datetime.fromtimestamp() and
-  datetime.datetime.utcfromtimestamp() now raise an OverflowError, instead of
-  a ValueError, if the timestamp does not fit in time_t.
-
-- Issue #14180: datetime.datetime.fromtimestamp() and
-  datetime.datetime.utcfromtimestamp() now round microseconds towards zero
-  instead of rounding to nearest with ties going away from zero.
-
-- Issue #10543: Fix unittest test discovery with Jython bytecode files.
-
-- Issue #1178863: Separate initialisation from setting when initializing
-  Tkinter.Variables; harmonize exceptions to ValueError; only delete variables
-  that have not been deleted; assert that variable names are strings.
-
-- Issue #14104: Implement time.monotonic() on Mac OS X, patch written by
-  Nicholas Riley.
-
-- Issue #13394: the aifc module now uses warnings.warn() to signal warnings.
-
-- Issue #14252: Fix subprocess.Popen.terminate() to not raise an error under
-  Windows when the child process has already exited.
-
-- Issue #14223: curses.addch() is no longer limited to the range 0-255 when the
-  Python curses is not linked to libncursesw. It was a regression introduced
-  in Python 3.3a1.
-
-- Issue #14168: Check for presence of Element._attrs in minidom before
-  accessing it.
-
-- Issue #12328: Fix multiprocessing's use of overlapped I/O on Windows.
-  Also, add a multiprocessing.connection.wait(rlist, timeout=None) function
-  for polling multiple objects at once.  Patch by sbt.
-
-- Issue #14007: Accept incomplete TreeBuilder objects (missing start, end,
-  data or close method) for the Python implementation as well.
-  Drop the no-op TreeBuilder().xml() method from the C implementation.
-
-- Issue #14210: pdb now has tab-completion not only for command names, but
-  also for their arguments, wherever possible.
-
-- Issue #14310: Sockets can now be shared with other processes on Windows
-  using the new socket.socket.share() and socket.fromshare() APIs.
-
-- Issue #10576: The gc module now has a 'callbacks' member that will get
-  called when garbage collection takes place.
-
-Build
------
-
-- Issue #14557: Fix extensions build on HP-UX. Patch by Adi Roiban.
-
-- Issue #14387: Do not include accu.h from Python.h.
-
-- Issue #14359: Only use O_CLOEXEC in _posixmodule.c if it is defined.
-  Based on patch from Hervé Coatanhay.
-
-- Issue #14321: Do not run pgen during the build if files are up to date.
-
-Documentation
--------------
-
-- Issue #14034: added the argparse tutorial.
-
-- Issue #14324: Fix configure tests for cross builds.
-
-- Issue #14327: Call AC_CANONICAL_HOST in configure.ac and check in
-  config.{guess,sub}. Don't use uname calls for cross builds.
-
-Extension Modules
------------------
-
-- Issue #9041: An issue in ctypes.c_longdouble, ctypes.c_double, and
-  ctypes.c_float that caused an incorrect exception to be returned in the
-  case of overflow has been fixed.
-
-- Issue #14212: The re module didn't retain a reference to buffers it was
-  scanning, resulting in segfaults.
-
-- Issue #14259: The finditer() method of re objects did not take any
-  keyword arguments, contrary to the documentation.
-
-- Issue #10142: Support for SEEK_HOLE/SEEK_DATA (for example, under ZFS).
-
-Tests
------
-
-- Issue #14442: Add missing errno import in test_smtplib.
-
-- Issue #8315: (partial fix) python -m unittest test.test_email now works.
-
-
-What's New in Python 3.3.0 Alpha 1?
-===================================
-
-*Release date: 05-Mar-2012*
-
-Core and Builtins
------------------
-
-- Issue #14172: Fix reference leak when marshalling a buffer-like object
-  (other than a bytes object).
-
-- Issue #13521: dict.setdefault() now does only one lookup for the given key,
-  making it "atomic" for many purposes.  Patch by Filip GruszczyƄski.
-
-- PEP 409, Issue #6210: "raise X from None" is now supported as a means of
-  suppressing the display of the chained exception context. The chained
-  context still remains available as the __context__ attribute.
-
-- Issue #10181: New memoryview implementation fixes multiple ownership
-  and lifetime issues of dynamically allocated Py_buffer members (#9990)
-  as well as crashes (#8305, #7433). Many new features have been added
-  (See whatsnew/3.3), and the documentation has been updated extensively.
-  The ndarray test object from _testbuffer.c implements all aspects of
-  PEP-3118, so further development towards the complete implementation
-  of the PEP can proceed in a test-driven manner.
-
-  Thanks to Nick Coghlan, Antoine Pitrou and Pauli Virtanen for review
-  and many ideas.
-
-- Issue #12834: Fix incorrect results of memoryview.tobytes() for
-  non-contiguous arrays.
-
-- Issue #5231: Introduce memoryview.cast() method that allows changing
-  format and shape without making a copy of the underlying memory.
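-
-  A minimal sketch (the array contents are arbitrary)::
-
-      import array
-
-      buf = array.array("i", [1, 2, 3, 4])
-      view = memoryview(buf)
-      as_bytes = view.cast("B")            # reinterpret as bytes, no copy made
-      print(len(view), len(as_bytes))      # 4 vs. 4 * itemsize
-      reshaped = as_bytes.cast("i", shape=[2, 2])
-      print(reshaped.tolist())             # [[1, 2], [3, 4]]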
-
-- Issue #14084: Fix a file descriptor leak when importing a module with a
-  bad encoding.
-
-- Upgrade Unicode data to Unicode 6.1.
-
-- Issue #14040: Remove rarely used file name suffixes for C extensions
-  (under POSIX mainly).
-
-- Issue #14051: Allow arbitrary attributes to be set on classmethod and
-  staticmethod.
-
-- Issue #13703: oCERT-2011-003: Randomize hashes of str and bytes to protect
-  against denial of service attacks due to hash collisions within the dict and
-  set types.  Patch by David Malcolm, based on work by Victor Stinner.
-
-- Issue #13020: Fix a reference leak when allocating a structsequence object
-  fails.  Patch by Suman Saha.
-
-- Issue #13908: Ready types returned from PyType_FromSpec.
-
-- Issue #11235: Fix OverflowError when trying to import a source file whose
-  modification time doesn't fit in a 32-bit timestamp.
-
-- Issue #12705: A SyntaxError exception is now raised when attempting to
-  compile multiple statements as a single interactive statement.
-
-- Fix the builtin module initialization code to store the init function for
-  future reinitialization.
-
-- Issue #8052: The posix subprocess module would take a long time closing
-  all possible file descriptors in the child process rather than just open
-  file descriptors.  It now closes only the open fds if possible for the
-  default close_fds=True behavior.
-
-- Issue #13629: Renumber the tokens in token.h so that they match the indexes
-  into _PyParser_TokenNames.
-
-- Issue #13752: Add a casefold() method to str.
-
-- Issue #13761: Add a "flush" keyword argument to the print() function,
-  used to ensure flushing the output stream.
-
-- Issue #13645: pyc files now contain the size of the corresponding source
-  code, to avoid timestamp collisions (especially on filesystems with a low
-  timestamp resolution) when checking for freshness of the bytecode.
-
-- PEP 380, Issue #11682: Add "yield from <x>" to support easy delegation to
-  subgenerators (initial patch by Greg Ewing, integration into 3.3 by
-  Renaud Blanch, Ryan Kelly, Zbigniew Jędrzejewski-Szmek and Nick Coghlan)
-
-- Issue #13748: Raw bytes literals can now be written with the ``rb`` prefix
-  as well as ``br``.
-
-- Issue #12736: Use full unicode case mappings for upper, lower, and title case.
-
-- Issue #12760: Add a create mode to open(). Patch by David Townshend.
-
-- Issue #13738: Simplify implementation of bytes.lower() and bytes.upper().
-
-- Issue #13577: Built-in methods and functions now have a __qualname__.
-  Patch by sbt.
-
-- Issue #6695: Full garbage collection runs now clear the freelist of set
-  objects.  Initial patch by Matthias Troffaes.
-
-- Fix OSError.__init__ and OSError.__new__ so that each of them can be
-  overridden and take additional arguments (followup to issue #12555).
-
-- Fix the fix for issue #12149: it was incorrect, although it had the side
-  effect of appearing to resolve the issue.  Thanks to Mark Shannon for
-  noticing.
-
-- Issue #13505: Pickle bytes objects in a way that is compatible with
-  Python 2 when using protocols <= 2.
-
-- Issue #11147: Fix an unused argument in _Py_ANNOTATE_MEMORY_ORDER.  (Fix
-  given by Campbell Barton).
-
-- Issue #13503: Use a more efficient reduction format for bytearrays with
-  pickle protocol >= 3.  The old reduction format is kept with older protocols
-  in order to allow unpickling under Python 2.  Patch by Irmen de Jong.
-
-- Issue #7111: Python can now be run without a stdin, stdout or stderr
-  stream.  It was already the case with Python 2.  However, the corresponding
-  sys module entries are now set to None (instead of an unusable file object).
-
-- Issue #11849: Ensure that free()d memory arenas are really released
-  on POSIX systems supporting anonymous memory mappings.  Patch by
-  Charles-François Natali.
-
-- PEP 3155 / issue #13448: Qualified name for classes and functions.
-
-- Issue #13436: Fix a bogus error message when an AST object was passed
-  an invalid integer value.
-
-- Issue #13411: memoryview objects are now hashable when the underlying
-  object is hashable.
-
-- Issue #13338: Handle all enumerations in _Py_ANNOTATE_MEMORY_ORDER
-  to allow compiling extension modules with -Wswitch-enum on gcc.
-  Initial patch by Floris Bruynooghe.
-
-- Issue #10227: Add an allocation cache for a single slice object.  Patch by
-  Stefan Behnel.
-
-- Issue #13393: BufferedReader.read1() now asks the raw stream for the full
-  requested size instead of limiting itself to the buffer size.
-
-- Issue #13392: Writing a pyc file should now be atomic under Windows as well.
-
-- Issue #13333: The UTF-7 decoder now accepts lone surrogates (the encoder
-  already accepts them).
-
-- Issue #13389: Full garbage collection passes now clear the freelists for
-  list and dict objects.  They already cleared other freelists in the
-  interpreter.
-
-- Issue #13327: Remove the need for an explicit None as the second argument
-  to os.utime, os.lutimes, os.futimes, os.futimens, os.futimesat, in
-  order to update to the current time. Also added keyword argument
-  handling to os.utimensat in order to remove the need for explicit None.
-
-- Issue #13350: Simplify some C code by replacing most usages of
-  PyUnicode_Format by PyUnicode_FromFormat.
-
-- Issue #13342: input() used to ignore sys.stdin's and sys.stdout's unicode
-  error handler in interactive mode (when calling into PyOS_Readline()).
-
-- Issue #9896: Add start, stop, and step attributes to range objects.
-
-- Issue #13343: Fix a SystemError when a lambda expression uses a global
-  variable in the default value of a keyword-only argument: ``lambda *,
-  arg=GLOBAL_NAME: None``
-
-- Issue #12797: Added custom opener parameter to builtin open() and
-  FileIO.open().
-
-- Issue #10519: Avoid unnecessary recursive function calls in
-  setobject.c.
-
-- Issue #10363: Deallocate global locks in Py_Finalize().
-
-- Issue #13018: Fix reference leaks in error paths in dictobject.c.
-  Patch by Suman Saha.
-
-- Issue #13201: Define '==' and '!=' to compare range objects based on
-  the sequence of values they define (instead of comparing based on
-  object identity).
-
-- Issue #1294232: In a few cases involving metaclass inheritance, the
-  interpreter would sometimes invoke the wrong metaclass when building a new
-  class object. These cases now behave correctly. Patch by Daniel Urban.
-
-- Issue #12753: Add support for Unicode name aliases and named sequences.
-  Both ``unicodedata.lookup()`` and '\N{...}' now resolve aliases,
-  and ``unicodedata.lookup()`` resolves named sequences too.
-
-- Issue #12170: The count(), find(), rfind(), index() and rindex() methods
-  of bytes and bytearray objects now accept an integer between 0 and 255
-  as their first argument.  Patch by Petri Lehtinen.
-
-- Issue #12604: VTRACE macro expanded to no-op in _sre.c to avoid compiler
-  warnings. Patch by Josh Triplett and Petri Lehtinen.
-
-- Issue #12281: Rewrite the MBCS codec to handle correctly replace and ignore
-  error handlers on all Windows versions. The MBCS codec now supports all
-  error handlers, instead of only replace to encode and ignore to decode.
-
-- Issue #13188: When called without an explicit traceback argument,
-  generator.throw() now gets the traceback from the passed exception's
-  ``__traceback__`` attribute.  Patch by Petri Lehtinen.
-
-- Issue #13146: Writing a pyc file is now atomic under POSIX.
-
-- Issue #7833: Extension modules built using distutils on Windows will no
-  longer include a "manifest" to prevent them failing at import time in some
-  embedded situations.
-
-- PEP 3151 / issue #12555: reworking the OS and IO exception hierarchy.
-
-- Add internal API for static strings (_Py_identifier et al.).
-
-- Issue #13063: the Windows error ERROR_NO_DATA (numbered 232 and described
-  as "The pipe is being closed") is now mapped to POSIX errno EPIPE
-  (previously EINVAL).
-
-- Issue #12911: Fix memory consumption when calculating the repr() of huge
-  tuples or lists.
-
-- PEP 393: flexible string representation. Thanks to Torsten Becker for the
-  initial implementation, and Victor Stinner for various bug fixes.
-
-- Issue #14081: The 'sep' and 'maxsplit' parameters to str.split, bytes.split,
-  and bytearray.split may now be passed as keyword arguments.
-
-- Issue #13012: The 'keepends' parameter to str.splitlines may now be passed
-  as a keyword argument:  "my_string.splitlines(keepends=True)".  The same
-  change also applies to bytes.splitlines and bytearray.splitlines.
-
-- Issue #7732: Don't open a directory as a file anymore while importing a
-  module. Ignore the directory if its name matches the module name (e.g.
-  "__init__.py") and raise an ImportError instead.
-
-- Issue #13021: Missing decref on an error path.  Thanks to Suman Saha for
-  finding the bug and providing a patch.
-
-- Issue #12973: Fix overflow checks that relied on undefined behaviour in
-  list_repeat (listobject.c) and islice_next (itertoolsmodule.c).  These bugs
-  caused test failures with recent versions of Clang.
-
-- Issue #12904: os.utime, os.futimes, os.lutimes, and os.futimesat now write
-  atime and mtime with nanosecond precision on modern POSIX platforms.
-
-- Issue #12802: the Windows error ERROR_DIRECTORY (numbered 267) is now
-  mapped to POSIX errno ENOTDIR (previously EINVAL).
-
-- Issue #9200: The str.is* methods now work with strings that contain non-BMP
-  characters even in narrow Unicode builds.
-
-- Issue #12791: Break reference cycles early when a generator exits with
-  an exception.
-
-- Issue #12773: Make __doc__ mutable on user-defined classes.
-
-- Issue #12766: Raise a ValueError when creating a class with a class variable
-  that conflicts with a name in __slots__.
-
-- Issue #12266: Fix str.capitalize() to correctly uppercase/lowercase
-  titlecased and cased non-letter characters.
-
-- Issue #12732: In narrow unicode builds, allow Unicode identifiers which fall
-  outside the BMP.
-
-- Issue #12575: Validate user-generated AST before it is compiled.
-
-- Make type(None), type(Ellipsis), and type(NotImplemented) callable. They
-  return the respective singleton instances.
-
-- Forbid summing bytes with sum().
-
-- Verify the types of AST strings and identifiers provided by the user before
-  compiling them.
-
-- Issue #12647: The None object now has a __bool__() method that returns False.
-  Formerly, bool(None) returned False only because of special case logic
-  in PyObject_IsTrue().
-
-- Issue #12579: str.format_map() now raises a ValueError if used on a
-  format string that contains positional fields. Initial patch by
-  Julian Berman.
-
-- Issue #10271: Allow warnings.showwarning() to be any callable.
-
-- Issue #11627: Fix segfault when __new__ on an exception returns a
-  non-exception class.
-
-- Issue #12149: Update the method cache after a type's dictionary gets
-  cleared by the garbage collector.  This fixes a segfault when an instance
-  and its type get caught in a reference cycle, and the instance's
-  deallocator calls one of the methods on the type (e.g. when subclassing
-  IOBase).  Diagnosis and patch by Davide Rizzo.
-
-- Issue #9611, Issue #9015: FileIO.read() clamps the length to INT_MAX on Windows.
-
-- Issue #9642: Uniformize the tests on the availability of the mbcs codec, add
-  a new HAVE_MBCS define.
-
-- Issue #9642: Fix filesystem encoding initialization: use the ANSI code page
-  on Windows if the mbcs codec is not available, and fail with a fatal error if
-  we cannot get the locale encoding (if nl_langinfo(CODESET) is not available)
-  instead of using UTF-8.
-
-- When a generator yields, do not retain the caller's exception state on the
-  generator.
-
-- Issue #12475: Prevent generators from leaking their exception state into the
-  caller's frame as they return for the last time.
-
-- Issue #12291: You can now load multiple marshalled objects from a stream,
-  with other data interleaved between marshalled objects.
-
-- Issue #12356: When required positional or keyword-only arguments are not
-  given, produce an informative error message which includes the name(s) of the
-  missing arguments.
-
-- Issue #12370: Fix super with no arguments when __class__ is overridden in the
-  class body.
-
-- Issue #12084: os.stat on Windows now works properly with relative symbolic
-  links when called from any directory.
-
-- Loosen type restrictions on the __dir__ method. __dir__ can now return any
-  sequence, which will be converted to a list and sorted by dir().
-
-- Issue #12265: Make error messages produced by passing an invalid set of
-  arguments to a function more informative.
-
-- Issue #12225: Still allow Python to build if Python is not in its hg repo or
-  mercurial is not installed.
-
-- Issue #1195: my_fgets() now always clears errors before calling fgets(). Fix
-  the following case: sys.stdin.read() stopped with CTRL+d (end of file),
-  input() interrupted by CTRL+c.
-
-- Issue #12216: Allow unexpected EOF errors to happen on any line of the file.
-
-- Issue #12199: The TryExcept and TryFinally AST nodes have been unified
-  into a Try node.
-
-- Issue #9670: Increase the default stack size for secondary threads on
-  Mac OS X and FreeBSD to reduce the chances of a crash instead of a
-  "maximum recursion depth" RuntimeError exception.
-  (patch by Ronald Oussoren)
-
-- Issue #12106: The use of the multiple-with shorthand syntax is now reflected
-  in the AST.
-
-- Issue #12190: Try to use the same filename object when compiling or
-  unmarshalling code objects in the same file.
-
-- Issue #12166: Move implementations of dir() specialized for various types into
-  the __dir__() methods of those types.
-
-- Issue #5715: In socketserver, close the server socket in the child process.
-
-- Correct lookup of __dir__ on objects. Among other things, this causes errors
-  besides AttributeError found on lookup to be propagated.
-
-- Issue #12060: Use sig_atomic_t type and volatile keyword in the signal
-  module. Patch written by Charles-François Natali.
-
-- Issue #1746656: Added the if_nameindex, if_indextoname, if_nametoindex
-  methods to the socket module.
-
-- Issue #12044: Fixed subprocess.Popen when used as a context manager to
-  wait for the process to end when exiting the context to avoid unintentionally
-  leaving zombie processes around.
-
-- Issue #1195: Fix input() if it is interrupted by CTRL+d and then CTRL+c,
-  clear the end-of-file indicator after CTRL+d.
-
-- Issue #1856: Avoid crashes and lockups when daemon threads run while the
-  interpreter is shutting down; instead, these threads are now killed when
-  they try to take the GIL.
-
-- Issue #9756: When calling a method descriptor or a slot wrapper descriptor,
-  the check of the object type doesn't read the __class__ attribute anymore.
-  Fix a crash if a class overrides its __class__ attribute (e.g. a proxy of the
-  str type). Patch written by Andreas Stührk.
-
-- Issue #10517: After fork(), reinitialize the TLS used by the PyGILState_*
-  APIs, to avoid a crash with the pthread implementation in RHEL 5.  Patch
-  by Charles-François Natali.
-
-- Issue #10914: Initialize correctly the filesystem codec when creating a new
-  subinterpreter to fix a bootstrap issue with codecs implemented in Python,
-  such as the ISO-8859-15 codec.
-
-- Issue #11918: OS/2 and VMS are no longer supported because of the lack of a
-  maintainer.
-
-- Issue #6780: fix starts/endswith error message to mention that tuples are
-  accepted too.
-
-- Issue #5057: fix a bug in the peepholer that led to non-portable pyc files
-  between narrow and wide builds while optimizing BINARY_SUBSCR on non-BMP
-  chars (e.g. "\U00012345"[0]).
-
-- Issue #11845: Fix typo in rangeobject.c that caused a crash in
-  compute_slice_indices.  Patch by Daniel Urban.
-
-- Issue #5673: Added a `timeout` keyword argument to subprocess.Popen.wait,
-  subprocess.Popen.communicate, subprocess.call, subprocess.check_call, and
-  subprocess.check_output.  If the blocking operation takes more than `timeout`
-  seconds, the `subprocess.TimeoutExpired` exception is raised.
-
-- Issue #11650: PyOS_StdioReadline() retries fgets() if it was interrupted
-  (EINTR), for example if the program is stopped with CTRL+z on Mac OS X. Patch
-  written by Charles-Francois Natali.
-
-- Issue #9319: Include the filename in "Non-UTF8 code ..." syntax error.
-
-- Issue #10785: Store the filename as Unicode in the Python parser.
-
-- Issue #11619: _PyImport_LoadDynamicModule() doesn't encode the path to bytes
-  on Windows.
-
-- Issue #10998: Remove mentions of -Q, sys.flags.division_warning and
-  Py_DivisionWarningFlag left over from Python 2.
-
-- Issue #11244: Remove an unnecessary peepholer check that was preventing
-  negative zeros from being constant-folded properly.
-
-- Issue #11395: io.FileIO().write() clamps the data length to 32,767 bytes on
-  Windows if the file is a TTY to workaround a Windows bug. The Windows console
-  returns an error (12: not enough space error) on writing into stdout if
-  stdout mode is binary and the length is greater than 66,000 bytes (or less,
-  depending on heap usage).
-
-- Issue #11320: fix bogus memory management in Modules/getpath.c, leading to
-  a possible crash when calling Py_SetPath().
-
-- Issue #11432: Fix a bug introduced in subprocess.Popen on POSIX systems in
-  3.2.0 where the stdout or stderr file descriptor being the same as the stdin
-  file descriptor would raise an exception; this also broke webbrowser.open.
-
-- Issue #9856: Change object.__format__ with a non-empty format string
-  to be a DeprecationWarning. In 3.2 it was a PendingDeprecationWarning.
-  In 3.4 it will be a TypeError.
-
-- Issue #11244: The peephole optimizer is now able to constant-fold
-  arbitrarily complex expressions.  This also fixes a 3.2 regression where
-  operations involving negative numbers were not constant-folded.
-
-- Issue #11450: Don't truncate hg version info in Py_GetBuildInfo() when
-  there are many tags (e.g. when using mq).  Patch by Nadeem Vawda.
-
-- Issue #11335: Fixed a memory leak in list.sort when the key function
-  throws an exception.
-
-- Issue #8923: When a string is encoded to UTF-8 in strict mode, the result is
-  cached into the object. Examples: str.encode(), str.encode('utf-8'),
-  PyUnicode_AsUTF8String() and PyUnicode_AsEncodedString(unicode, "utf-8",
-  NULL).
-
-- Issue #10829: Refactor PyUnicode_FromFormat(), use the same function to parse
-  the format string in the 3 steps, fix crashes on invalid format strings.
-
-- Issue #13007: whichdb should recognize gdbm 1.9 magic numbers.
-
-- Issue #11286: Raise a ValueError from calling PyMemoryView_FromBuffer with
-  a buffer struct having a NULL data pointer.
-
-- Issue #11272: On Windows, input() strips '\r' (and not only '\n'), and
-  sys.stdin uses universal newline (replace '\r\n' by '\n').
-
-- Issue #11828: startswith and endswith now accept None as slice index.
-  Patch by Torsten Becker.
-
-- Issue #11168: Remove filename debug variable from PyEval_EvalFrameEx().
-  It encoded the Unicode filename to UTF-8, but the encoding fails on
-  undecodable filename (on surrogate characters) which raises an unexpected
-  UnicodeEncodeError on recursion limit.
-
-- Issue #11187: Remove bootstrap code (use ASCII) of
-  PyUnicode_AsEncodedString(), it was replaced by a better fallback (use the
-  locale encoding) in PyUnicode_EncodeFSDefault().
-
-- Check for NULL result in PyType_FromSpec.
-
-- Issue #10516: New copy() and clear() methods for lists and bytearrays.
-
-- Issue #11386: bytearray.pop() now throws IndexError when the bytearray is
-  empty, instead of OverflowError.
-
-- Issue #12380: The rjust, ljust and center methods of bytes and bytearray
-  now accept a bytearray argument.
-
-Library
--------
-
-- Issue #14195: An issue that caused weakref.WeakSet instances to incorrectly
-  return True for a WeakSet instance 'a' in both 'a < a' and 'a > a' has been
-  fixed.
-
-- Issue #14166: Pickler objects now have an optional ``dispatch_table``
-  attribute which allows setting custom per-pickler reduction functions.
-  Patch by sbt.
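-
-  A rough sketch of a per-pickler reduction function (the Point class and its
-  reducer are made-up examples)::
-
-      import copyreg
-      import io
-      import pickle
-
-      class Point:
-          def __init__(self, x, y):
-              self.x, self.y = x, y
-
-      def reduce_point(p):
-          return (Point, (p.x, p.y))
-
-      buf = io.BytesIO()
-      pickler = pickle.Pickler(buf)
-      pickler.dispatch_table = copyreg.dispatch_table.copy()
-      pickler.dispatch_table[Point] = reduce_point   # per-pickler override
-      pickler.dump(Point(1, 2))
-      print(pickle.loads(buf.getvalue()).x)          # 1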
-
-- Issue #14177: marshal.loads() now raises TypeError when given a unicode
-  string.  Patch by Guilherme Gonçalves.
-
-- Issue #13550: Remove the debug machinery from the threading module: remove
-  verbose arguments from all threading classes and functions.
-
-- Issue #14159: Fix the len() of weak containers (WeakSet, WeakKeyDictionary,
-  WeakValueDictionary) to return a better approximation when some objects
-  are dead or dying.  Moreover, the implementation is now O(1) rather than
-  O(n).
-
-- Issue #11841: Fix comparison bug with 'rc' versions in packaging.version.
-  Patch by Filip Gruszczyński.
-
-- Issue #6884: Fix long-standing bugs with MANIFEST.in parsing in distutils
-  on Windows.  Also fixed in packaging.
-
-- Issue #8033: sqlite3: Fix 64-bit integer handling in user functions
-  on 32-bit architectures. Initial patch by Philippe Devalkeneer.
-
-- HTMLParser is now able to handle slashes in the start tag.
-
-- Issue #13641: Decoding functions in the base64 module now accept ASCII-only
-  unicode strings.  Patch by Catalin Iacob.
-
-- Issue #14043: Speed up importlib's _FileFinder by at least 8x, and add a
-  new importlib.invalidate_caches() function.
-
-- Issue #14001: CVE-2012-0845: xmlrpc: Fix an endless loop in
-  SimpleXMLRPCServer upon malformed POST request.
-
-- Issue #13961: Move importlib over to using os.replace() for atomic renaming.
-
-- Do away with ambiguous level values (as suggested by PEP 328) in
-  importlib.__import__() by raising ValueError when level < 0.
-
-- Issue #2489: pty.spawn could consume 100% CPU when it encountered an EOF.
-
-- Issue #13014: Fix a possible reference leak in SSLSocket.getpeercert().
-
-- Issue #13777: Add PF_SYSTEM sockets on OS X.
-  Patch by Michael Goderbauer.
-
-- Issue #13015: Fix a possible reference leak in defaultdict.__repr__.
-  Patch by Suman Saha.
-
-- Issue #1326113: The build_ext commands of distutils and packaging now
-  correctly parse multiple values (separated by whitespace or commas) given
-  to their --libraries option.
-
-- Issue #10287: nntplib now queries the server's CAPABILITIES first before
-  sending MODE READER, and only sends it if not already in READER mode.
-  Patch by Hynek Schlawack.
-
-- Issue #13993: HTMLParser is now able to handle broken end tags when
-  strict=False.
-
-- Issue #13930: lib2to3 now supports writing converted output files to another
-  directory tree as well as copying unchanged files and altering the file
-  suffix.
-
-- Issue #9750: Fix sqlite3.Connection.iterdump on tables and fields
-  with a name that is a keyword or contains quotes. Patch by Marko
-  Kohtala.
-
-- Issue #10287: nntplib now queries the server's CAPABILITIES again after
-  authenticating (since the result may change, according to RFC 4643).
-  Patch by Hynek Schlawack.
-
-- Issue #13590: On OS X 10.7 and 10.6 with Xcode 4.2, building
-  Distutils-based packages with C extension modules may fail because
-  Apple has removed gcc-4.2, the version used to build python.org
-  64-bit/32-bit Pythons.  If the user does not explicitly override
-  the default C compiler by setting the CC environment variable,
-  Distutils will now attempt to compile extension modules with clang
-  if gcc-4.2 is required but not found. Also as a convenience, if
-  the user does explicitly set CC, substitute its value as the default
-  compiler in the Distutils LDSHARED configuration variable for OS X.
-  (Note, the python.org 32-bit-only Pythons use gcc-4.0 and the 10.4u
-  SDK, neither of which are available in Xcode 4.  This change does not
-  attempt to override settings to support their use with Xcode 4.)
-
-- Issue #13960: HTMLParser is now able to handle broken comments when
-  strict=False.
-
-- When '' is a path (e.g. in sys.path), make sure __file__ uses the current
-  working directory instead of '' in importlib.
-
-- Issue #13609: Add two functions to query the terminal size:
-  os.get_terminal_size (low level) and shutil.get_terminal_size (high level).
-  Patch by Zbigniew Jędrzejewski-Szmek.
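A minimal usage sketch of the new high-level helper; the fallback values shown
are arbitrary::

    import shutil

    # Falls back to the given size when the streams are not attached to a
    # terminal (for example when output is redirected to a file).
    size = shutil.get_terminal_size(fallback=(80, 24))
    print("{} columns x {} lines".format(size.columns, size.lines))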
-
-- Issue #13845: On Windows, time.time() now uses GetSystemTimeAsFileTime()
-  instead of ftime() to have a resolution of 100 ns instead of 1 ms (the clock
-  accuracy is between 0.5 ms and 15 ms).
-
-- Issue #13846: Add time.monotonic(), monotonic clock.
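A minimal sketch of timing a block of work with the new clock, which is not
affected by system clock updates; the sleep stands in for real work::

    import time

    start = time.monotonic()
    time.sleep(0.1)                     # stand-in for the work being timed
    elapsed = time.monotonic() - start
    print("took {:.3f} seconds".format(elapsed))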
-
-- Issue #8184: multiprocessing: On Windows, don't set SO_REUSEADDR on
-  Connection sockets, and set FILE_FLAG_FIRST_PIPE_INSTANCE on named pipes, to
-  make sure two listeners can't bind to the same socket/pipe (or any existing
-  socket/pipe).
-
-- Issue #10811: Fix recursive usage of cursors. Instead of crashing,
-  raise a ProgrammingError now.
-
-- Issue #13734: Add os.fwalk(), a directory walking function yielding file
-  descriptors.
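A brief sketch of iterating over the extra file-descriptor element; the
starting directory is arbitrary::

    import os

    # Like os.walk(), but each tuple also includes a file descriptor for the
    # directory, usable with dir_fd-aware functions (Unix only).
    for dirpath, dirnames, filenames, dir_fd in os.fwalk("/tmp"):
        for name in filenames:
            st = os.stat(name, dir_fd=dir_fd)   # stat relative to the open dir
            print(name, st.st_size)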
-
-- Issue #2945: Make the distutils upload command aware of bdist_rpm products.
-
-- Issue #13712: pysetup create should not convert package_data to extra_files.
-
-- Issue #11805: package_data in setup.cfg should allow more than one value.
-
-- Issue #13676: Handle strings with embedded zeros correctly in sqlite3.
-
-- Issue #8828: Add new function os.replace(), for cross-platform renaming
-  with overwriting.
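A short sketch of the usual write-then-replace pattern; the file names are
hypothetical::

    import os

    with open("settings.tmp", "w") as f:
        f.write("new contents\n")
    # Unlike os.rename() on Windows, os.replace() overwrites an existing
    # destination on every platform.
    os.replace("settings.tmp", "settings.conf")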
-
-- Issue #13848: open() and the FileIO constructor now check for NUL
-  characters in the file name.  Patch by Hynek Schlawack.
-
-- Issue #13806: The size check in audioop decompression functions was too
-  strict and could reject valid compressed data.  Patch by Oleg Plakhotnyuk.
-
-- Issue #13812: When a multiprocessing Process child raises an exception,
-  flush stderr after printing the exception traceback.
-
-- Issue #13885: CVE-2011-3389: the _ssl module would always disable the CBC
-  IV attack countermeasure.
-
-- Issue #13847: time.localtime() and time.gmtime() now raise an OSError instead
-  of ValueError on failure. time.ctime() and time.asctime() now raise an
-  OSError if localtime() failed. time.clock() now raises a RuntimeError if the
-  processor time used is not available or its value cannot be represented.
-
-- Issue #13772: In os.symlink() under Windows, do not try to guess the link
-  target's type (file or directory).  The detection was buggy and made the
-  call non-atomic (therefore prone to race conditions).
-
-- Issue #6631: Disallow relative file paths in urllib urlopen methods.
-
-- Issue #13722: Avoid silencing ImportErrors when initializing the codecs
-  registry.
-
-- Issue #13781: Fix GzipFile bug that caused an exception to be raised when
-  opening for writing using a fileobj returned by os.fdopen().
-
-- Issue #13803: Under Solaris, distutils doesn't include bitness
-  in the directory name.
-
-- Issue #10278: Add time.wallclock() function, monotonic clock.
-
-- Issue #13809: Fix regression where bz2 module wouldn't work when threads are
-  disabled. Original patch by Amaury Forgeot d'Arc.
-
-- Issue #13589: Fix some serialization primitives in the aifc module.
-  Patch by Oleg Plakhotnyuk.
-
-- Issue #13642: Unquote before b64encoding user:password during Basic
-  Authentication. Patch contributed by Joonas Kuorilehto.
-
-- Issue #12364: Fix a hang in concurrent.futures.ProcessPoolExecutor.
-  The hang would occur when retrieving the result of a scheduled future after
-  the executor had been shut down.
-
-- Issue #13502: threading: Fix a race condition in Event.wait() that made it
-  return False when the event was set and cleared right after.
-
-- Issue #9993: When the source and destination are on different filesystems,
-  and the source is a symlink, shutil.move() now recreates a symlink on the
-  destination instead of copying the file contents.  Patch by Jonathan Niehof
-  and Hynek Schlawack.
-
-- Issue #12926: Fix a bug in tarfile's link extraction.
-
-- Issue #13696: Fix the 302 Relative URL Redirection problem.
-
-- Issue #13636: Weak ciphers are now disabled by default in the ssl module
-  (except when SSLv2 is explicitly asked for).
-
-- Issue #12715: Add an optional symlinks argument to shutil functions
-  (copyfile, copymode, copystat, copy, copy2).  When that parameter is
-  true, symlinks aren't dereferenced and the operation instead acts on the
-  symlink itself (or creates one, if relevant).  Patch by Hynek Schlawack.
-
-- Add a flags parameter to select.epoll.
-
-- Issue #13626: Add support for SSL Diffie-Hellman key exchange, through the
-  SSLContext.load_dh_params() method and the ssl.OP_SINGLE_DH_USE option.
-
-- Issue #11006: Don't issue low level warning in subprocess when pipe2() fails.
-
-- Issue #13620: Support for Chrome browser in webbrowser.  Patch contributed
-  by Arnaud Calmettes.
-
-- Issue #11829: Fix code execution holes in inspect.getattr_static for
-  metaclasses with metaclasses. Patch by Andreas Stührk.
-
-- Issue #12708: Add starmap() and starmap_async() methods (similar to
-  itertools.starmap()) to multiprocessing.Pool.  Patch by Hynek Schlawack.
-
-- Issue #1785: Fix inspect and pydoc with misbehaving descriptors.
-
-- Issue #13637: "a2b" functions in the binascii module now accept ASCII-only
-  unicode strings.
-
-- Issue #13634: Add support for querying and disabling SSL compression.
-
-- Issue #13627: Add support for SSL Elliptic Curve-based Diffie-Hellman
-  key exchange, through the SSLContext.set_ecdh_curve() method and the
-  ssl.OP_SINGLE_ECDH_USE option.
-
-- Issue #13635: Add ssl.OP_CIPHER_SERVER_PREFERENCE, so that SSL servers
-  choose the cipher based on their own preferences, rather than on the
-  client's.
-
-- Issue #11813: Fix inspect.getattr_static for modules. Patch by Andreas
-  Stührk.
-
-- Issue #7502: Fix equality comparison for DocTestCase instances.  Patch by
-  Cédric Krier.
-
-- Issue #11870: threading: Properly reinitialize threads internal locks and
-  condition variables to avoid deadlocks in child processes.
-
-- Issue #8035: urllib: Fix a bug where the client could remain stuck after a
-  redirection or an error.
-
-- Issue #13560: os.strerror() now uses the current locale encoding instead of
-  UTF-8.
-
-- Issue #8373: The filesystem path of AF_UNIX sockets now uses the filesystem
-  encoding and the surrogateescape error handler, rather than UTF-8.  Patch
-  by David Watson.
-
-- Issue #10350: Read and save errno before calling a function which might
-  overwrite it.  Original patch by Hallvard B Furuseth.
-
-- Issue #11610: Introduce a more general way to declare abstract properties.
-
-- Issue #13591: A bug in importlib has been fixed that caused import_module
-  to load a module twice.
-
-- Issue #13449 sched.scheduler.run() method has a new "blocking" parameter which
-  when set to False makes run() execute the scheduled events due to expire
-  soonest (if any) and then return.  Patch by Giampaolo Rodolà.
-
-- Issue #8684 sched.scheduler class can be safely used in multi-threaded
-  environments.  Patch by Josiah Carlson and Giampaolo Rodolà.
-
-- Alias resource.error to OSError, a la PEP 3151.
-
-- Issue #5689: Add support for lzma compression to the tarfile module.
-
-- Issue #13248: Turn 3.2's PendingDeprecationWarning into 3.3's
-  DeprecationWarning.  It covers 'cgi.escape', 'importlib.abc.PyLoader',
-  'importlib.abc.PyPycLoader', 'nntplib.NNTP.xgtitle', 'nntplib.NNTP.xpath',
-  and private attributes of 'smtpd.SMTPChannel'.
-
-- Issue #5905, Issue #13560: time.strftime() is now using the current locale
-  encoding, instead of UTF-8, if the wcsftime() function is not available.
-
-- Issue #13464: Add a readinto() method to http.client.HTTPResponse.  Patch
-  by Jon Kuhn.
-
-- tarfile.py: Correctly detect bzip2 compressed streams with blocksizes
-  other than 900k.
-
-- Issue #13439: Fix many errors in turtle docstrings.
-
-- Issue #6715: Add a module 'lzma' for compression using the LZMA algorithm.
-  Thanks to Per Øyvind Karlsen for the initial implementation.
-
-- Issue #13487: Make inspect.getmodule robust against changes done to
-  sys.modules while it is iterating over it.
-
-- Issue #12618: Fix a bug that prevented py_compile from creating byte
-  compiled files in the current directory.  Initial patch by Sjoerd de Vries.
-
-- Issue #13444: When stdout has been closed explicitly, we should not attempt
-  to flush it at shutdown and print an error.
-
-- Issue #12567: The curses module uses Unicode functions for Unicode arguments
-  when it is linked to the ncurses library. It also encodes Unicode strings to
-  the locale encoding instead of UTF-8.
-
-- Issue #12856: Ensure child processes do not inherit the parent's random
-  seed for filename generation in the tempfile module.  Patch by Brian
-  Harring.
-
-- Issue #9957: SpooledTemporaryFile.truncate() now accepts an optional size
-  parameter, like other file-like objects.  Patch by Ryan Kelly.
-
-- Issue #13458: Fix a memory leak in the ssl module when decoding a
-  certificate with a subjectAltName.  Patch by Robert Xiao.
-
-- Issue #13415: os.unsetenv() doesn't ignore errors anymore.
-
-- Issue #13245: sched.scheduler class constructor's timefunc and
-  delayfunc parameters are now optional.
-  scheduler.enter and scheduler.enterabs methods gained a new kwargs parameter.
-  Patch contributed by Chris Clark.
-
-- Issue #12328: Under Windows, refactor handling of Ctrl-C events and
-  make _multiprocessing.win32.WaitForMultipleObjects interruptible when
-  the wait_flag parameter is false.  Patch by sbt.
-
-- Issue #13322: Fix BufferedWriter.write() to ensure that BlockingIOError is
-  raised when the wrapped raw file is non-blocking and the write would block.
-  Previous code assumed that the raw write() would raise BlockingIOError, but
-  RawIOBase.write() is defined to return None when the call would block.
-  Patch by sbt.
-
-- Issue #13358: HTMLParser now calls handle_data only once for each CDATA.
-
-- Issue #4147: minidom's toprettyxml no longer adds whitespace around a text
-  node when it is the only child of an element.  Initial patch by Dan
-  Kenigsberg.
-
-- Issue #13374: The Windows bytes API has been deprecated in the os module. Use
-  Unicode filenames instead of bytes filenames to not depend on the ANSI code
-  page anymore and to support any filename.
-
-- Issue #13297: Use bytes type to send and receive binary data through XMLRPC.
-
-- Issue #6397: Support "/dev/poll" polling objects in select module,
-  under Solaris & derivatives.
-
-- Issues #1745761, #755670, #13357, #12629, #1200313: HTMLParser now correctly
-  handles non-valid attributes, including adjacent and unquoted attributes.
-
-- Issue #13193: Fix distutils.filelist.FileList and packaging.manifest.Manifest
-  under Windows.
-
-- Issue #13384: Remove unnecessary __future__ import in Lib/random.py
-
-- Issue #13149: Speed up append-only StringIO objects.
-
-- Issue #13373: multiprocessing.Queue.get() could sometimes block indefinitely
-  when called with a timeout.  Patch by Arnaud Ysmal.
-
-- Issue #13254: Fix Maildir initialization so that maildir contents
-  are read correctly.
-
-- Issue #3067: locale.setlocale() now raises TypeError if the second
-  argument is an invalid iterable. Its documentation and docstring
-  were also updated. Initial patch by Jyrki Pulliainen.
-
-- Issue #13140: Fix the daemon_threads attribute of ThreadingMixIn.
-
-- Issue #13339: Fix compile error in posixmodule.c due to missing semicolon.
-  Thanks to Robert Xiao.
-
-- Byte compilation in packaging is now isolated from the calling Python -B or
-  -O options, instead of being disallowed under -B or buggy under -O.
-
-- Issue #10570: curses.putp() and curses.tparm() now expect a byte string,
-  instead of a Unicode string.
-
-- Issue #13295: http.server now produces valid HTML 4.01 strict.
-
-- Issue #2892: preserve iterparse events in case of SyntaxError.
-
-- Issue #13287: urllib.request and urllib.error now contain an __all__
-  attribute to expose only relevant classes and functions.  Patch by Florent
-  Xicluna.
-
-- Issue #670664: Fix HTMLParser to correctly handle the content of
-  ``<script>...</script>`` and ``<style>...</style>``.
-
-- Issue #10817: Fix urlretrieve function to raise ContentTooShortError even
-  when reporthook is None. Patch by Jyrki Pulliainen.
-
-- Fix the xmlrpc.client user agent to return something similar to
-  urllib.request user agent: "Python-xmlrpc/3.3".
-
-- Issue #13293: Better error message when trying to marshal bytes using
-  xmlrpc.client.
-
-- Issue #13291: Fix a NameError in the xmlrpc package.
-
-- Issue #13258: Use callable() built-in in the standard library.
-
-- Issue #13273: fix a bug that prevented HTMLParser from properly detecting
-  some tags when strict=False.
-
-- Issue #11183: Add finer-grained exceptions to the ssl module, so that
-  you don't have to inspect the exception's attributes in the common case.
-
-- Issue #13216: Add cp65001 codec, the Windows UTF-8 (CP_UTF8).
-
-- Issue #13226: Add RTLD_xxx constants to the os module. These constants can be
-  used with sys.setdlopenflags().
-
-- Issue #10278: Add clock_getres(), clock_gettime() and CLOCK_xxx constants to
-  the time module. time.clock_gettime(time.CLOCK_MONOTONIC) provides a
-  monotonic clock.
-
-- Issue #10332: multiprocessing: fix a race condition when a Pool is closed
-  before all tasks have completed.
-
-- Issue #13255: Fix wrong docstrings in the array module.
-
-- Issue #8540: Remove deprecated Context._clamp attribute in Decimal module.
-
-- Issue #13235: Added DeprecationWarning to logging.warn() method and function.
-
-- Issue #9168: smtpd is now able to bind to a privileged port.
-
-- Issue #12529: fix cgi.parse_header issue on strings with double-quotes and
-  semicolons together. Patch by Ben Darnell and Petri Lehtinen.
-
-- Issue #13227: functools.lru_cache() now has an option to distinguish
-  calls with different argument types.
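A small sketch of the new ``typed`` option; the decorated function is
hypothetical::

    from functools import lru_cache

    @lru_cache(maxsize=128, typed=True)
    def double(x):
        return x * 2

    double(3)    # cached under the int key 3
    double(3.0)  # cached separately, since typed=True distinguishes 3 from 3.0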
-
-- Issue #6090: zipfile raises a ValueError when a document with a timestamp
-  earlier than 1980 is provided. Patch contributed by Petri Lehtinen.
-
-- Issue #13150: sysconfig no longer parses the Makefile and config.h files
-  when imported, instead doing it at build time.  This makes importing
-  sysconfig faster and reduces Python startup time by 20%.
-
-- Issue #12448: smtplib now flushes stdout while running ``python -m smtplib``
-  in order to display the prompt correctly.
-
-- Issue #12454: The mailbox module is now using ASCII, instead of the locale
-  encoding, to read and write .mh_sequences files.
-
-- Issue #13194: zlib.compressobj().copy() and zlib.decompressobj().copy() are
-  now available on Windows.
-
-- Issue #1673007: urllib.request now supports HEAD request via new method argument.
-  Patch contributions by David Stanek, Patrick Westerhoff and Ezio Melotti.
-
-- Issue #12386: packaging does not fail anymore when writing the RESOURCES
-  file.
-
-- Issue #13158: Fix decoding and encoding of GNU tar specific base-256 number
-  fields in tarfile.
-
-- Issue #13025: mimetypes is now reading MIME types using the UTF-8 encoding,
-  instead of the locale encoding.
-
-- Issue #10653: On Windows, use strftime() instead of wcsftime() because
-  wcsftime() doesn't format time zone correctly.
-
-- Issue #13150: The tokenize module doesn't compile large regular expressions
-  at startup anymore.
-
-- Issue #11171: Fix distutils.sysconfig.get_makefile_filename when Python was
-  configured with different prefix and exec-prefix.
-
-- Issue #11254: Teach distutils and packaging to compile .pyc and .pyo files in
-  PEP 3147-compliant __pycache__ directories.
-
-- Issue #7367: Fix pkgutil.walk_paths to skip directories whose
-  contents cannot be read.
-
-- Issue #3163: The struct module gets new format characters 'n' and 'N'
-  supporting C integer types ``ssize_t`` and ``size_t``, respectively.
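A quick sketch; 'n' and 'N' are only accepted in native byte order (no '<' or
'>' prefix)::

    import struct

    packed = struct.pack("n", -1)    # a native C ssize_t
    struct.unpack("n", packed)       # (-1,)
    struct.calcsize("N")             # size of a native size_t, e.g. 8 on 64-bit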
-
-- Issue #13099: Fix sqlite3.Cursor.lastrowid under a Turkish locale.
-  Reported and diagnosed by Thomas Kluyver.
-
-- Issue #13087: BufferedReader.seek() now always raises UnsupportedOperation
-  if the underlying raw stream is unseekable, even if the seek could be
-  satisfied using the internal buffer.  Patch by John O'Connor.
-
-- Issue #7689: Allow pickling of dynamically created classes when their
-  metaclass is registered with copyreg.  Patch by Nicolas M. Thiéry and Craig
-  Citro.
-
-- Issue #13034: When decoding some SSL certificates, the subjectAltName
-  extension could be unreported.
-
-- Issue #12306: Expose the runtime version of the zlib C library as a constant,
-  ZLIB_RUNTIME_VERSION, in the zlib module. Patch by Torsten Landschoff.
-
-- Issue #12959: Add collections.ChainMap to collections.__all__.
-
-- Issue #8933: distutils' PKG-INFO files and packaging's METADATA files will
-  now correctly report Metadata-Version: 1.1 instead of 1.0 if a Classifier or
-  Download-URL field is present.
-
-- Issue #12567: Add curses.unget_wch() function. Push a character so the next
-  get_wch() will return it.
-
-- Issue #9561: distutils and packaging now write egg-info files using UTF-8,
-  instead of the locale encoding.
-
-- Issue #8286: The distutils command sdist will print a warning message instead
-  of crashing when an invalid path is given in the manifest template.
-
-- Issue #12841: tarfile unnecessarily checked the existence of numerical user
-  and group ids on extraction. If one of them did not exist, the respective id
-  of the current user (i.e. root) was used for the file and ownership
-  information was lost.
-
-- Issue #12888: Fix a bug in HTMLParser.unescape that prevented it from
-  escaping more than 128 entities.  Patch by Peter Otten.
-
-- Issue #12878: Expose a __dict__ attribute on io.IOBase and its subclasses.
-
-- Issue #12494: On error, call(), check_call(), check_output() and
-  getstatusoutput() functions of the subprocess module now kill the process,
-  read its status (to avoid zombies) and close pipes.
-
-- Issue #12720: Expose low-level Linux extended file attribute functions in os.
-
-- Issue #10946: The distutils commands bdist_dumb, bdist_wininst and bdist_msi
-  now respect a --skip-build option given to bdist.  The packaging commands
-  were fixed too.
-
-- Issue #12847: Fix a crash with negative PUT and LONG_BINPUT arguments in
-  the C pickle implementation.
-
-- Issue #11564: Avoid crashes when trying to pickle huge objects or containers
-  (more than 2**31 items).  Instead, in most cases, an OverflowError is raised.
-
-- Issue #12287: Fix a stack corruption in ossaudiodev module when the FD is
-  greater than FD_SETSIZE.
-
-- Issue #12839: Fix crash in zlib module due to version mismatch.
-  Fix by Richard M. Tew.
-
-- Issue #9923: The mailcap module now correctly uses the platform path
-  separator for the MAILCAP environment variable on non-POSIX platforms.
-
-- Issue #12835: Follow up to #6560 that unconditionally prevents use of the
-  unencrypted sendmsg/recvmsg APIs on SSL wrapped sockets. Patch by David
-  Watson.
-
-- Issue #12803: SSLContext.load_cert_chain() now accepts a password argument
-  to be used if the private key is encrypted.  Patch by Adam Simpkins.
-
-- Issue #11657: Fix sending file descriptors over 255 over a multiprocessing
-  Pipe.
-
-- Issue #12811: tabnanny.check() now promptly closes checked files. Patch by
-  Anthony Briggs.
-
-- Issue #6560: The sendmsg/recvmsg API is now exposed by the socket module
-  when provided by the underlying platform, supporting processing of
-  ancillary data in pure Python code. Patch by David Watson and Heiko Wundram.
-
-- Issue #12326: On Linux, sys.platform doesn't contain the major version
-  anymore. It is now always 'linux', instead of 'linux2' or 'linux3' depending
-  on the Linux version used to build Python.
-
-- Issue #12213: Fix a buffering bug with interleaved reads and writes that
-  could appear on BufferedRandom streams.
-
-- Issue #12778: Reduce memory consumption when JSON-encoding a large
-  container of many small objects.
-
-- Issue #12650: Fix a race condition where a subprocess.Popen could leak
-  resources (FD/zombie) when killed at the wrong time.
-
-- Issue #12744: Fix inefficient representation of integers between 2**31 and
-  2**63 on systems with a 64-bit C "long".
-
-- Issue #12646: Add an 'eof' attribute to zlib.Decompress, to make it easier to
-  detect truncated input streams.
-
-- Issue #11513: Fix exception handling ``tarfile.TarFile.gzopen()`` when
-  the file cannot be opened.
-
-- Issue #12687: Fix a possible buffering bug when unpickling text mode
-  (protocol 0, mostly) pickles.
-
-- Issue #10087: Fix the html output format of the calendar module.
-
-- Issue #13121: add support for inplace math operators to collections.Counter.
-
-- Add support for unary plus and unary minus to collections.Counter.
-
-- Issue #12683: urlparse updated to include svn among the schemes that use
-  relative paths (svn 1.5 onwards supports relative paths).
-
-- Issue #12655: Expose functions from sched.h in the os module: sched_yield(),
-  sched_setscheduler(), sched_getscheduler(), sched_setparam(),
-  sched_get_min_priority(), sched_get_max_priority(), sched_rr_get_interval(),
-  sched_getaffinity(), sched_setaffinity().
-
-- Add ThreadError to threading.__all__.
-
-- Issues #11104, #8688: Fix the behavior of distutils' sdist command with
-  manually-maintained MANIFEST files.
-
-- Issue #11281: smtplib.SMTP gets a source_address parameter, which adds the
-  ability to bind to specific source address on a machine with multiple
-  interfaces. Patch by Paulo Scardine.
-
-- Issue #12464: tempfile.TemporaryDirectory.cleanup() should not follow
-  symlinks: fix it. Patch by Petri Lehtinen.
-
-- Issue #8887: "pydoc somebuiltin.somemethod" (or help('somebuiltin.somemethod')
-  in Python code) now finds the doc of the method.
-
-- Issue #10968: Remove indirection in threading.  The public names (Event,
-  Condition, etc.) used to be factory functions returning instances of hidden
-  classes (_Event, _Condition, etc.), because (if Guido recalls correctly) this
-  code pre-dates the ability to subclass extension types.  It is now possible
-  to inherit from these classes, without having to import the private
-  underscored names like multiprocessing did.
-
-- Issue #9723: Add the shlex.quote function, to escape filenames and command
-  lines.
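A minimal sketch with a hypothetical untrusted value::

    import shlex

    filename = "somefile; rm -rf ~"              # hypothetical untrusted input
    command = "ls -l " + shlex.quote(filename)   # now safe for a POSIX shell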
-
-- Issue #12603: Fix pydoc.synopsis() on files with non-negative st_mtime.
-
-- Issue #12514: Use try/finally to assure the timeit module restores garbage
-  collection when it is done.
-
-- Issue #12607: In subprocess, fix issue where if stdin, stdout or stderr is
-  given as a low fd, it gets overwritten.
-
-- Issue #12576: Fix urlopen behavior on sites which do not send (or obfuscate)
-  ``Connection: close`` header.
-
-- Issue #12560: Build libpython.so on OpenBSD. Patch by Stefan Sperling.
-
-- Issue #1813: Fix codec lookup under Turkish locales.
-
-- Issue #12591: Improve support of "universal newlines" in the subprocess
-  module: the piped streams can now be properly read from or written to.
-
-- Issue #12591: Allow io.TextIOWrapper to work with raw IO objects (without
-  a read1() method), and add a *write_through* parameter to mandate
-  unbuffered writes.
-
-- Issue #10883: Fix socket leaks in urllib.request when using FTP.
-
-- Issue #12592: Make Python build on OpenBSD 5 (and future major releases).
-
-- Issue #12372: POSIX semaphores are broken on AIX: don't use them.
-
-- Issue #12551: Provide a get_channel_binding() method on SSL sockets so as
-  to get channel binding data for the current SSL session (only the
-  "tls-unique" channel binding is implemented).  This allows the implementation
-  of certain authentication mechanisms such as SCRAM-SHA-1-PLUS.  Patch by
-  Jacek Konieczny.
-
-- Issue #665194: email.utils now has format_datetime and parsedate_to_datetime
-  functions, allowing for round tripping of RFC2822 format dates.
-
-- Issue #12571: Add a plat-linux3 directory mirroring the plat-linux2
-  directory, so that "import DLFCN" and other similar imports work on
-  Linux 3.0.
-
-- Issue #7484: smtplib no longer puts <> around addresses in VRFY and EXPN
-  commands; they aren't required and in fact postfix doesn't support that form.
-
-- Issue #12273: Remove ast.__version__. AST changes can be accounted for by
-  checking sys.version_info or sys._mercurial.
-
-- Silence spurious "broken pipe" tracebacks when shutting down a
-  ProcessPoolExecutor.
-
-- Fix potential resource leaks in concurrent.futures.ProcessPoolExecutor
-  by joining all queues and processes when shutdown() is called.
-
-- Issue #11603: Fix a crash when __str__ is rebound as __repr__.  Patch by
-  Andreas Stührk.
-
-- Issue #11321: Fix a crash with multiple imports of the _pickle module when
-  embedding Python.  Patch by Andreas Stührk.
-
-- Issue #6755: Add get_wch() method to curses.window class. Patch by Iñigo
-  Serna.
-
-- Add cgi.closelog() function to close the log file.
-
-- Issue #12502: asyncore: fix polling loop with AF_UNIX sockets.
-
-- Issue #4376: ctypes now supports nested structures with an endianness
-  different from that of the parent structure. Patch by Vlad Riscutia.
-
-- Raise ValueError when attempting to set the _CHUNK_SIZE attribute of a
-  TextIOWrapper to a huge value, not TypeError.
-
-- Issue #12504: Close file handles in a timely manner in packaging.database.
-  This fixes a bug with the remove (uninstall) feature on Windows.
-
-- Issues #12169 and #10510: Factor out code used by various packaging commands
-  to make HTTP POST requests, and make sure it uses CRLF.
-
-- Issue #12016: Multibyte CJK decoders now resynchronize faster. They only
-  ignore the first byte of an invalid byte sequence. For example,
-  b'\xff\n'.decode('gb2312', 'replace') gives '\ufffd\n' instead of '\ufffd'.
-
-- Issue #12459: time.sleep() now raises a ValueError if the sleep length is
-  negative, instead of an infinite sleep on Windows or raising an IOError on
-  Linux for example, to have the same behaviour on all platforms.
-
-- Issue #12451: pydoc: html_getfile() now uses tokenize.open() to support
-  Python scripts using an encoding different from UTF-8 (it reads the coding
-  cookie of the script).
-
-- Issue #12493: subprocess: Popen.communicate() now also handles EINTR errors
-  if the process has only one pipe.
-
-- Issue #12467: warnings: fix a race condition if a warning is emitted at
-  shutdown, if globals()['__file__'] is None.
-
-- Issue #12451: pydoc: importfile() now opens the Python script in binary mode,
-  instead of text mode using the locale encoding, to avoid encoding issues.
-
-- Issue #12451: runpy: run_path() now opens the Python script in binary mode,
-  instead of text mode using the locale encoding, to support other encodings
-  than UTF-8 (scripts using the coding cookie).
-
-- Issue #12451: xml.dom.pulldom: parse() now opens files in binary mode instead
-  of the text mode (using the locale encoding) to avoid encoding issues.
-
-- Issue #12147: Adjust the new-in-3.2 smtplib.send_message method for better
-  conformance to the RFCs:  correctly handle Sender and Resent- headers.
-
-- Issue #12352: Fix a deadlock in multiprocessing.Heap when a block is freed by
-  the garbage collector while the Heap lock is held.
-
-- Issue #12462: time.sleep() now immediately calls the (Python) signal handler
-  if it is interrupted by a signal, instead of having to wait until the next
-  instruction.
-
-- Issue #12442: new shutil.disk_usage function, providing total, used and free
-  disk space statistics.
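A minimal sketch; the path queried is arbitrary::

    import shutil

    usage = shutil.disk_usage("/")
    # usage is a named tuple of total, used and free space, in bytes.
    print("free: {:.1f} GiB".format(usage.free / 2 ** 30))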
-
-- Issue #12451: The XInclude default loader of xml.etree now decodes files from
-  UTF-8 instead of the locale encoding if the encoding is not specified. It now
-  also opens XML files for the parser in binary mode instead of the text mode
-  to avoid encoding issues.
-
-- Issue #12451: doctest.debug_script() doesn't create a temporary file
-  anymore to avoid encoding issues.
-
-- Issue #12451: pydoc.synopsis() now reads the encoding cookie if available,
-  to read the Python script from the right encoding.
-
-- Issue #12451: distutils now opens the setup script in binary mode to read the
-  encoding cookie, instead of opening it in UTF-8.
-
-- Issue #9516: On Mac OS X, change Distutils to no longer globally attempt to
-  check or set the MACOSX_DEPLOYMENT_TARGET environment variable for the
-  interpreter process.  This could cause failures in non-Distutils subprocesses
-  and was unreliable since tests or user programs could modify the interpreter
-  environment after Distutils set it.  Instead, have Distutils set the
-  deployment target only in the environment of each build subprocess.  It is
-  still possible to globally override the default by setting
-  MACOSX_DEPLOYMENT_TARGET before launching the interpreter; its value must be
-  greater than or equal to the default value, the value with which the interpreter
-  was built.  Also, implement the same handling in packaging.
-
-- Issue #12422: In the copy module, don't store objects that are their own copy
-  in the memo dict.
-
-- Issue #12303: Add sigwaitinfo() and sigtimedwait() to the signal module.
-
-- Issue #12404: Remove C89 incompatible code from mmap module. Patch by Akira
-  Kitada.
-
-- Issue #1874: email now detects and reports as a defect the presence of
-  any CTE other than 7bit, 8bit, or binary on a multipart.
-
-- Issue #12383: Fix subprocess module with env={}: don't copy the environment
-  variables, start with an empty environment.
-
-- Issue #11637: Fix support for importing packaging setup hooks from the
-  project directory.
-
-- Issue #6771: Moved the curses.wrapper function from the single-function
-  wrapper module into __init__, eliminating the module.  Since __init__ was
-  already importing the function as curses.wrapper, there is no API change.
-
-- Issue #11584: email.header.decode_header no longer fails if the header
-  passed to it is a Header object, and Header/make_header no longer fail
-  if given binary unknown-8bit input.
-
-- Issue #11700: mailbox proxy object close methods can now be called multiple
-  times without error.
-
-- Issue #11767: Correct file descriptor leak in mailbox's __getitem__ method.
-
-- Issue #12133: AbstractHTTPHandler.do_open() of urllib.request closes the HTTP
-  connection if its getresponse() method fails with a socket error. Patch
-  written by Ezio Melotti.
-
-- Issue #12240: Allow multiple setup hooks in packaging's setup.cfg files.
-  Original patch by Erik Bray.
-
-- Issue #9284: Allow inspect.findsource() to find the source of doctest
-  functions.
-
-- Issue #11595: Fix assorted bugs in packaging.util.cfg_to_args, a
-  compatibility helper for the distutils-packaging transition.  Original patch
-  by Erik Bray.
-
-- Issue #12287: In ossaudiodev, check that the device isn't closed in several
-  methods.
-
-- Issue #12009: Fixed regression in netrc file comment handling.
-
-- Issue #12246: Warn and fail when trying to install a third-party project from
-  an uninstalled Python (built in a source checkout).  Original patch by
-  Tshepang Lekhonkhobe.
-
-- Issue #10694: zipfile now ignores garbage at the end of a zipfile.
-
-- Issue #12283: Fixed regression in smtplib quoting of leading dots in DATA.
-
-- Issue #10424: Argparse now includes the names of the missing required
-  arguments in the missing arguments error message.
-
-- Issue #12168: SysLogHandler now allows NUL termination to be controlled using
-  a new 'append_nul' attribute on the handler.
-
-- Issue #11583: Speed up os.path.isdir on Windows by using GetFileAttributes
-  instead of os.stat.
-
-- Issue #12021: Make mmap's read() method argument optional. Patch by Petri
-  Lehtinen.
-
-- Issue #9205: concurrent.futures.ProcessPoolExecutor now detects killed
-  children and raises BrokenProcessPool in such a situation.  Previously it
-  would reliably freeze/deadlock.
-
-- Issue #12040: Expose a new attribute ``sentinel`` on instances of
-  ``multiprocessing.Process``.  Also, fix Process.join() to not use polling
-  anymore, when given a timeout.
-
-- Issue #11893: Remove obsolete internal wrapper class ``SSLFakeFile`` in the
-  smtplib module.  Patch by Catalin Iacob.
-
-- Issue #12080: Fix a Decimal.power() case that took an unreasonably long time
-  to compute.
-
-- Issue #12221: Remove __version__ attributes from pyexpat, pickle, tarfile,
-  pydoc, tkinter, and xml.parsers.expat. These were useless version constants
-  left over from the Mercurial transition.
-
-- Named tuples now work correctly with vars().
-
-- Issue #12085: Fix an attribute error in subprocess.Popen destructor if the
-  constructor has failed, e.g. because of an undeclared keyword argument. Patch
-  written by Oleg Oshmyan.
-
-- Issue #12028: Make threading._get_ident() public, rename it to
-  threading.get_ident() and document it. This function was already available
-  as _thread.get_ident().
-
-- Issue #12171: IncrementalEncoder.reset() of CJK codecs (multibytecodec) calls
-  encreset() instead of decreset().
-
-- Issue #12218: Removed wsgiref.egg-info.
-
-- Issue #12196: Add pipe2() to the os module.
-
-- Issue #985064: Make plistlib more resilient to faulty input plists.
-  Patch by Mher Movsisyan.
-
-- Issue #1625: BZ2File and bz2.decompress() now support multi-stream files.
-  Initial patch by Nir Aides.
-
-- Issue #12175: BufferedReader.read(-1) now calls raw.readall() if available.
-
-- Issue #12175: FileIO.readall() now only reads the file position and size
-  once.
-
-- Issue #12175: RawIOBase.readall() now returns None if read() returns None.
-
-- Issue #12175: FileIO.readall() now raises a ValueError instead of an IOError
-  if the file is closed.
-
-- Issue #11109: New service_action method for BaseServer, used by ForkingMixin
-  class for cleanup. Initial Patch by Justin Warkentin.
-
-- Issue #12045: Avoid duplicate execution of command in
-  ctypes.util._get_soname().  Patch by Sijin Joseph.
-
-- Issue #10818: Remove the Tk GUI and the serve() function of the pydoc module;
-  pydoc -g had been deprecated in Python 3.2 and replaced by a new enhanced web
-  server.
-
-- Issue #1441530: In imaplib, read the data in one chunk to speed up large
-  reads and simplify code.
-
-- Issue #12070: Fix the Makefile parser of the sysconfig module to handle
-  correctly references to "bogus variable" (e.g. "prefix=$/opt/python").
-
-- Issue #12100: Don't reset incremental encoders of CJK codecs at each call to
-  their encode() method anymore, but continue to call the reset() method if the
-  final argument is True.
-
-- Issue #12049: Add RAND_bytes() and RAND_pseudo_bytes() functions to the ssl
-  module.
-
-- Issue #6501: os.device_encoding() returns None on Windows if the application
-  has no console.
-
-- Issue #12105: Add O_CLOEXEC to the os module.
-
-- Issue #12079: Decimal('Infinity').fma(Decimal('0'), (3.91224318126786e+19+0j))
-  now raises TypeError (reflecting the invalid type of the 3rd argument) rather
-  than Decimal.InvalidOperation.
-
-- Issue #12124: zipimport doesn't keep a reference to zlib.decompress() anymore
-  to be able to unload the module.
-
-- Add the packaging module, an improved fork of distutils (also known as
-  distutils2).
-
-- Issue #12065: connect_ex() on an SSL socket now returns the original errno
-  when the socket's timeout expires (it used to return None).
-
-- Issue #8809: The SMTP_SSL constructor and SMTP.starttls() now support
-  passing a ``context`` argument pointing to an ssl.SSLContext instance.
-  Patch by Kasun Herath.
-
-- Issue #9516: Avoid errors in sysconfig when MACOSX_DEPLOYMENT_TARGET is set
-  in the shell.
-
-- Issue #8650: Make zlib module 64-bit clean. compress(), decompress() and
-  their incremental counterparts now raise OverflowError if given an input
-  larger than 4GB, instead of silently truncating the input and returning
-  an incorrect result.
-
-- Issue #12050: zlib.decompressobj().decompress() now clears the unconsumed_tail
-  attribute when called without a max_length argument.
-
-- Issue #12062: Fix a flushing bug when doing a certain type of I/O sequence
-  on a file opened in read+write mode (namely: reading, seeking a bit forward,
-  writing, then seeking before the previous write but still within buffered
-  data, and writing again).
-
-- Issue #9971: Write an optimized implementation of BufferedReader.readinto().
-  Patch by John O'Connor.
-
-- Issue #11799: urllib.request Authentication Handlers will raise a ValueError
-  when presented with an unsupported Authentication Scheme. Patch contributed
-  by Yuval Greenfield.
-
-- Issue #10419, #6011: The build_scripts command of distutils now correctly
-  handles non-ASCII paths (the path to the Python executable). Open and write
-  the script in binary mode, but ensure that the shebang is decodable from
-  UTF-8 and from the encoding of the script.
-
-- Issue #8498: In socket.accept(), allow specifying 0 as a backlog value in
-  order to accept exactly one connection.  Patch by Daniel Evers.
-
-- Issue #12011: signal.signal() and signal.siginterrupt() raise an OSError,
-  instead of a RuntimeError: OSError has an errno attribute.
-
-- Issue #3709: add a flush_headers method to BaseHTTPRequestHandler, which
-  manages the sending of headers to the output stream and the flushing of the
-  internal headers buffer. Patch contribution by Andrew Schaaf.
-
-- Issue #11743: Rewrite multiprocessing connection classes in pure Python.
-
-- Issue #11164: Stop trying to use _xmlplus in the xml module.
-
-- Issue #11888: Add log2 function to math module. Patch written by Mark
-  Dickinson.
-
-- Issue #12012: ssl.PROTOCOL_SSLv2 becomes optional.
-
-- Issue #8407: The signal handler writes the signal number as a single byte
-  instead of a nul byte into the wakeup file descriptor. So it is possible to
-  wait for more than one signal and know which signals were raised.
-
-- Issue #8407: Add pthread_kill(), sigpending() and sigwait() functions to the
-  signal module.
-
-- Issue #11927: SMTP_SSL now uses port 465 by default as documented.  Patch
-  by Kasun Herath.
-
-- Issue #12002: ftplib's abort() method raises TypeError.
-
-- Issue #11916: Add a number of MacOSX specific definitions to the errno module.
-  Patch by Pierre Carrier.
-
-- Issue #11999: fixed sporadic sync failure in mailbox.Maildir due to its trying
-  to detect mtime changes by comparing to the system clock instead of to the
-  previous value of the mtime.
-
-- Issue #11072: added MLSD command (RFC-3659) support to ftplib.
-
-- Issue #8808: The IMAP4_SSL constructor now allows passing an SSLContext
-  parameter to control parameters of the secure channel.  Patch by Sijin
-  Joseph.
-
-- ntpath.samefile failed to notice that "a.txt" and "A.TXT" refer to the same
-  file on Windows XP. As noticed in issue #10684.
-
-- Issue #12000: When a SSL certificate has a subjectAltName without any
-  dNSName entry, ssl.match_hostname() should use the subject's commonName.
-  Patch by Nicolas Bareil.
-
-- Issue #10775: assertRaises, assertRaisesRegex, assertWarns, and
-  assertWarnsRegex now accept a keyword argument 'msg' when used as context
-  managers.  Initial patch by Winston Ewert.
-
-- Issue #10684: shutil.move used to delete a folder on case insensitive
-  filesystems when the source and destination name were the same except
-  for the case.
-
-- Issue #11647: objects created using contextlib.contextmanager now support
-  more than one call to the function when used as a decorator. Initial patch
-  by Ysj Ray.
-
-- Issue #11930: Removed deprecated time.accept2dyear variable.
-  Removed year >= 1000 restriction from datetime.strftime.
-
-- logging: don't define QueueListener if Python has no thread support.
-
-- functools.cmp_to_key() now works with collections.Hashable().
-
-- Issue #11277: mmap.mmap() calls fcntl(fd, F_FULLFSYNC) on Mac OS X to get
-  around a mmap bug with sparse files. Patch written by Steffen Daode Nurpmeso.
-
-- Issue #8407: Add signal.pthread_sigmask() function to fetch and/or change the
-  signal mask of the calling thread.
-
-- Issue #11858: configparser.ExtendedInterpolation expected lower-case section
-  names.
-
-- Issue #11324: ConfigParser(interpolation=None) now works correctly.
-
-- Issue #11811: ssl.get_server_certificate() is now IPv6-compatible.  Patch
-  by Charles-François Natali.
-
-- Issue #11763: don't use difflib in TestCase.assertMultiLineEqual if the
-  strings are too long.
-
-- Issue #11236: getpass.getpass responds to ctrl-c or ctrl-z on terminal.
-
-- Issue #11856: Speed up parsing of JSON numbers.
-
-- Issue #11005: threading.RLock()._release_save() raises a RuntimeError if the
-  lock was not acquired.
-
-- Issue #11258: Speed up ctypes.util.find_library() under Linux by a factor
-  of 5 to 10.  Initial patch by Jonas H.
-
-- Issue #11382: Trivial system calls, such as dup() or pipe(), needn't
-  release the GIL.  Patch by Charles-François Natali.
-
-- Issue #11223: Add threading._info() function providing information about
-  the thread implementation.
-
-- Issue #11731: simplify/enhance email parser/generator API by introducing
-  policy objects.
-
-- Issue #11768: The signal handler of the signal module only calls
-  Py_AddPendingCall() for the first signal to fix a deadlock on reentrant or
-  parallel calls. PyErr_SetInterrupt() also writes into the wakeup file.
-
-- Issue #11492: fix several issues with header folding in the email package.
-
-- Issue #11852: Add missing imports and update tests.
-
-- Issue #11875: collections.OrderedDict's __reduce__ was temporarily
-  mutating the object instead of just working on a copy.
-
-- Issue #11467: Fix urlparse behavior when handling URLs whose scheme-specific
-  part contains only digits. Patch by Santoso Wijaya.
-
-- collections.Counter().copy() now works correctly for subclasses.
-
-- Issue #11474: Fix the bug with url2pathname() handling of '/C|/' on Windows.
-  Patch by Santoso Wijaya.
-
-- Issue #11684: complete email.parser bytes API by adding BytesHeaderParser.
-
-- The bz2 module now handles 4GiB+ input buffers correctly.
-
-- Issue #9233: Fix json.loads('{}') to return a dict (instead of a list), when
-  _json is not available.
-
-- Issue #11830: Remove unnecessary introspection code in the decimal module.
-
-- Issue #11703: urllib2.geturl() does not return the correct URL when the
-  original URL contains a #fragment.
-
-- Issue #10019: Fixed regression in json module where an indent of 0 stopped
-  adding newlines and acted instead like 'None'.
-
-- Issue #11186: pydoc now ignores a module in the index of modules if its name
-  contains a surrogate character.
-
-- Issue #11815: Use a light-weight SimpleQueue for the result queue in
-  concurrent.futures.ProcessPoolExecutor.
-
-- Issue #5162: Treat services like frozen executables to allow child spawning
-  from multiprocessing.forking on Windows.
-
-- logging.basicConfig now supports an optional 'handlers' argument taking an
-  iterable of handlers to be added to the root logger. Additional parameter
-  checks were also added to basicConfig.
-
-- Issue #11814: Fix likely typo in multiprocessing.Pool._terminate().
-
-- Issue #11747: Fix range formatting in difflib.context_diff() and
-  difflib.unified_diff().
-
-- Issue #8428: Fix a race condition in multiprocessing.Pool when terminating
-  worker processes: new processes would be spawned while the pool is being
-  shut down.  Patch by Charles-François Natali.
-
-- Issue #2650: re.escape() no longer escapes the '_'.
-
-- Issue #11757: select.select() now raises ValueError when a negative timeout
-  is passed (previously, a select.error with EINVAL would be raised).  Patch
-  by Charles-François Natali.
-
-- Issue #7311: fix html.parser to accept non-ASCII attribute values.
-
-- Issue #11605: email.parser.BytesFeedParser was incorrectly converting
-  multipart subparts with an 8-bit CTE into unicode instead of preserving the
-  bytes.
-
-- Issue #1690608: email.utils.formataddr is now RFC 2047 aware: it now has a
-  charset parameter that defaults to utf-8 and is used as the charset for RFC
-  2047 encoding when the realname contains non-ASCII characters.
-
-- Issue #10963: Ensure that subprocess.communicate() never raises EPIPE.
-
-- Issue #10791: Implement missing method GzipFile.read1(), allowing GzipFile
-  to be wrapped in a TextIOWrapper.  Patch by Nadeem Vawda.
-
-- Issue #11707: Added a fast C version of functools.cmp_to_key().
-  Patch by Filip Gruszczyński.
-
-- Issue #11688: Add sqlite3.Connection.set_trace_callback().  Patch by
-  Torsten Landschoff.
-
-- Issue #11746: Fix SSLContext.load_cert_chain() to accept elliptic curve
-  private keys.
-
-- Issue #5863: Rewrite BZ2File in pure Python, and allow it to accept
-  file-like objects using a new ``fileobj`` constructor argument.  Patch by
-  Nadeem Vawda.
-
-- unittest.TestCase.assertSameElements has been removed.
-
-- sys.getfilesystemencoding() raises a RuntimeError if initfsencoding() was not
-  called yet: detect bootstrap (startup) issues earlier.
-
-- Issue #11393: Add the new faulthandler module.
-
-- Issue #11618: Fix the timeout logic in threading.Lock.acquire() under Windows.
-
-- Removed the 'strict' argument to email.parser.Parser, which has been
-  deprecated since Python 2.4.
-
-- Issue #11256: Fix inspect.getcallargs on functions that take only keyword
-  arguments.
-
-- Issue #11696: Fix ID generation in msilib.
-
-- itertools.accumulate now supports an optional *func* argument for
-  a user-supplied binary function.
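A short sketch of the default running sums versus a user-supplied function::

    import itertools
    import operator

    list(itertools.accumulate([1, 2, 3, 4]))                # [1, 3, 6, 10]
    list(itertools.accumulate([1, 2, 3, 4], operator.mul))  # [1, 2, 6, 24]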
-
-- Issue #11692: Remove unnecessary demo functions in subprocess module.
-
-- Issue #9696: Fix exception incorrectly raised by xdrlib.Packer.pack_int when
-  trying to pack a negative (in-range) integer.
-
-- Issue #11675: multiprocessing.[Raw]Array objects created from an integer size
-  are now zeroed on creation.  This matches the behaviour specified by the
-  documentation.
-
-- Issue #7639: Fix short file name generation in bdist_msi
-
-- Issue #11635: Don't use polling in worker threads and processes launched by
-  concurrent.futures.
-
-- Issue #5845: Automatically read readline configuration to enable completion
-  in interactive mode.
-
-- Issue #6811: Allow importlib to change a code object's co_filename attribute
-  to match the path to where the source code currently is, not where the code
-  object originally came from.
-
-- Issue #8754: Have importlib use the repr of a module name in error messages.
-
-- Issue #11591: Prevent "import site" from modifying sys.path when python
-  was started with -S.
-
-- collections.namedtuple() now adds a _source attribute to the generated
-  class.  This makes the source more accessible than the outdated
-  "verbose" option which prints to stdout but doesn't make the source
-  string available.
-
-- Issue #11371: Mark getopt error messages as localizable.  Patch by Filip
-  Gruszczyński.
-
-- Issue #11333: Add __slots__ to collections ABCs.
-
-- Issue #11628: cmp_to_key generated class should use __slots__.
-
-- Issue #11666: let help() display named tuple attributes and methods
-  that start with a leading underscore.
-
-- Issue #11662: Make urllib and urllib2 ignore redirections if the
-  scheme is not HTTP, HTTPS or FTP (CVE-2011-1521).
-
-- Issue #5537: Fix time2isoz() and time2netscape() functions of
-  http.cookiejar for expiration year greater than 2038 on 32-bit systems.
-
-- Issue #4391: Use proper gettext plural forms in optparse.
-
-- Issue #11127: Raise a TypeError when trying to pickle a socket object.
-
-- Issue #11563: A ``Connection: close`` header is sent by requests using the
-  URLopener class, which helps in closing sockets after the connection is over.
-  Patch contributions by Jeff McNeil and Nadeem Vawda.
-
-- Issue #11459: A ``bufsize`` value of 0 in subprocess.Popen() really creates
-  unbuffered pipes, such that select() works properly on them.
-
-- Issue #5421: Fix misleading error message when one of socket.sendto()'s
-  arguments has the wrong type.  Patch by Nikita Vetoshkin.
-
-- Issue #10812: Add some extra posix functions to the os module.
-
-- Issue #10979: unittest stdout buffering now works with class and module
-  setup and teardown.
-
-- Issue #11243: fix the parameter querying methods of Message to work if
-  the headers contain un-encoded non-ASCII data.
-
-- Issue #11401: fix handling of headers with no value; this fixes a regression
-  relative to Python2 and the result is now the same as it was in Python2.
-
-- Issue #9298: base64 bodies weren't being folded to line lengths less than 78,
-  which was a regression relative to Python2.  Unlike Python2, the last line
-  of the folded body now ends with a carriage return.
-
-- Issue #11560: shutil.unpack_archive now correctly handles the format
-  parameter. Patch by Evan Dandrea.
-
-- Issue #5870: Add `subprocess.DEVNULL` constant.
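A minimal sketch; the command shown is arbitrary::

    import subprocess

    # Discard the command's output without opening os.devnull by hand.
    subprocess.check_call(["ls", "-l"],
                          stdout=subprocess.DEVNULL,
                          stderr=subprocess.DEVNULL)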
-
-- Issue #11133: fix two cases where inspect.getattr_static can trigger code
-  execution. Patch by Andreas Stührk.
-
-- Issue #11569: use absolute path to the sysctl command in multiprocessing to
-  ensure that it will be found regardless of the shell PATH. This ensures
-  that multiprocessing.cpu_count works on default installs of MacOSX.
-
-- Issue #11501: distutils.archive_util.make_zipfile no longer fails if zlib is
-  not installed. Instead, the zipfile.ZIP_STORED compression is used to create
-  the ZipFile. Patch by Natalia B. Bidart.
-
-- Issue #11289: `smtplib.SMTP` class is now a context manager so it can be used
-  in a `with` statement.  Contributed by Giampaolo Rodola.
-
-- Issue #11554: Fixed support for Japanese codecs; previously the body output
-  encoding was not done if euc-jp or shift-jis was specified as the charset.
-
-- Issue #11407: `TestCase.run` returns the result object used or created.
-  Contributed by Jonathan Hartley.
-
-- Issue #11500: Fixed a bug in the OS X proxy bypass code for fully qualified
-  IP addresses in the proxy exception list.
-
-- Issue #11491: dbm.error is no longer raised when dbm.open is called with
-  the "n" as the flag argument and the file exists. The behavior matches
-  the documentation and general logic.
-
-- Issue #1162477: Postel Principle adjustment to email date parsing: handle the
-  fact that some non-compliant MUAs use '.' instead of ':' in time specs.
-
-- Issue #11131: Fix sign of zero in decimal.Decimal plus and minus
-  operations when the rounding mode is ROUND_FLOOR.
-
-- Issue #9935: Speed up pickling of instances of user-defined classes.
-
-- Issue #5622: Fix curses.wrapper to raise correct exception if curses
-  initialization fails.
-
-- Issue #11408: In threading.Lock.acquire(), only call gettimeofday() when
-  really necessary.  Patch by Charles-François Natali.
-
-- Issue #11391: Writing to a mmap object created with
-  ``mmap.PROT_READ|mmap.PROT_EXEC`` would segfault instead of raising a
-  TypeError.  Patch by Charles-François Natali.
-
-- Issue #9795: add context manager protocol support for nntplib.NNTP class.
-
-- Issue #11306: mailbox in certain cases adapts to an inability to open
-  certain files in read-write mode.  Previously it detected this by
-  checking for EACCES, now it also checks for EROFS.
-
-- Issue #11265: asyncore now correctly handles EPIPE, EBADF and EAGAIN errors
-  on accept(), send() and recv().
-
-- Issue #11377: Deprecate platform.popen() and reimplement it with os.popen().
-
-- Issue #8513: On UNIX, subprocess supports bytes command string.
-
-- Issue #10866: Add socket.sethostname().  Initial patch by Ross Lagerwall.
-
-- Issue #11140: Lock.release() now raises a RuntimeError when attempting
-  to release an unacquired lock, as claimed in the threading documentation.
-  The _thread.error exception is now an alias of RuntimeError.  Patch by
-  Filip Gruszczyński.  Patch for _dummy_thread by Aymeric Augustin.
-
-- Issue #8594: ftplib now provides a source_address parameter to specify which
-  (address, port) to bind to before connecting.
-
-- Issue #11326: Add the missing connect_ex() implementation for SSL sockets,
-  and make it work for non-blocking connects.
-
-- Issue #11297: Add collections.ChainMap().
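A small sketch of the lookup order; the mappings are hypothetical::

    from collections import ChainMap

    defaults = {"color": "blue", "user": "guest"}
    overrides = {"user": "alice"}
    settings = ChainMap(overrides, defaults)
    settings["user"]    # 'alice' -- the first mapping wins
    settings["color"]   # 'blue'  -- falls back to later mappings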
-
-- Issue #10755: Add the posix.flistdir() function.  Patch by Ross Lagerwall.
-
-- Issue #4761: Add the ``*at()`` family of functions (openat(), etc.) to the
-  posix module.  Patch by Ross Lagerwall.
-
-- Issue #7322: Trying to read from a socket's file-like object after a timeout
-  occurred now raises an error instead of silently losing data.
-
-- Issue #11291: poplib.POP no longer suppresses errors on quit().
-
-- Issue #11177: asyncore's create_socket() arguments can now be omitted.
-
-- Issue #6064: Add a ``daemon`` keyword argument to the threading.Thread
-  and multiprocessing.Process constructors in order to override the
-  default behaviour of inheriting the daemonic property from the current
-  thread/process.
-
-- Issue #10956: Buffered I/O classes retry reading or writing after a signal
-  has arrived and the handler returned successfully.
-
-- Issue #10784: New os.getpriority() and os.setpriority() functions.
-
-- Issue #11114: Fix catastrophic performance of tell() on text files (up
-  to 1000x faster in some cases).  It is still one to two orders of magnitude
-  slower than binary tell().
-
-- Issue #10882: Add os.sendfile function.
-
-- Issue #10868: Allow usage of the register method of an ABC as a class
-  decorator.
-
-- Issue #11224: Fixed a regression in tarfile that affected the file-like
-  objects returned by TarFile.extractfile() regarding performance, memory
-  consumption and failures with the stream interface.
-
-- Issue #10924: Added salt and Modular Crypt Format support to the crypt
-  library.  Moved the old C wrapper to _crypt, and added a Python wrapper with
-  enhanced salt generation and a simpler API for password generation.
-
-- Issue #11074: Make the 'tokenize' module reloadable.
-
-- Issue #11085: Moved collections abstract base classes into a separate
-  module called collections.abc, following the pattern used by importlib.abc.
-  For backwards compatibility, the names are imported into the collections
-  module.
-
-- Issue #4681: Allow mmap() to work on file sizes and offsets larger than
-  4GB, even on 32-bit builds.  Initial patch by Ross Lagerwall, adapted for
-  32-bit Windows.
-
-- Issue #11169: compileall module uses repr() to format filenames and paths to
-  escape surrogate characters and show spaces.
-
-- Issue #11089: Fix performance issue limiting the use of ConfigParser()
-  with large config files.
-
-- Issue #10276: Fix the results of zlib.crc32() and zlib.adler32() on buffers
-  larger than 4GB.  Patch by Nadeem Vawda.
-
-- Issue #11388: Added a clear() method to MutableSequence.
-
-- Issue #11174: Add argparse.MetavarTypeHelpFormatter, which uses type names
-  for the names of optional and positional arguments in help messages.
-
-- Issue #9348: Raise an early error if argparse nargs and metavar don't match.
-
-- Issue #9026: Fix order of argparse sub-commands in help messages.
-
-- Issue #9347: Fix formatting for tuples in argparse type= error messages.
-
-- Issue #12191: Added shutil.chown() to change user and/or group owner of a
-  given path also specifying their names.
-
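A minimal sketch of shutil.chown(); the path, user and group names are
hypothetical and must exist on the system for the call to succeed::

    import shutil

    # Either keyword may be omitted to leave that attribute unchanged.
    shutil.chown('/srv/www/report.txt', user='webadmin', group='www-data')
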
-- Issue #13988: The _elementtree accelerator is used whenever available.
-  Now xml.etree.cElementTree becomes a deprecated alias to ElementTree.
-
-Build
------
-
-- Issue #6807: Run msisupport.mak earlier.
-
-- Issue #10580: Minor grammar change in Windows installer.
-
-- Issue #13326: Clean __pycache__ directories correctly on OpenBSD.
-
-- PEP 393: the configure option --with-wide-unicode is removed.
-
-- Issue #12852: Set _XOPEN_SOURCE to 700, instead of 600, to get POSIX 2008
-  functions on OpenBSD (e.g. fdopendir).
-
-- Issue #11863: Remove support for legacy systems deprecated in Python 3.2
-  (following PEP 11).  These are systems using Mach C Threads,
-  SunOS lightweight processes, GNU pth threads and IRIX threads.
-
-- Issue #8746: Correct faulty configure checks so that os.chflags() and
-  os.lchflags() are once again built on systems that support these
-  functions (BSD and OS X).  Also add new stat file flags for OS X
-  (UF_HIDDEN and UF_COMPRESSED).
-
-- Issue #10645: Installing Python no longer creates a
-  Python-X.Y.Z-pyX.Y.egg-info file in the lib-dynload directory.
-
-- Do not accidentally include the directory containing sqlite.h twice when
-  building sqlite3.
-
-- Issue #11217: For 64-bit/32-bit Mac OS X universal framework builds,
-  ensure "make install" creates symlinks in --prefix bin for the "-32"
-  files in the framework bin directory like the installer does.
-
-- Issue #11347: Use --no-as-needed when linking libpython3.so.
-
-- Issue #11411: Fix 'make DESTDIR=' with a relative destination.
-
-- Issue #11268: Prevent Mac OS X Installer failure if Documentation
-  package had previously been installed.
-
-- Issue #11495: OSF support is eliminated. It was deprecated in Python 3.2.
-
-IDLE
-----
-
-- Issue #14409: IDLE now properly executes commands in the Shell window
-  when it cannot read the normal config files on startup and
-  has to use the built-in default key bindings.
-  There was previously a bug in one of the defaults.
-
-- IDLE can be launched as python -m idlelib.
-
-- Issue #3573: IDLE hangs when passing invalid command line args
-  (directory(ies) instead of file(s)) (Patch by Guilherme Polo)
-
-- Issue #14200: IDLE shell crash on printing non-BMP unicode character.
-
-- Issue #5219: Prevent event handler cascade in IDLE.
-
-- Issue #964437: Make IDLE help window non-modal.
-  Patch by Guilherme Polo and Roger Serwy.
-
-- Issue #13933: IDLE auto-complete did not work with some imported
-  modules, such as hashlib.  (Patch by Roger Serwy)
-
-- Issue #13506: Add '' to path for IDLE Shell when started and restarted with Restart Shell.
-  Original patches by Marco Scataglini and Roger Serwy.
-
-- Issue #4625: If IDLE cannot write to its recent file or breakpoint files,
-  display a message popup and continue rather than crash.  Original patch by
-  Roger Serwy.
-
-- Issue #8641: Update IDLE 3 syntax coloring to recognize b".." and not u"..".
-  Patch by Tal Einat.
-
-- Issue #13296: Fix IDLE to clear compile __future__ flags on shell restart.
-  (Patch by Roger Serwy)
-
-- Issue #9871: Prevent IDLE 3 crash when given byte strings
-  with invalid hex escape sequences, like b'\x0'.
-  (Original patch by Claudiu Popa.)
-
-- Issue #12636: IDLE reads the coding cookie when executing a Python script.
-
-- Issue #12540: Prevent zombie IDLE processes on Windows due to changes
-  in os.kill().
-
-- Issue #12590: IDLE editor window now always displays the first line
-  when opening a long file.  With Tk 8.5, the first line was hidden.
-
-- Issue #11088: don't crash when using F5 to run a script in IDLE on MacOSX
-  with Tk 8.5.
-
-- Issue #1028: Tk returns an invalid Unicode null in %A: UnicodeDecodeError.
-  With Tk < 8.5, _tkinter.c:PythonCmd() raised UnicodeDecodeError, causing
-  IDLE to exit.  The value is now converted to a valid Unicode null in
-  PythonCmd().
-
-- Issue #11718: IDLE's open module dialog couldn't find the __init__.py
-  file in a package.
-
-Tools/Demos
------------
-
-- Issue #14053: patchcheck.py ("make patchcheck") now works with MQ patches.
-  Patch by Francisco Martín Brugué.
-
-- Issue #13930: 2to3 is now able to write its converted output files to another
-  directory tree as well as copying unchanged files and altering the file
-  suffix.  See its new -o, -W and --add-suffix options.  This makes it more
-  useful in many automated code translation workflows.
-
-- Issue #13628: python-gdb.py is now able to retrieve more frames in the Python
-  traceback if Python is optimized.
-
-- Issue #11996: libpython (gdb), replace "py-bt" command by "py-bt-full" and
-  add a smarter "py-bt" command printing a classic Python traceback.
-
-- Issue #11179: Make ccbench work under Python 3.1 and 2.7 again.
-
-- Issue #10639: reindent.py no longer converts newlines and will raise
-  an error if attempting to convert a file with mixed newlines.
-  "--newline" option added to specify new line character.
-
-Extension Modules
------------------
-
-- Issue #16847: Fixed improper use of _PyUnicode_CheckConsistency() in
-  non-pydebug builds. Several extension modules now compile cleanly when
-  assert()s are enabled in standard builds (-DDEBUG flag).
-
-- Issue #13840: The error message produced by ctypes.create_string_buffer
-  when given a Unicode string has been fixed.
-
-- Issue #9975: socket: Fix incorrect use of flowinfo and scope_id. Patch by
-  Vilmos Nebehaj.
-
-- Issue #7777: socket: Add Reliable Datagram Sockets (PF_RDS) support.
-
-- Issue #13159: FileIO and BZ2Compressor/BZ2Decompressor now use a linear-time
-  buffer growth strategy instead of a quadratic-time one.
-
-- Issue #10141: socket: Add SocketCAN (PF_CAN) support. Initial patch by
-  Matthias Fuchs, updated by Tiago Gonçalves.
-
-- Issue #13070: Fix a crash when a TextIOWrapper caught in a reference cycle
-  would be finalized after the reference to its underlying BufferedRWPair's
-  writer got cleared by the GC.
-
-- Issue #12881: ctypes: Fix segfault with large structure field names.
-
-- Issue #13058: ossaudiodev: fix a file descriptor leak on error. Patch by
-  Thomas Jarosch.
-
-- Issue #13013: ctypes: Fix a reference leak in PyCArrayType_from_ctype.
-  Thanks to Suman Saha for finding the bug and providing a patch.
-
-- Issue #13022: Fix _multiprocessing.recvfd() not checking that the file
-  descriptor was actually received.
-
-- Issue #1172711: Add 'long long' support to the array module.
-  Initial patch by Oren Tirosh and Hirokazu Yamamoto.
-
-- Issue #12483: ctypes: Fix a crash when the destruction of a callback
-  object triggers the garbage collector.
-
-- Issue #12950: Fix passing file descriptors in multiprocessing, under
-  OpenIndiana/Illumos.
-
-- Issue #12764: Fix a crash in ctypes when the name of a Structure field is not
-  a string.
-
-- Issue #11241: subclasses of ctypes.Array can now be subclassed.
-
-- Issue #9651: Fix a crash when ctypes.create_string_buffer(0) was passed to
-  some functions like file.write().
-
-- Issue #10309: Define _GNU_SOURCE so that mremap() gets the proper
-  signature.  Without this, architectures where sizeof void* != sizeof int are
-  broken.  Patch given by Hallvard B Furuseth.
-
-- Issue #12051: Fix segfault in json.dumps() while encoding highly-nested
-  objects using the C accelerations.
-
-- Issue #12017: Fix segfault in json.loads() while decoding highly-nested
-  objects using the C accelerations.
-
-- Issue #1838: Prevent segfault in ctypes, when _as_parameter_ on a class is set
-  to an instance of the class.
-
-Tests
------
-
-- Issue #13125: Silence spurious test_lib2to3 output when in non-verbose mode.
-  Patch by Mikhail Novikov.
-
-- Issue #13447: Add a test file to host regression tests for bugs in the
-  scripts found in the Tools directory.
-
-- Issue #10881: Fix test_site failure with OS X framework builds.
-
-- Issue #13901: Prevent test_distutils failures on OS X with --enable-shared.
-
-- Issue #13862: Fix spurious failure in test_zlib due to runtime/compile time
-  minor versions not matching.
-
-- Issue #12804: Fix test_socket and test_urllib2net failures when running tests
-  on a system without internet access.
-
-- Issue #13726: Fix the ambiguous -S flag in regrtest. It is -o/--slow for slow
-  tests.
-
-- Issue #11659: Fix ResourceWarning in test_subprocess introduced by #11459.
-  Patch by Ben Hayden.
-
-- Issue #11577: Fix a ResourceWarning triggered by improved binhex test
-  coverage.
-
-- Issue #11509: Significantly increase test coverage of fileinput.
-  Patch by Denver Coneybeare at PyCon 2011 Sprints.
-
-- Issue #11689: Fix a variable scoping error in an sqlite3 test.
-
-- Issue #13786: Remove unimplemented 'trace' long option from regrtest.py.
-
-- Issue #13725: Fix regrtest to recognize the documented -d flag.
-  Patch by Erno Tukia.
-
-- Issue #13304: Skip test case if user site-packages disabled (-s or
-  PYTHONNOUSERSITE).  (Patch by Carl Meyer)
-
-- Issue #5661: Add a test for ECONNRESET/EPIPE handling to test_asyncore. Patch
-  by Xavier de Gaye.
-
-- Issue #13218: Fix test_ssl failures on Debian/Ubuntu.
-
-- Re-enable lib2to3's test_parser.py tests, though with an expected failure
-  (see issue 13125).
-
-- Issue #12656: Add tests for IPv6 and Unix sockets to test_asyncore.
-
-- Issue #6484: Add unit tests for mailcap module (patch by Gregory Nofi)
-
-- Issue #11651: Improve the Makefile test targets to run more of the test suite
-  more quickly. The --multiprocess option is now enabled by default, reducing
-  the amount of time needed to run the tests. "make test" and "make quicktest"
-  now include some resource-intensive tests, but no longer run the test suite
-  twice to check for bugs in .pyc generation. Tools/scripts/run_test.py provides
-  an easy platform-independent way to run the test suite with sensible
-  defaults.
-
-- Issue #12331: The test suite for the packaging module can now run from an
-  installed Python.
-
-- Issue #12331: The test suite for lib2to3 can now run from an installed
-  Python.
-
-- Issue #12626: In regrtest, allow filtering tests using a glob pattern
-  with the ``-m`` (or ``--match``) option.  This works with all test cases
-  using the unittest module.  This is useful with long test suites
-  such as test_io or test_subprocess.
-
-- Issue #12624: It is now possible to stop after the first failure when
-  running in verbose mode (``-v`` or ``-W``), by using the ``--failfast``
-  (or ``-G``) option to regrtest.  This is useful with long test suites
-  such as test_io or test_subprocess.
-
-- Issue #12587: Correct faulty test file and reference in test_tokenize.
-  (Patch by Robert Xiao)
-
-- Issue #12573: Add resource checks for dangling Thread and Process objects.
-
-- Issue #12549: Correct test_platform to not fail when OS X returns 'x86_64'
-  as the processor type on some Mac systems.
-
-- Skip network tests when getaddrinfo() returns EAI_AGAIN, meaning a temporary
-  failure in name resolution.
-
-- Issue #11812: Solve transient socket failure to connect to 'localhost'
-  in test_telnetlib.py.
-
-- Solved a potential deadlock in test_telnetlib.py. Related to issue #11812.
-
-- Avoid failing in test_robotparser when mueblesmoraleda.com is flaky and
-  an overzealous DNS service (e.g. OpenDNS) redirects to a placeholder
-  Web site.
-
-- Avoid failing in test_urllibnet.test_bad_address when some overzealous
-  DNS service (e.g. OpenDNS) resolves a non-existent domain name.  The test
-  is now skipped instead.
-
-- Issue #12440: When testing whether some bits in SSLContext.options can be
-  reset, check the version of the OpenSSL headers Python was compiled against,
-  rather than the runtime version of the OpenSSL library.
-
-- Issue #11512: Add a test suite for the cgitb module. Patch by Robbie Clemons.
-
-- Issue #12497: Install test/data to prevent failures of the various codecmaps
-  tests.
-
-- Issue #12496: Install test/capath directory to prevent test_connect_capath
-  testcase failure in test_ssl.
-
-- Issue #12469: Run wakeup and pending signal tests in a subprocess to run the
-  test in a fresh process with only one thread and to not change signal
-  handling of the parent process.
-
-- Issue #8716: Avoid crashes caused by Aqua Tk on OSX when attempting to run
-  test_tk or test_ttk_guionly under a username that is not currently logged
-  in to the console windowserver (as may be the case under buildbot or ssh).
-
-- Issue #12407: Explicitly skip test_capi.EmbeddingTest under Windows.
-
-- Issue #12400: regrtest -W doesn't rerun the tests twice anymore, but captures
-  the output and displays it on failure instead. regrtest -v doesn't print the
-  error twice anymore if there is only one error.
-
-- Issue #12141: Install copies of template C module file so that
-  test_build_ext of test_distutils and test_command_build_ext of
-  test_packaging are no longer silently skipped when
-  run outside of a build directory.
-
-- Issue #8746: Add additional tests for os.chflags() and os.lchflags().
-  Patch by Garrett Cooper.
-
-- Issue #10736: Fix test_ttk test_widgets failures with Cocoa Tk 8.5.9
-  on Mac OS X.  (Patch by Ronald Oussoren)
-
-- Issue #12057: Add tests for ISO 2022 codecs (iso2022_jp, iso2022_jp_2,
-  iso2022_kr).
-
-- Issue #12096: Fix a race condition in test_threading.test_waitfor(). Patch
-  written by Charles-François Natali.
-
-- Issue #11614: import __hello__ prints "Hello World!". Patch written by
-  Andreas Stührk.
-
-- Issue #5723: Improve json tests to be executed with and without accelerations.
-
-- Issue #12041: Make test_wait3 more robust.
-
-- Issue #11873: Change regex in test_compileall to fix occasional failures
-  when the randomly generated temporary path happened to match the regex.
-
-- Issue #11958: Fix FTP tests for IPv6, bind to "::1" instead of "localhost".
-  Patch written by Charles-Francois Natali.
-
-- Issue #8407, #11859: Fix tests of test_io using threads and an alarm: use
-  pthread_sigmask() to ensure that the SIGALRM signal is received by the main
-  thread.
-
-- Issue #11811: Factor out detection of IPv6 support on the current host
-  and make it available as ``test.support.IPV6_ENABLED``.  Patch by
-  Charles-François Natali.
-
-- Issue #10914: Add a minimal embedding test to test_capi.
-
-- Issue #11223: Skip test_lock_acquire_interruption() and
-  test_rlock_acquire_interruption() of test_threadsignals if a thread lock is
-  implemented using a POSIX mutex and a POSIX condition variable. A POSIX
-  condition variable cannot be interrupted by a signal (e.g. on Linux, the
-  futex system call is restarted).
-
-- Issue #11790: Fix sporadic failures in test_multiprocessing.WithProcessesTestCondition.
-
-- Fix possible "file already exists" error when running the tests in parallel.
-
-- Issue #11719: Fix message about unexpected test_msilib skip on non-Windows
-  platforms. Patch by Nadeem Vawda.
-
-- Issue #11727: Add a --timeout option to regrtest: if a test takes more than
-  TIMEOUT seconds, dumps the traceback of all threads and exits.
-
-- Issue #11653: fix -W with -j in regrtest.
-
-- The email test suite now lives in the Lib/test/test_email package.  The test
-  harness code has also been modernized to allow use of new unittest features.
-
-- regrtest now discovers test packages as well as test modules.
-
-- Issue #11577: improve test coverage of binhex.py. Patch by Arkady Koplyarov.
-
-- New test_crashers added to exercise the scripts in the Lib/test/crashers
-  directory and confirm they fail as expected.
-
-- Issue #11578: added test for the timeit module.  Patch by Michael Henry.
-
-- Issue #11503: improve test coverage of posixpath.py. Patch by Evan Dandrea.
-
-- Issue #11505: improves test coverage of string.py, increases granularity of
-  string.Formatter tests. Initial patch by Alicia Arlen.
-
-- Issue #11548: Improve test coverage of the shutil module. Patch by
-  Evan Dandrea.
-
-- Issue #11554: Reactivated test_email_codecs.
-
-- Issue #11505: improves test coverage of string.py. Patch by Alicia
-  Arlen.
-
-- Issue #11490: test_subprocess.test_leaking_fds_on_error no longer gives a
-  false positive if the last directory in the path is inaccessible.
-
-- Issue #11223: Fix test_threadsignals to fail, not hang, when the
-  non-semaphore implementation of locks is used under POSIX.
-
-- Issue #10911: Add tests on CGI with non-ASCII characters. Patch written by
-  Pierre Quentel.
-
-- Issue #9931: Fix hangs in GUI tests under Windows in certain conditions.
-  Patch by Hirokazu Yamamoto.
-
-- Issue #10512: Properly close sockets under test.test_cgi.
-
-- Issue #10992: Make tests pass under coverage.
-
-- Issue #10826: Prevent sporadic failure in test_subprocess on Solaris due
-  to open door files.
-
-- Issue #10990: Prevent tests from clobbering a set trace function.
-
-C-API
------
-
-- Issue #13452: PyUnicode_EncodeDecimal() no longer supports error handlers
-  other than "strict".  The caller was unable to compute the size of the
-  output buffer: it depends on the error handler.
-
-- Issue #13560: Add PyUnicode_DecodeLocale(), PyUnicode_DecodeLocaleAndSize()
-  and PyUnicode_EncodeLocale() functions to the C API to decode/encode from/to
-  the current locale encoding.
-
-- Issue #10831: PyUnicode_FromFormat() supports %li, %lli and %zi formats.
-
-- Issue #11246: Fix PyUnicode_FromFormat("%V") to decode the byte string from
-  UTF-8 (with replace error handler) instead of ISO-8859-1 (in strict mode).
-  Patch written by Ray Allen.
-
-- Issue #10830: Fix PyUnicode_FromFormatV("%c") for non-BMP characters on
-  narrow build.
-
-- Add PyObject_GenericGetDict and PyObject_GenericSetDict. They are generic
-  implementations for the getter and setter of a ``__dict__`` descriptor of C
-  types.
-
-- Issue #13727: Add 3 macros to access PyDateTime_Delta members:
-  PyDateTime_DELTA_GET_DAYS, PyDateTime_DELTA_GET_SECONDS,
-  PyDateTime_DELTA_GET_MICROSECONDS.
-
-- Issue #10542: Add 4 macros to work with surrogates: Py_UNICODE_IS_SURROGATE,
-  Py_UNICODE_IS_HIGH_SURROGATE, Py_UNICODE_IS_LOW_SURROGATE,
-  Py_UNICODE_JOIN_SURROGATES.
-
-- Issue #12724: Add Py_RETURN_NOTIMPLEMENTED macro for returning NotImplemented.
-
-- PY_PATCHLEVEL_REVISION has been removed, since it's meaningless with
-  Mercurial.
-
-- Issue #12173: The first argument of PyImport_ImportModuleLevel is now `const
-  char *` instead of `char *`.
-
-- Issue #12380: PyArg_ParseTuple now accepts a bytearray for the 'c' format.
-
-Documentation
--------------
-
-- Issue #13989: Document that GzipFile does not support text mode, and give a
-  more helpful error message when opened with an invalid mode string.
-
-- Issue #13921: Undocument and clean up sqlite3.OptimizedUnicode,
-  which is obsolete in Python 3.x. It's now aliased to str for
-  backwards compatibility.
-
-- Issue #12102: Document that buffered files must be flushed before being used
-  with mmap. Patch by Steffen Daode Nurpmeso.
-
-- Issue #8982: Improve the documentation for the argparse Namespace object.
-
-- Issue #9343: Document that argparse parent parsers must be configured before
-  their children.
-
-- Issue #13498: Clarify docs of os.makedirs()'s exist_ok argument.  Done with
-  great native-speaker help from R. David Murray.
-
-- Issues #13491 and #13995: Fix many errors in sqlite3 documentation.
-  Initial patch for #13491 by Johannes Vogel.
-
-- Issue #13402: Document absoluteness of sys.executable.
-
-- Issue #13883: PYTHONCASEOK also works on OS X.
-
-- Issue #9021: Add an introduction to the copy module documentation.
-
-- Issue #6005: Examples in the socket library documentation use sendall(),
-  where relevant, instead of the send() method.
-
-- Issue #12798: Updated the mimetypes documentation.
-
-- Issue #12949: Document the kwonlyargcount argument for the PyCode_New
-  C API function.
-
-- Issue #13513: Fix io.IOBase documentation to correctly link to the
-  io.IOBase.readline method instead of the readline module.
-
-- Issue #13237: Reorganise subprocess documentation to emphasise convenience
-  functions and the most commonly needed arguments to Popen.
-
-- Issue #13141: Demonstrate recommended style for socketserver examples.
-
-- Issue #11818: Fix tempfile examples for Python 3.
-
-
 **(For information about older versions, consult the HISTORY file.)**
diff --git a/Misc/RPM/python-3.4.spec b/Misc/RPM/python-3.5.spec
similarity index 99%
rename from Misc/RPM/python-3.4.spec
rename to Misc/RPM/python-3.5.spec
index 74f55bc..e3518be 100644
--- a/Misc/RPM/python-3.4.spec
+++ b/Misc/RPM/python-3.5.spec
@@ -39,8 +39,8 @@
 
 %define name python
 #--start constants--
-%define version 3.4.1
-%define libvers 3.4
+%define version 3.5.0a0
+%define libvers 3.5
 #--end constants--
 %define release 1pydotorg
 %define __prefix /usr
diff --git a/Modules/Setup.config.in b/Modules/Setup.config.in
index 5ac2404..adac030 100644
--- a/Modules/Setup.config.in
+++ b/Modules/Setup.config.in
@@ -7,7 +7,7 @@
 @USE_THREAD_MODULE@_thread _threadmodule.c
 
 # The signal module
-@USE_SIGNAL_MODULE@signal signalmodule.c
+@USE_SIGNAL_MODULE@_signal signalmodule.c
 
 # The rest of the modules previously listed in this file are built
 # by the setup.py script in Python 2.1 and later.
diff --git a/Modules/_codecsmodule.c b/Modules/_codecsmodule.c
index 0b093ab..1b21300 100644
--- a/Modules/_codecsmodule.c
+++ b/Modules/_codecsmodule.c
@@ -89,13 +89,15 @@
 codecs.register_error that can handle ValueErrors.");
 
 static PyObject *
-codec_encode(PyObject *self, PyObject *args)
+codec_encode(PyObject *self, PyObject *args, PyObject *kwargs)
 {
+    static char *kwlist[] = {"obj", "encoding", "errors", NULL};
     const char *encoding = NULL;
     const char *errors = NULL;
     PyObject *v;
 
-    if (!PyArg_ParseTuple(args, "O|ss:encode", &v, &encoding, &errors))
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|ss:encode", kwlist,
+                                     &v, &encoding, &errors))
         return NULL;
 
     if (encoding == NULL)
@@ -116,13 +118,15 @@
 able to handle ValueErrors.");
 
 static PyObject *
-codec_decode(PyObject *self, PyObject *args)
+codec_decode(PyObject *self, PyObject *args, PyObject *kwargs)
 {
+    static char *kwlist[] = {"obj", "encoding", "errors", NULL};
     const char *encoding = NULL;
     const char *errors = NULL;
     PyObject *v;
 
-    if (!PyArg_ParseTuple(args, "O|ss:decode", &v, &encoding, &errors))
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|ss:decode", kwlist,
+                                     &v, &encoding, &errors))
         return NULL;
 
     if (encoding == NULL)
@@ -1120,9 +1124,9 @@
         register__doc__},
     {"lookup",                  codec_lookup,                   METH_VARARGS,
         lookup__doc__},
-    {"encode",                  codec_encode,                   METH_VARARGS,
+    {"encode",     (PyCFunction)codec_encode,     METH_VARARGS|METH_KEYWORDS,
         encode__doc__},
-    {"decode",                  codec_decode,                   METH_VARARGS,
+    {"decode",     (PyCFunction)codec_decode,     METH_VARARGS|METH_KEYWORDS,
         decode__doc__},
     {"escape_encode",           escape_encode,                  METH_VARARGS},
     {"escape_decode",           escape_decode,                  METH_VARARGS},
diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c
index c1aa9a3..b2783d2 100644
--- a/Modules/_collectionsmodule.c
+++ b/Modules/_collectionsmodule.c
@@ -3,7 +3,7 @@
 
 /* collections module implementation of a deque() datatype
    Written and maintained by Raymond D. Hettinger <python@rcn.com>
-   Copyright (c) 2004-2013 Python Software Foundation.
+   Copyright (c) 2004-2014 Python Software Foundation.
    All rights reserved.
 */
 
@@ -145,6 +145,12 @@
 
 static PyTypeObject deque_type;
 
+/* XXX Todo: 
+   If aligned memory allocations become available, make the
+   deque object 64 byte aligned so that all of the fields
+   can be retrieved or updated in a single cache line.
+*/
+
 static PyObject *
 deque_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
 {
@@ -454,6 +460,31 @@
     return (PyObject *)deque;
 }
 
+/* The rotate() method is part of the public API and is used internally
+as a primitive for other methods.
+
+Rotation by 1 or -1 is a common case, so any optimizations for high
+volume rotations should take care not to penalize the common case.
+
+Conceptually, a rotate by one is equivalent to a pop on one side and an
+append on the other.  However, a pop/append pair is unnecessarily slow
+because it requires an incref/decref pair for an object located randomly
+in memory.  It is better to just move the object pointer from one block
+to the next without changing the reference count.
+
+When moving batches of pointers, it is tempting to use memcpy() but that
+proved to be slower than a simple loop for a variety of reasons.
+Memcpy() cannot know in advance that we're copying pointers instead of
+bytes, that the source and destination are pointer aligned and
+non-overlapping, that moving just one pointer is a common case, that we
+never need to move more than BLOCKLEN pointers, and that at least one
+pointer is always moved.
+
+For high volume rotations, newblock() and freeblock() are never called
+more than once.  Previously emptied blocks are immediately reused as a
+destination block.  If a block is left-over at the end, it is freed.
+*/
+
 static int
 _deque_rotate(dequeobject *deque, Py_ssize_t n)
 {
@@ -1800,19 +1831,40 @@
     if (mapping_get != NULL && mapping_get == dict_get &&
         mapping_setitem != NULL && mapping_setitem == dict_setitem) {
         while (1) {
+            /* Fast path advantages:
+                   1. Eliminate double hashing
+                      (by re-using the same hash for both the get and set)
+                   2. Avoid argument overhead of PyObject_CallFunctionObjArgs
+                      (argument tuple creation and parsing)
+                   3. Avoid indirection through a bound method object
+                      (creates another argument tuple)
+                   4. Avoid initial increment from zero
+                      (reuse an existing one-object instead)
+            */
+            Py_hash_t hash;
+
             key = PyIter_Next(it);
             if (key == NULL)
                 break;
-            oldval = PyDict_GetItem(mapping, key);
+
+            if (!PyUnicode_CheckExact(key) ||
+                (hash = ((PyASCIIObject *) key)->hash) == -1)
+            {
+                hash = PyObject_Hash(key);
+                if (hash == -1) 
+                    goto done;
+            }
+
+            oldval = _PyDict_GetItem_KnownHash(mapping, key, hash);
             if (oldval == NULL) {
-                if (PyDict_SetItem(mapping, key, one) == -1)
-                    break;
+                if (_PyDict_SetItem_KnownHash(mapping, key, one, hash) == -1)
+                    goto done;
             } else {
                 newval = PyNumber_Add(oldval, one);
                 if (newval == NULL)
-                    break;
-                if (PyDict_SetItem(mapping, key, newval) == -1)
-                    break;
+                    goto done;
+                if (_PyDict_SetItem_KnownHash(mapping, key, newval, hash) == -1)
+                    goto done;
                 Py_CLEAR(newval);
             }
             Py_DECREF(key);
diff --git a/Modules/_datetimemodule.c b/Modules/_datetimemodule.c
index 496ff34..04d9b5d 100644
--- a/Modules/_datetimemodule.c
+++ b/Modules/_datetimemodule.c
@@ -3805,29 +3805,6 @@
     return clone;
 }
 
-static int
-time_bool(PyObject *self)
-{
-    PyObject *offset, *tzinfo;
-    int offsecs = 0;
-
-    if (TIME_GET_SECOND(self) || TIME_GET_MICROSECOND(self)) {
-        /* Since utcoffset is in whole minutes, nothing can
-         * alter the conclusion that this is nonzero.
-         */
-        return 1;
-    }
-    tzinfo = GET_TIME_TZINFO(self);
-    if (tzinfo != Py_None) {
-        offset = call_utcoffset(tzinfo, Py_None);
-        if (offset == NULL)
-            return -1;
-        offsecs = GET_TD_DAYS(offset)*86400 + GET_TD_SECONDS(offset);
-        Py_DECREF(offset);
-    }
-    return (TIME_GET_MINUTE(self)*60 - offsecs + TIME_GET_HOUR(self)*3600) != 0;
-}
-
 /* Pickle support, a simple use of __reduce__. */
 
 /* Let basestate be the non-tzinfo data string.
@@ -3895,19 +3872,6 @@
 All arguments are optional. tzinfo may be None, or an instance of\n\
 a tzinfo subclass. The remaining arguments may be ints.\n");
 
-static PyNumberMethods time_as_number = {
-    0,                                          /* nb_add */
-    0,                                          /* nb_subtract */
-    0,                                          /* nb_multiply */
-    0,                                          /* nb_remainder */
-    0,                                          /* nb_divmod */
-    0,                                          /* nb_power */
-    0,                                          /* nb_negative */
-    0,                                          /* nb_positive */
-    0,                                          /* nb_absolute */
-    (inquiry)time_bool,                         /* nb_bool */
-};
-
 static PyTypeObject PyDateTime_TimeType = {
     PyVarObject_HEAD_INIT(NULL, 0)
     "datetime.time",                            /* tp_name */
@@ -3919,7 +3883,7 @@
     0,                                          /* tp_setattr */
     0,                                          /* tp_reserved */
     (reprfunc)time_repr,                        /* tp_repr */
-    &time_as_number,                            /* tp_as_number */
+    0,                                          /* tp_as_number */
     0,                                          /* tp_as_sequence */
     0,                                          /* tp_as_mapping */
     (hashfunc)time_hash,                        /* tp_hash */
diff --git a/Modules/_decimal/docstrings.h b/Modules/_decimal/docstrings.h
index a6490b9..71029a9 100644
--- a/Modules/_decimal/docstrings.h
+++ b/Modules/_decimal/docstrings.h
@@ -19,26 +19,30 @@
 PyDoc_STRVAR(doc__decimal,
 "C decimal arithmetic module");
 
-PyDoc_STRVAR(doc_getcontext,"\n\
-getcontext() - Get the current default context.\n\
+PyDoc_STRVAR(doc_getcontext,
+"getcontext($module, /)\n--\n\n\
+Get the current default context.\n\
 \n");
 
-PyDoc_STRVAR(doc_setcontext,"\n\
-setcontext(c) - Set a new default context.\n\
+PyDoc_STRVAR(doc_setcontext,
+"setcontext($module, context, /)\n--\n\n\
+Set a new default context.\n\
 \n");
 
-PyDoc_STRVAR(doc_localcontext,"\n\
-localcontext(ctx=None) - Return a context manager that will set the default\n\
-context to a copy of ctx on entry to the with-statement and restore the\n\
-previous default context when exiting the with-statement. If no context is\n\
-specified, a copy of the current default context is used.\n\
+PyDoc_STRVAR(doc_localcontext,
+"localcontext($module, /, ctx=None)\n--\n\n\
+Return a context manager that will set the default context to a copy of ctx\n\
+on entry to the with-statement and restore the previous default context when\n\
+exiting the with-statement. If no context is specified, a copy of the current\n\
+default context is used.\n\
 \n");
 
 #ifdef EXTRA_FUNCTIONALITY
-PyDoc_STRVAR(doc_ieee_context,"\n\
-IEEEContext(bits) - Return a context object initialized to the proper values for\n\
-one of the IEEE interchange formats. The argument must be a multiple of 32 and\n\
-less than IEEE_CONTEXT_MAX_BITS. For the most common values, the constants\n\
+PyDoc_STRVAR(doc_ieee_context,
+"IEEEContext($module, bits, /)\n--\n\n\
+Return a context object initialized to the proper values for one of the\n\
+IEEE interchange formats.  The argument must be a multiple of 32 and less\n\
+than IEEE_CONTEXT_MAX_BITS.  For the most common values, the constants\n\
 DECIMAL32, DECIMAL64 and DECIMAL128 are provided.\n\
 \n");
 #endif
@@ -48,32 +52,34 @@
 /*                       Decimal Object and Methods                           */
 /******************************************************************************/
 
-PyDoc_STRVAR(doc_decimal,"\n\
-Decimal(value=\"0\", context=None): Construct a new Decimal object.\n\
-value can be an integer, string, tuple, or another Decimal object.\n\
-If no value is given, return Decimal('0'). The context does not affect\n\
-the conversion and is only passed to determine if the InvalidOperation\n\
-trap is active.\n\
+PyDoc_STRVAR(doc_decimal,
+"Decimal(value=\"0\", context=None)\n--\n\n\
+Construct a new Decimal object. 'value' can be an integer, string, tuple,\n\
+or another Decimal object. If no value is given, return Decimal('0'). The\n\
+context does not affect the conversion and is only passed to determine if\n\
+the InvalidOperation trap is active.\n\
 \n");
 
-PyDoc_STRVAR(doc_adjusted,"\n\
-adjusted() - Return the adjusted exponent of the number.\n\
-\n\
-Defined as exp + digits - 1.\n\
+PyDoc_STRVAR(doc_adjusted,
+"adjusted($self, /)\n--\n\n\
+Return the adjusted exponent of the number.  Defined as exp + digits - 1.\n\
 \n");
 
-PyDoc_STRVAR(doc_as_tuple,"\n\
-as_tuple() - Return a tuple representation of the number.\n\
+PyDoc_STRVAR(doc_as_tuple,
+"as_tuple($self, /)\n--\n\n\
+Return a tuple representation of the number.\n\
 \n");
 
-PyDoc_STRVAR(doc_canonical,"\n\
-canonical() - Return the canonical encoding of the argument. Currently,\n\
-the encoding of a Decimal instance is always canonical, so this operation\n\
-returns its argument unchanged.\n\
+PyDoc_STRVAR(doc_canonical,
+"canonical($self, /)\n--\n\n\
+Return the canonical encoding of the argument.  Currently, the encoding\n\
+of a Decimal instance is always canonical, so this operation returns its\n\
+argument unchanged.\n\
 \n");
 
-PyDoc_STRVAR(doc_compare,"\n\
-compare(other, context=None) - Compare self to other. Return a decimal value:\n\
+PyDoc_STRVAR(doc_compare,
+"compare($self, /, other, context=None)\n--\n\n\
+Compare self to other.  Return a decimal value:\n\
 \n\
     a or b is a NaN ==> Decimal('NaN')\n\
     a < b           ==> Decimal('-1')\n\
@@ -81,17 +87,18 @@
     a > b           ==> Decimal('1')\n\
 \n");
 
-PyDoc_STRVAR(doc_compare_signal,"\n\
-compare_signal(other, context=None) - Identical to compare, except that\n\
-all NaNs signal.\n\
+PyDoc_STRVAR(doc_compare_signal,
+"compare_signal($self, /, other, context=None)\n--\n\n\
+Identical to compare, except that all NaNs signal.\n\
 \n");
 
-PyDoc_STRVAR(doc_compare_total,"\n\
-compare_total(other, context=None) - Compare two operands using their\n\
-abstract representation rather than their numerical value. Similar to the\n\
-compare() method, but the result gives a total ordering on Decimal instances.\n\
-Two Decimal instances with the same numeric value but different representations\n\
-compare unequal in this ordering:\n\
+PyDoc_STRVAR(doc_compare_total,
+"compare_total($self, /, other, context=None)\n--\n\n\
+Compare two operands using their abstract representation rather than\n\
+their numerical value.  Similar to the compare() method, but the result\n\
+gives a total ordering on Decimal instances.  Two Decimal instances with\n\
+the same numeric value but different representations compare unequal\n\
+in this ordering:\n\
 \n\
     >>> Decimal('12.0').compare_total(Decimal('12'))\n\
     Decimal('-1')\n\
@@ -107,36 +114,39 @@
 InvalidOperation if the second operand cannot be converted exactly.\n\
 \n");
 
-PyDoc_STRVAR(doc_compare_total_mag,"\n\
-compare_total_mag(other, context=None) - Compare two operands using their\n\
-abstract representation rather than their value as in compare_total(), but\n\
-ignoring the sign of each operand. x.compare_total_mag(y) is equivalent to\n\
-x.copy_abs().compare_total(y.copy_abs()).\n\
+PyDoc_STRVAR(doc_compare_total_mag,
+"compare_total_mag($self, /, other, context=None)\n--\n\n\
+Compare two operands using their abstract representation rather than their\n\
+value as in compare_total(), but ignoring the sign of each operand.\n\
+\n\
+x.compare_total_mag(y) is equivalent to x.copy_abs().compare_total(y.copy_abs()).\n\
 \n\
 This operation is unaffected by context and is quiet: no flags are changed\n\
 and no rounding is performed. As an exception, the C version may raise\n\
 InvalidOperation if the second operand cannot be converted exactly.\n\
 \n");
 
-PyDoc_STRVAR(doc_conjugate,"\n\
-conjugate() - Return self.\n\
+PyDoc_STRVAR(doc_conjugate,
+"conjugate($self, /)\n--\n\n\
+Return self.\n\
 \n");
 
-PyDoc_STRVAR(doc_copy_abs,"\n\
-copy_abs() - Return the absolute value of the argument. This operation\n\
-is unaffected by context and is quiet: no flags are changed and no rounding\n\
-is performed.\n\
+PyDoc_STRVAR(doc_copy_abs,
+"copy_abs($self, /)\n--\n\n\
+Return the absolute value of the argument.  This operation is unaffected by\n\
+context and is quiet: no flags are changed and no rounding is performed.\n\
 \n");
 
-PyDoc_STRVAR(doc_copy_negate,"\n\
-copy_negate() - Return the negation of the argument. This operation is\n\
-unaffected by context and is quiet: no flags are changed and no rounding\n\
-is performed.\n\
+PyDoc_STRVAR(doc_copy_negate,
+"copy_negate($self, /)\n--\n\n\
+Return the negation of the argument.  This operation is unaffected by context\n\
+and is quiet: no flags are changed and no rounding is performed.\n\
 \n");
 
-PyDoc_STRVAR(doc_copy_sign,"\n\
-copy_sign(other, context=None) - Return a copy of the first operand with\n\
-the sign set to be the same as the sign of the second operand. For example:\n\
+PyDoc_STRVAR(doc_copy_sign,
+"copy_sign($self, /, other, context=None)\n--\n\n\
+Return a copy of the first operand with the sign set to be the same as the\n\
+sign of the second operand. For example:\n\
 \n\
     >>> Decimal('2.3').copy_sign(Decimal('-1.5'))\n\
     Decimal('-2.3')\n\
@@ -146,14 +156,16 @@
 InvalidOperation if the second operand cannot be converted exactly.\n\
 \n");
 
-PyDoc_STRVAR(doc_exp,"\n\
-exp(context=None) - Return the value of the (natural) exponential function\n\
-e**x at the given number. The function always uses the ROUND_HALF_EVEN mode\n\
-and the result is correctly rounded.\n\
+PyDoc_STRVAR(doc_exp,
+"exp($self, /, context=None)\n--\n\n\
+Return the value of the (natural) exponential function e**x at the given\n\
+number.  The function always uses the ROUND_HALF_EVEN mode and the result\n\
+is correctly rounded.\n\
 \n");
 
-PyDoc_STRVAR(doc_from_float,"\n\
-from_float(f) - Class method that converts a float to a decimal number, exactly.\n\
+PyDoc_STRVAR(doc_from_float,
+"from_float($type, f, /)\n--\n\n\
+Class method that converts a float to a decimal number, exactly.\n\
 Since 0.1 is not exactly representable in binary floating point,\n\
 Decimal.from_float(0.1) is not the same as Decimal('0.1').\n\
 \n\
@@ -168,155 +180,176 @@
 \n\
 \n");
 
-PyDoc_STRVAR(doc_fma,"\n\
-fma(other, third, context=None) - Fused multiply-add. Return self*other+third\n\
-with no rounding of the intermediate product self*other.\n\
+PyDoc_STRVAR(doc_fma,
+"fma($self, /, other, third, context=None)\n--\n\n\
+Fused multiply-add.  Return self*other+third with no rounding of the\n\
+intermediate product self*other.\n\
 \n\
     >>> Decimal(2).fma(3, 5)\n\
     Decimal('11')\n\
 \n\
 \n");
 
-PyDoc_STRVAR(doc_is_canonical,"\n\
-is_canonical() - Return True if the argument is canonical and False otherwise.\n\
-Currently, a Decimal instance is always canonical, so this operation always\n\
-returns True.\n\
+PyDoc_STRVAR(doc_is_canonical,
+"is_canonical($self, /)\n--\n\n\
+Return True if the argument is canonical and False otherwise.  Currently,\n\
+a Decimal instance is always canonical, so this operation always returns\n\
+True.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_finite,"\n\
-is_finite() - Return True if the argument is a finite number, and False if the\n\
-argument is infinite or a NaN.\n\
+PyDoc_STRVAR(doc_is_finite,
+"is_finite($self, /)\n--\n\n\
+Return True if the argument is a finite number, and False if the argument\n\
+is infinite or a NaN.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_infinite,"\n\
-is_infinite() - Return True if the argument is either positive or negative\n\
-infinity and False otherwise.\n\
-\n");
-
-PyDoc_STRVAR(doc_is_nan,"\n\
-is_nan() - Return True if the argument is a (quiet or signaling) NaN and\n\
+PyDoc_STRVAR(doc_is_infinite,
+"is_infinite($self, /)\n--\n\n\
+Return True if the argument is either positive or negative infinity and\n\
 False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_normal,"\n\
-is_normal(context=None) - Return True if the argument is a normal finite\n\
-non-zero number with an adjusted exponent greater than or equal to Emin.\n\
-Return False if the argument is zero, subnormal, infinite or a NaN.\n\
+PyDoc_STRVAR(doc_is_nan,
+"is_nan($self, /)\n--\n\n\
+Return True if the argument is a (quiet or signaling) NaN and False\n\
+otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_qnan,"\n\
-is_qnan() - Return True if the argument is a quiet NaN, and False otherwise.\n\
+PyDoc_STRVAR(doc_is_normal,
+"is_normal($self, /, context=None)\n--\n\n\
+Return True if the argument is a normal finite non-zero number with an\n\
+adjusted exponent greater than or equal to Emin. Return False if the\n\
+argument is zero, subnormal, infinite or a NaN.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_signed,"\n\
-is_signed() - Return True if the argument has a negative sign and\n\
-False otherwise. Note that both zeros and NaNs can carry signs.\n\
+PyDoc_STRVAR(doc_is_qnan,
+"is_qnan($self, /)\n--\n\n\
+Return True if the argument is a quiet NaN, and False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_snan,"\n\
-is_snan() - Return True if the argument is a signaling NaN and False otherwise.\n\
+PyDoc_STRVAR(doc_is_signed,
+"is_signed($self, /)\n--\n\n\
+Return True if the argument has a negative sign and False otherwise.\n\
+Note that both zeros and NaNs can carry signs.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_subnormal,"\n\
-is_subnormal(context=None) - Return True if the argument is subnormal, and\n\
-False otherwise. A number is subnormal if it is non-zero, finite, and has an\n\
-adjusted exponent less than Emin.\n\
+PyDoc_STRVAR(doc_is_snan,
+"is_snan($self, /)\n--\n\n\
+Return True if the argument is a signaling NaN and False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_is_zero,"\n\
-is_zero() - Return True if the argument is a (positive or negative) zero and\n\
-False otherwise.\n\
+PyDoc_STRVAR(doc_is_subnormal,
+"is_subnormal($self, /, context=None)\n--\n\n\
+Return True if the argument is subnormal, and False otherwise. A number is\n\
+subnormal if it is non-zero, finite, and has an adjusted exponent less\n\
+than Emin.\n\
 \n");
 
-PyDoc_STRVAR(doc_ln,"\n\
-ln(context=None) - Return the natural (base e) logarithm of the operand.\n\
-The function always uses the ROUND_HALF_EVEN mode and the result is\n\
-correctly rounded.\n\
+PyDoc_STRVAR(doc_is_zero,
+"is_zero($self, /)\n--\n\n\
+Return True if the argument is a (positive or negative) zero and False\n\
+otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_log10,"\n\
-log10(context=None) - Return the base ten logarithm of the operand.\n\
-The function always uses the ROUND_HALF_EVEN mode and the result is\n\
-correctly rounded.\n\
+PyDoc_STRVAR(doc_ln,
+"ln($self, /, context=None)\n--\n\n\
+Return the natural (base e) logarithm of the operand. The function always\n\
+uses the ROUND_HALF_EVEN mode and the result is correctly rounded.\n\
 \n");
 
-PyDoc_STRVAR(doc_logb,"\n\
-logb(context=None) - For a non-zero number, return the adjusted exponent\n\
-of the operand as a Decimal instance. If the operand is a zero, then\n\
-Decimal('-Infinity') is returned and the DivisionByZero condition is\n\
-raised. If the operand is an infinity then Decimal('Infinity') is returned.\n\
+PyDoc_STRVAR(doc_log10,
+"log10($self, /, context=None)\n--\n\n\
+Return the base ten logarithm of the operand. The function always uses the\n\
+ROUND_HALF_EVEN mode and the result is correctly rounded.\n\
 \n");
 
-PyDoc_STRVAR(doc_logical_and,"\n\
-logical_and(other, context=None) - Return the digit-wise and of the two\n\
-(logical) operands.\n\
+PyDoc_STRVAR(doc_logb,
+"logb($self, /, context=None)\n--\n\n\
+For a non-zero number, return the adjusted exponent of the operand as a\n\
+Decimal instance.  If the operand is a zero, then Decimal('-Infinity') is\n\
+returned and the DivisionByZero condition is raised. If the operand is\n\
+an infinity then Decimal('Infinity') is returned.\n\
 \n");
 
-PyDoc_STRVAR(doc_logical_invert,"\n\
-logical_invert(context=None) - Return the digit-wise inversion of the\n\
-(logical) operand.\n\
+PyDoc_STRVAR(doc_logical_and,
+"logical_and($self, /, other, context=None)\n--\n\n\
+Return the digit-wise 'and' of the two (logical) operands.\n\
 \n");
 
-PyDoc_STRVAR(doc_logical_or,"\n\
-logical_or(other, context=None) - Return the digit-wise or of the two\n\
-(logical) operands.\n\
+PyDoc_STRVAR(doc_logical_invert,
+"logical_invert($self, /, context=None)\n--\n\n\
+Return the digit-wise inversion of the (logical) operand.\n\
 \n");
 
-PyDoc_STRVAR(doc_logical_xor,"\n\
-logical_xor(other, context=None) - Return the digit-wise exclusive or of the\n\
-two (logical) operands.\n\
+PyDoc_STRVAR(doc_logical_or,
+"logical_or($self, /, other, context=None)\n--\n\n\
+Return the digit-wise 'or' of the two (logical) operands.\n\
 \n");
 
-PyDoc_STRVAR(doc_max,"\n\
-max(other, context=None) - Maximum of self and other. If one operand is a\n\
-quiet NaN and the other is numeric, the numeric operand is returned.\n\
+PyDoc_STRVAR(doc_logical_xor,
+"logical_xor($self, /, other, context=None)\n--\n\n\
+Return the digit-wise 'exclusive or' of the two (logical) operands.\n\
 \n");
 
-PyDoc_STRVAR(doc_max_mag,"\n\
-max_mag(other, context=None) - Similar to the max() method, but the\n\
-comparison is done using the absolute values of the operands.\n\
+PyDoc_STRVAR(doc_max,
+"max($self, /, other, context=None)\n--\n\n\
+Maximum of self and other.  If one operand is a quiet NaN and the other is\n\
+numeric, the numeric operand is returned.\n\
 \n");
 
-PyDoc_STRVAR(doc_min,"\n\
-min(other, context=None) - Minimum of self and other. If one operand is a\n\
-quiet NaN and the other is numeric, the numeric operand is returned.\n\
+PyDoc_STRVAR(doc_max_mag,
+"max_mag($self, /, other, context=None)\n--\n\n\
+Similar to the max() method, but the comparison is done using the absolute\n\
+values of the operands.\n\
 \n");
 
-PyDoc_STRVAR(doc_min_mag,"\n\
-min_mag(other, context=None) - Similar to the min() method, but the\n\
-comparison is done using the absolute values of the operands.\n\
+PyDoc_STRVAR(doc_min,
+"min($self, /, other, context=None)\n--\n\n\
+Minimum of self and other. If one operand is a quiet NaN and the other is\n\
+numeric, the numeric operand is returned.\n\
 \n");
 
-PyDoc_STRVAR(doc_next_minus,"\n\
-next_minus(context=None) - Return the largest number representable in the\n\
-given context (or in the current default context if no context is given) that\n\
-is smaller than the given operand.\n\
+PyDoc_STRVAR(doc_min_mag,
+"min_mag($self, /, other, context=None)\n--\n\n\
+Similar to the min() method, but the comparison is done using the absolute\n\
+values of the operands.\n\
 \n");
 
-PyDoc_STRVAR(doc_next_plus,"\n\
-next_plus(context=None) - Return the smallest number representable in the\n\
-given context (or in the current default context if no context is given) that\n\
-is larger than the given operand.\n\
+PyDoc_STRVAR(doc_next_minus,
+"next_minus($self, /, context=None)\n--\n\n\
+Return the largest number representable in the given context (or in the\n\
+current default context if no context is given) that is smaller than the\n\
+given operand.\n\
 \n");
 
-PyDoc_STRVAR(doc_next_toward,"\n\
-next_toward(other, context=None) - If the two operands are unequal, return\n\
-the number closest to the first operand in the direction of the second operand.\n\
-If both operands are numerically equal, return a copy of the first operand\n\
-with the sign set to be the same as the sign of the second operand.\n\
+PyDoc_STRVAR(doc_next_plus,
+"next_plus($self, /, context=None)\n--\n\n\
+Return the smallest number representable in the given context (or in the\n\
+current default context if no context is given) that is larger than the\n\
+given operand.\n\
 \n");
 
-PyDoc_STRVAR(doc_normalize,"\n\
-normalize(context=None) - Normalize the number by stripping the rightmost\n\
-trailing zeros and converting any result equal to Decimal('0') to Decimal('0e0').\n\
-Used for producing canonical values for members of an equivalence class. For\n\
-example, Decimal('32.100') and Decimal('0.321000e+2') both normalize to the\n\
-equivalent value Decimal('32.1').\n\
+PyDoc_STRVAR(doc_next_toward,
+"next_toward($self, /, other, context=None)\n--\n\n\
+If the two operands are unequal, return the number closest to the first\n\
+operand in the direction of the second operand.  If both operands are\n\
+numerically equal, return a copy of the first operand with the sign set\n\
+to be the same as the sign of the second operand.\n\
 \n");
 
-PyDoc_STRVAR(doc_number_class,"\n\
-number_class(context=None) - Return a string describing the class of the\n\
-operand. The returned value is one of the following ten strings:\n\
+PyDoc_STRVAR(doc_normalize,
+"normalize($self, /, context=None)\n--\n\n\
+Normalize the number by stripping the rightmost trailing zeros and\n\
+converting any result equal to Decimal('0') to Decimal('0e0').  Used\n\
+for producing canonical values for members of an equivalence class.\n\
+For example, Decimal('32.100') and Decimal('0.321000e+2') both normalize\n\
+to the equivalent value Decimal('32.1').\n\
+\n");
+
+PyDoc_STRVAR(doc_number_class,
+"number_class($self, /, context=None)\n--\n\n\
+Return a string describing the class of the operand.  The returned value\n\
+is one of the following ten strings:\n\
 \n\
     * '-Infinity', indicating that the operand is negative infinity.\n\
     * '-Normal', indicating that the operand is a negative normal number.\n\
@@ -331,9 +364,10 @@
 \n\
 \n");
 
-PyDoc_STRVAR(doc_quantize,"\n\
-quantize(exp, rounding=None, context=None) - Return a value equal to the\n\
-first operand after rounding and having the exponent of the second operand.\n\
+PyDoc_STRVAR(doc_quantize,
+"quantize($self, /, exp, rounding=None, context=None)\n--\n\n\
+Return a value equal to the first operand after rounding and having the\n\
+exponent of the second operand.\n\
 \n\
     >>> Decimal('1.41421356').quantize(Decimal('1.000'))\n\
     Decimal('1.414')\n\
@@ -352,103 +386,109 @@
 argument is given, the rounding mode of the current thread's context is used.\n\
 \n");
 
-PyDoc_STRVAR(doc_radix,"\n\
-radix() - Return Decimal(10), the radix (base) in which the Decimal class does\n\
+PyDoc_STRVAR(doc_radix,
+"radix($self, /)\n--\n\n\
+Return Decimal(10), the radix (base) in which the Decimal class does\n\
 all its arithmetic. Included for compatibility with the specification.\n\
 \n");
 
-PyDoc_STRVAR(doc_remainder_near,"\n\
-remainder_near(other, context=None) - Return the remainder from dividing\n\
-self by other. This differs from self % other in that the sign of the\n\
-remainder is chosen so as to minimize its absolute value. More precisely, the\n\
-return value is self - n * other where n is the integer nearest to the exact\n\
-value of self / other, and if two integers are equally near then the even one\n\
-is chosen.\n\
+PyDoc_STRVAR(doc_remainder_near,
+"remainder_near($self, /, other, context=None)\n--\n\n\
+Return the remainder from dividing self by other.  This differs from\n\
+self % other in that the sign of the remainder is chosen so as to minimize\n\
+its absolute value. More precisely, the return value is self - n * other\n\
+where n is the integer nearest to the exact value of self / other, and\n\
+if two integers are equally near then the even one is chosen.\n\
 \n\
 If the result is zero then its sign will be the sign of self.\n\
 \n");
 
-PyDoc_STRVAR(doc_rotate,"\n\
-rotate(other, context=None) - Return the result of rotating the digits of the\n\
-first operand by an amount specified by the second operand. The second operand\n\
-must be an integer in the range -precision through precision. The absolute\n\
-value of the second operand gives the number of places to rotate. If the second\n\
-operand is positive then rotation is to the left; otherwise rotation is to the\n\
-right. The coefficient of the first operand is padded on the left with zeros to\n\
+PyDoc_STRVAR(doc_rotate,
+"rotate($self, /, other, context=None)\n--\n\n\
+Return the result of rotating the digits of the first operand by an amount\n\
+specified by the second operand.  The second operand must be an integer in\n\
+the range -precision through precision. The absolute value of the second\n\
+operand gives the number of places to rotate. If the second operand is\n\
+positive then rotation is to the left; otherwise rotation is to the right.\n\
+The coefficient of the first operand is padded on the left with zeros to\n\
 length precision if necessary. The sign and exponent of the first operand are\n\
 unchanged.\n\
 \n");
 
-PyDoc_STRVAR(doc_same_quantum,"\n\
-same_quantum(other, context=None) - Test whether self and other have the\n\
-same exponent or whether both are NaN.\n\
+PyDoc_STRVAR(doc_same_quantum,
+"same_quantum($self, /, other, context=None)\n--\n\n\
+Test whether self and other have the same exponent or whether both are NaN.\n\
 \n\
 This operation is unaffected by context and is quiet: no flags are changed\n\
 and no rounding is performed. As an exception, the C version may raise\n\
 InvalidOperation if the second operand cannot be converted exactly.\n\
 \n");
 
-PyDoc_STRVAR(doc_scaleb,"\n\
-scaleb(other, context=None) - Return the first operand with the exponent\n\
-adjusted the second. Equivalently, return the first operand multiplied by\n\
-10**other. The second operand must be an integer.\n\
+PyDoc_STRVAR(doc_scaleb,
+"scaleb($self, /, other, context=None)\n--\n\n\
+Return the first operand with the exponent adjusted by the second.  Equivalently,\n\
+return the first operand multiplied by 10**other. The second operand must be\n\
+an integer.\n\
 \n");
 
-PyDoc_STRVAR(doc_shift,"\n\
-shift(other, context=None) - Return the result of shifting the digits of\n\
-the first operand by an amount specified by the second operand. The second\n\
-operand must be an integer in the range -precision through precision. The\n\
-absolute value of the second operand gives the number of places to shift.\n\
-If the second operand is positive, then the shift is to the left; otherwise\n\
-the shift is to the right. Digits shifted into the coefficient are zeros.\n\
-The sign and exponent of the first operand are unchanged.\n\
+PyDoc_STRVAR(doc_shift,
+"shift($self, /, other, context=None)\n--\n\n\
+Return the result of shifting the digits of the first operand by an amount\n\
+specified by the second operand.  The second operand must be an integer in\n\
+the range -precision through precision. The absolute value of the second\n\
+operand gives the number of places to shift. If the second operand is\n\
+positive, then the shift is to the left; otherwise the shift is to the\n\
+right. Digits shifted into the coefficient are zeros. The sign and exponent\n\
+of the first operand are unchanged.\n\
 \n");
 
-PyDoc_STRVAR(doc_sqrt,"\n\
-sqrt(context=None) - Return the square root of the argument to full precision.\n\
-The result is correctly rounded using the ROUND_HALF_EVEN rounding mode.\n\
+PyDoc_STRVAR(doc_sqrt,
+"sqrt($self, /, context=None)\n--\n\n\
+Return the square root of the argument to full precision. The result is\n\
+correctly rounded using the ROUND_HALF_EVEN rounding mode.\n\
 \n");
 
-PyDoc_STRVAR(doc_to_eng_string,"\n\
-to_eng_string(context=None) - Convert to an engineering-type string.\n\
-Engineering notation has an exponent which is a multiple of 3, so there\n\
-are up to 3 digits left of the decimal place. For example, Decimal('123E+1')\n\
-is converted to Decimal('1.23E+3').\n\
+PyDoc_STRVAR(doc_to_eng_string,
+"to_eng_string($self, /, context=None)\n--\n\n\
+Convert to an engineering-type string.  Engineering notation has an exponent\n\
+which is a multiple of 3, so there are up to 3 digits left of the decimal\n\
+place. For example, Decimal('123E+1') is converted to Decimal('1.23E+3').\n\
 \n\
 The value of context.capitals determines whether the exponent sign is lower\n\
 or upper case. Otherwise, the context does not affect the operation.\n\
 \n");
 
-PyDoc_STRVAR(doc_to_integral,"\n\
-to_integral(rounding=None, context=None) - Identical to the\n\
-to_integral_value() method. The to_integral() name has been kept\n\
-for compatibility with older versions.\n\
+PyDoc_STRVAR(doc_to_integral,
+"to_integral($self, /, rounding=None, context=None)\n--\n\n\
+Identical to the to_integral_value() method.  The to_integral() name has been\n\
+kept for compatibility with older versions.\n\
 \n");
 
-PyDoc_STRVAR(doc_to_integral_exact,"\n\
-to_integral_exact(rounding=None, context=None) - Round to the nearest\n\
-integer, signaling Inexact or Rounded as appropriate if rounding occurs.\n\
-The rounding mode is determined by the rounding parameter if given, else\n\
-by the given context. If neither parameter is given, then the rounding mode\n\
+PyDoc_STRVAR(doc_to_integral_exact,
+"to_integral_exact($self, /, rounding=None, context=None)\n--\n\n\
+Round to the nearest integer, signaling Inexact or Rounded as appropriate if\n\
+rounding occurs.  The rounding mode is determined by the rounding parameter\n\
+if given, else by the given context. If neither parameter is given, then the\n\
+rounding mode of the current default context is used.\n\
+\n");
+
+PyDoc_STRVAR(doc_to_integral_value,
+"to_integral_value($self, /, rounding=None, context=None)\n--\n\n\
+Round to the nearest integer without signaling Inexact or Rounded.  The\n\
+rounding mode is determined by the rounding parameter if given, else by\n\
+the given context. If neither parameter is given, then the rounding mode\n\
 of the current default context is used.\n\
 \n");
 
-PyDoc_STRVAR(doc_to_integral_value,"\n\
-to_integral_value(rounding=None, context=None) - Round to the nearest\n\
-integer without signaling Inexact or Rounded. The rounding mode is determined\n\
-by the rounding parameter if given, else by the given context. If neither\n\
-parameter is given, then the rounding mode of the current default context is\n\
-used.\n\
-\n");
-
 
 /******************************************************************************/
 /*                       Context Object and Methods                           */
 /******************************************************************************/
 
-PyDoc_STRVAR(doc_context,"\n\
+PyDoc_STRVAR(doc_context,
+"Context(prec=None, rounding=None, Emin=None, Emax=None, capitals=None, clamp=None, flags=None, traps=None)\n--\n\n\
 The context affects almost all operations and controls rounding,\n\
-Over/Underflow, raising of exceptions and much more. A new context\n\
+Over/Underflow, raising of exceptions and much more.  A new context\n\
 can be constructed as follows:\n\
 \n\
     >>> c = Context(prec=28, Emin=-425000000, Emax=425000000,\n\
@@ -460,308 +500,372 @@
 \n");
 
 #ifdef EXTRA_FUNCTIONALITY
-PyDoc_STRVAR(doc_ctx_apply,"\n\
-apply(x) - Apply self to Decimal x.\n\
+PyDoc_STRVAR(doc_ctx_apply,
+"apply($self, x, /)\n--\n\n\
+Apply self to Decimal x.\n\
 \n");
 #endif
 
-PyDoc_STRVAR(doc_ctx_clear_flags,"\n\
-clear_flags() - Reset all flags to False.\n\
+PyDoc_STRVAR(doc_ctx_clear_flags,
+"clear_flags($self, /)\n--\n\n\
+Reset all flags to False.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_clear_traps,"\n\
-clear_traps() - Set all traps to False.\n\
+PyDoc_STRVAR(doc_ctx_clear_traps,
+"clear_traps($self, /)\n--\n\n\
+Set all traps to False.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_copy,"\n\
-copy() - Return a duplicate of the context with all flags cleared.\n\
+PyDoc_STRVAR(doc_ctx_copy,
+"copy($self, /)\n--\n\n\
+Return a duplicate of the context with all flags cleared.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_copy_decimal,"\n\
-copy_decimal(x) - Return a copy of Decimal x.\n\
+PyDoc_STRVAR(doc_ctx_copy_decimal,
+"copy_decimal($self, x, /)\n--\n\n\
+Return a copy of Decimal x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_create_decimal,"\n\
-create_decimal(x) - Create a new Decimal instance from x, using self as the\n\
-context. Unlike the Decimal constructor, this function observes the context\n\
-limits.\n\
+PyDoc_STRVAR(doc_ctx_create_decimal,
+"create_decimal($self, num=\"0\", /)\n--\n\n\
+Create a new Decimal instance from num, using self as the context. Unlike the\n\
+Decimal constructor, this function observes the context limits.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_create_decimal_from_float,"\n\
-create_decimal_from_float(f) - Create a new Decimal instance from float f.\n\
-Unlike the Decimal.from_float() class method, this function observes the\n\
-context limits.\n\
+PyDoc_STRVAR(doc_ctx_create_decimal_from_float,
+"create_decimal_from_float($self, f, /)\n--\n\n\
+Create a new Decimal instance from float f.  Unlike the Decimal.from_float()\n\
+class method, this function observes the context limits.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_Etiny,"\n\
-Etiny() - Return a value equal to Emin - prec + 1, which is the minimum\n\
-exponent value for subnormal results. When underflow occurs, the exponent\n\
-is set to Etiny.\n\
+PyDoc_STRVAR(doc_ctx_Etiny,
+"Etiny($self, /)\n--\n\n\
+Return a value equal to Emin - prec + 1, which is the minimum exponent value\n\
+for subnormal results.  When underflow occurs, the exponent is set to Etiny.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_Etop,"\n\
-Etop() - Return a value equal to Emax - prec + 1. This is the maximum exponent\n\
-if the _clamp field of the context is set to 1 (IEEE clamp mode). Etop() must\n\
-not be negative.\n\
+PyDoc_STRVAR(doc_ctx_Etop,
+"Etop($self, /)\n--\n\n\
+Return a value equal to Emax - prec + 1.  This is the maximum exponent\n\
+if the _clamp field of the context is set to 1 (IEEE clamp mode).  Etop()\n\
+must not be negative.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_abs,"\n\
-abs(x) - Return the absolute value of x.\n\
+PyDoc_STRVAR(doc_ctx_abs,
+"abs($self, x, /)\n--\n\n\
+Return the absolute value of x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_add,"\n\
-add(x, y) - Return the sum of x and y.\n\
+PyDoc_STRVAR(doc_ctx_add,
+"add($self, x, y, /)\n--\n\n\
+Return the sum of x and y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_canonical,"\n\
-canonical(x) - Return a new instance of x.\n\
+PyDoc_STRVAR(doc_ctx_canonical,
+"canonical($self, x, /)\n--\n\n\
+Return a new instance of x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_compare,"\n\
-compare(x, y) - Compare x and y numerically.\n\
+PyDoc_STRVAR(doc_ctx_compare,
+"compare($self, x, y, /)\n--\n\n\
+Compare x and y numerically.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_compare_signal,"\n\
-compare_signal(x, y) - Compare x and y numerically. All NaNs signal.\n\
+PyDoc_STRVAR(doc_ctx_compare_signal,
+"compare_signal($self, x, y, /)\n--\n\n\
+Compare x and y numerically.  All NaNs signal.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_compare_total,"\n\
-compare_total(x, y) - Compare x and y using their abstract representation.\n\
+PyDoc_STRVAR(doc_ctx_compare_total,
+"compare_total($self, x, y, /)\n--\n\n\
+Compare x and y using their abstract representation.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_compare_total_mag,"\n\
-compare_total_mag(x, y) - Compare x and y using their abstract representation,\n\
-ignoring sign.\n\
+PyDoc_STRVAR(doc_ctx_compare_total_mag,
+"compare_total_mag($self, x, y, /)\n--\n\n\
+Compare x and y using their abstract representation, ignoring sign.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_copy_abs,"\n\
-copy_abs(x) - Return a copy of x with the sign set to 0.\n\
+PyDoc_STRVAR(doc_ctx_copy_abs,
+"copy_abs($self, x, /)\n--\n\n\
+Return a copy of x with the sign set to 0.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_copy_negate,"\n\
-copy_negate(x) - Return a copy of x with the sign inverted.\n\
+PyDoc_STRVAR(doc_ctx_copy_negate,
+"copy_negate($self, x, /)\n--\n\n\
+Return a copy of x with the sign inverted.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_copy_sign,"\n\
-copy_sign(x, y) - Copy the sign from y to x.\n\
+PyDoc_STRVAR(doc_ctx_copy_sign,
+"copy_sign($self, x, y, /)\n--\n\n\
+Copy the sign from y to x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_divide,"\n\
-divide(x, y) - Return x divided by y.\n\
+PyDoc_STRVAR(doc_ctx_divide,
+"divide($self, x, y, /)\n--\n\n\
+Return x divided by y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_divide_int,"\n\
-divide_int(x, y) - Return x divided by y, truncated to an integer.\n\
+PyDoc_STRVAR(doc_ctx_divide_int,
+"divide_int($self, x, y, /)\n--\n\n\
+Return x divided by y, truncated to an integer.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_divmod,"\n\
-divmod(x, y) - Return quotient and remainder of the division x / y.\n\
+PyDoc_STRVAR(doc_ctx_divmod,
+"divmod($self, x, y, /)\n--\n\n\
+Return quotient and remainder of the division x / y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_exp,"\n\
-exp(x) - Return e ** x.\n\
+PyDoc_STRVAR(doc_ctx_exp,
+"exp($self, x, /)\n--\n\n\
+Return e ** x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_fma,"\n\
-fma(x, y, z) - Return x multiplied by y, plus z.\n\
+PyDoc_STRVAR(doc_ctx_fma,
+"fma($self, x, y, z, /)\n--\n\n\
+Return x multiplied by y, plus z.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_canonical,"\n\
-is_canonical(x) - Return True if x is canonical, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_canonical,
+"is_canonical($self, x, /)\n--\n\n\
+Return True if x is canonical, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_finite,"\n\
-is_finite(x) - Return True if x is finite, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_finite,
+"is_finite($self, x, /)\n--\n\n\
+Return True if x is finite, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_infinite,"\n\
-is_infinite(x) - Return True if x is infinite, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_infinite,
+"is_infinite($self, x, /)\n--\n\n\
+Return True if x is infinite, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_nan,"\n\
-is_nan(x) - Return True if x is a qNaN or sNaN, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_nan,
+"is_nan($self, x, /)\n--\n\n\
+Return True if x is a qNaN or sNaN, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_normal,"\n\
-is_normal(x) - Return True if x is a normal number, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_normal,
+"is_normal($self, x, /)\n--\n\n\
+Return True if x is a normal number, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_qnan,"\n\
-is_qnan(x) - Return True if x is a quiet NaN, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_qnan,
+"is_qnan($self, x, /)\n--\n\n\
+Return True if x is a quiet NaN, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_signed,"\n\
-is_signed(x) - Return True if x is negative, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_signed,
+"is_signed($self, x, /)\n--\n\n\
+Return True if x is negative, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_snan,"\n\
-is_snan() - Return True if x is a signaling NaN, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_snan,
+"is_snan($self, x, /)\n--\n\n\
+Return True if x is a signaling NaN, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_subnormal,"\n\
-is_subnormal(x) - Return True if x is subnormal, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_subnormal,
+"is_subnormal($self, x, /)\n--\n\n\
+Return True if x is subnormal, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_is_zero,"\n\
-is_zero(x) - Return True if x is a zero, False otherwise.\n\
+PyDoc_STRVAR(doc_ctx_is_zero,
+"is_zero($self, x, /)\n--\n\n\
+Return True if x is a zero, False otherwise.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_ln,"\n\
-ln(x) - Return the natural (base e) logarithm of x.\n\
+PyDoc_STRVAR(doc_ctx_ln,
+"ln($self, x, /)\n--\n\n\
+Return the natural (base e) logarithm of x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_log10,"\n\
-log10(x) - Return the base 10 logarithm of x.\n\
+PyDoc_STRVAR(doc_ctx_log10,
+"log10($self, x, /)\n--\n\n\
+Return the base 10 logarithm of x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_logb,"\n\
-logb(x) - Return the exponent of the magnitude of the operand's MSD.\n\
+PyDoc_STRVAR(doc_ctx_logb,
+"logb($self, x, /)\n--\n\n\
+Return the exponent of the magnitude of the operand's MSD.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_logical_and,"\n\
-logical_and(x, y) - Digit-wise and of x and y.\n\
+PyDoc_STRVAR(doc_ctx_logical_and,
+"logical_and($self, x, y, /)\n--\n\n\
+Digit-wise and of x and y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_logical_invert,"\n\
-logical_invert(x) - Invert all digits of x.\n\
+PyDoc_STRVAR(doc_ctx_logical_invert,
+"logical_invert($self, x, /)\n--\n\n\
+Invert all digits of x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_logical_or,"\n\
-logical_or(x, y) - Digit-wise or of x and y.\n\
+PyDoc_STRVAR(doc_ctx_logical_or,
+"logical_or($self, x, y, /)\n--\n\n\
+Digit-wise or of x and y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_logical_xor,"\n\
-logical_xor(x, y) - Digit-wise xor of x and y.\n\
+PyDoc_STRVAR(doc_ctx_logical_xor,
+"logical_xor($self, x, y, /)\n--\n\n\
+Digit-wise xor of x and y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_max,"\n\
-max(x, y) - Compare the values numerically and return the maximum.\n\
+PyDoc_STRVAR(doc_ctx_max,
+"max($self, x, y, /)\n--\n\n\
+Compare the values numerically and return the maximum.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_max_mag,"\n\
-max_mag(x, y) - Compare the values numerically with their sign ignored.\n\
+PyDoc_STRVAR(doc_ctx_max_mag,
+"max_mag($self, x, y, /)\n--\n\n\
+Compare the values numerically with their sign ignored.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_min,"\n\
-min(x, y) - Compare the values numerically and return the minimum.\n\
+PyDoc_STRVAR(doc_ctx_min,
+"min($self, x, y, /)\n--\n\n\
+Compare the values numerically and return the minimum.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_min_mag,"\n\
-min_mag(x, y) - Compare the values numerically with their sign ignored.\n\
+PyDoc_STRVAR(doc_ctx_min_mag,
+"min_mag($self, x, y, /)\n--\n\n\
+Compare the values numerically with their sign ignored.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_minus,"\n\
-minus(x) - Minus corresponds to the unary prefix minus operator in Python,\n\
-but applies the context to the result.\n\
+PyDoc_STRVAR(doc_ctx_minus,
+"minus($self, x, /)\n--\n\n\
+Minus corresponds to the unary prefix minus operator in Python, but applies\n\
+the context to the result.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_multiply,"\n\
-multiply(x, y) - Return the product of x and y.\n\
+PyDoc_STRVAR(doc_ctx_multiply,
+"multiply($self, x, y, /)\n--\n\n\
+Return the product of x and y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_next_minus,"\n\
-next_minus(x) - Return the largest representable number smaller than x.\n\
+PyDoc_STRVAR(doc_ctx_next_minus,
+"next_minus($self, x, /)\n--\n\n\
+Return the largest representable number smaller than x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_next_plus,"\n\
-next_plus(x) - Return the smallest representable number larger than x.\n\
+PyDoc_STRVAR(doc_ctx_next_plus,
+"next_plus($self, x, /)\n--\n\n\
+Return the smallest representable number larger than x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_next_toward,"\n\
-next_toward(x) - Return the number closest to x, in the direction towards y.\n\
+PyDoc_STRVAR(doc_ctx_next_toward,
+"next_toward($self, x, y, /)\n--\n\n\
+Return the number closest to x, in the direction towards y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_normalize,"\n\
-normalize(x) - Reduce x to its simplest form. Alias for reduce(x).\n\
+PyDoc_STRVAR(doc_ctx_normalize,
+"normalize($self, x, /)\n--\n\n\
+Reduce x to its simplest form. Alias for reduce(x).\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_number_class,"\n\
-number_class(x) - Return an indication of the class of x.\n\
+PyDoc_STRVAR(doc_ctx_number_class,
+"number_class($self, x, /)\n--\n\n\
+Return an indication of the class of x.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_plus,"\n\
-plus(x) - Plus corresponds to the unary prefix plus operator in Python,\n\
-but applies the context to the result.\n\
+PyDoc_STRVAR(doc_ctx_plus,
+"plus($self, x, /)\n--\n\n\
+Plus corresponds to the unary prefix plus operator in Python, but applies\n\
+the context to the result.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_power,"\n\
-power(x, y) - Compute x**y. If x is negative, then y must be integral.\n\
-The result will be inexact unless y is integral and the result is finite\n\
-and can be expressed exactly in 'precision' digits. In the Python version\n\
-the result is always correctly rounded, in the C version the result is\n\
-almost always correctly rounded.\n\
+PyDoc_STRVAR(doc_ctx_power,
+"power($self, /, a, b, modulo=None)\n--\n\n\
+Compute a**b. If 'a' is negative, then 'b' must be integral. The result\n\
+will be inexact unless 'b' is integral and the result is finite and can\n\
+be expressed exactly in 'precision' digits.  In the Python version the\n\
+result is always correctly rounded, in the C version the result is almost\n\
+always correctly rounded.\n\
 \n\
-power(x, y, m) - Compute (x**y) % m. The following restrictions hold:\n\
+If modulo is given, compute (a**b) % modulo. The following restrictions\n\
+hold:\n\
 \n\
     * all three arguments must be integral\n\
-    * y must be nonnegative\n\
-    * at least one of x or y must be nonzero\n\
-    * m must be nonzero and less than 10**prec in absolute value\n\
+    * 'b' must be nonnegative\n\
+    * at least one of 'a' or 'b' must be nonzero\n\
+    * modulo must be nonzero and less than 10**prec in absolute value\n\
 \n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_quantize,"\n\
-quantize(x, y) - Return a value equal to x (rounded), having the exponent of y.\n\
+PyDoc_STRVAR(doc_ctx_quantize,
+"quantize($self, x, y, /)\n--\n\n\
+Return a value equal to x (rounded), having the exponent of y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_radix,"\n\
-radix() - Return 10.\n\
+PyDoc_STRVAR(doc_ctx_radix,
+"radix($self, /)\n--\n\n\
+Return 10.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_remainder,"\n\
-remainder(x, y) - Return the remainder from integer division. The sign of\n\
-the result, if non-zero, is the same as that of the original dividend.\n\
+PyDoc_STRVAR(doc_ctx_remainder,
+"remainder($self, x, y, /)\n--\n\n\
+Return the remainder from integer division.  The sign of the result,\n\
+if non-zero, is the same as that of the original dividend.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_remainder_near,"\n\
-remainder_near(x, y) - Return x - y * n, where n is the integer nearest the\n\
-exact value of x / y (if the result is 0 then its sign will be the sign of x).\n\
+PyDoc_STRVAR(doc_ctx_remainder_near,
+"remainder_near($self, x, y, /)\n--\n\n\
+Return x - y * n, where n is the integer nearest the exact value of x / y\n\
+(if the result is 0 then its sign will be the sign of x).\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_rotate,"\n\
-rotate(x, y) - Return a copy of x, rotated by y places.\n\
+PyDoc_STRVAR(doc_ctx_rotate,
+"rotate($self, x, y, /)\n--\n\n\
+Return a copy of x, rotated by y places.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_same_quantum,"\n\
-same_quantum(x, y) - Return True if the two operands have the same exponent.\n\
+PyDoc_STRVAR(doc_ctx_same_quantum,
+"same_quantum($self, x, y, /)\n--\n\n\
+Return True if the two operands have the same exponent.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_scaleb,"\n\
-scaleb(x, y) - Return the first operand after adding the second value\n\
-to its exp.\n\
+PyDoc_STRVAR(doc_ctx_scaleb,
+"scaleb($self, x, y, /)\n--\n\n\
+Return the first operand after adding the second value to its exp.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_shift,"\n\
-shift(x, y) - Return a copy of x, shifted by y places.\n\
+PyDoc_STRVAR(doc_ctx_shift,
+"shift($self, x, y, /)\n--\n\n\
+Return a copy of x, shifted by y places.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_sqrt,"\n\
-sqrt(x) - Square root of a non-negative number to context precision.\n\
+PyDoc_STRVAR(doc_ctx_sqrt,
+"sqrt($self, x, /)\n--\n\n\
+Square root of a non-negative number to context precision.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_subtract,"\n\
-subtract(x, y) - Return the difference between x and y.\n\
+PyDoc_STRVAR(doc_ctx_subtract,
+"subtract($self, x, y, /)\n--\n\n\
+Return the difference between x and y.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_to_eng_string,"\n\
-to_eng_string(x) - Convert a number to a string, using engineering notation.\n\
+PyDoc_STRVAR(doc_ctx_to_eng_string,
+"to_eng_string($self, x, /)\n--\n\n\
+Convert a number to a string, using engineering notation.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_to_integral,"\n\
-to_integral(x) - Identical to to_integral_value(x).\n\
+PyDoc_STRVAR(doc_ctx_to_integral,
+"to_integral($self, x, /)\n--\n\n\
+Identical to to_integral_value(x).\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_to_integral_exact,"\n\
-to_integral_exact(x) - Round to an integer. Signal if the result is\n\
-rounded or inexact.\n\
+PyDoc_STRVAR(doc_ctx_to_integral_exact,
+"to_integral_exact($self, x, /)\n--\n\n\
+Round to an integer. Signal if the result is rounded or inexact.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_to_integral_value,"\n\
-to_integral_value(x) - Round to an integer.\n\
+PyDoc_STRVAR(doc_ctx_to_integral_value,
+"to_integral_value($self, x, /)\n--\n\n\
+Round to an integer.\n\
 \n");
 
-PyDoc_STRVAR(doc_ctx_to_sci_string,"\n\
-to_sci_string(x) - Convert a number to a string using scientific notation.\n\
+PyDoc_STRVAR(doc_ctx_to_sci_string,
+"to_sci_string($self, x, /)\n--\n\n\
+Convert a number to a string using scientific notation.\n\
 \n");
 
 
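The docstrings above describe Python-level behaviour of the Decimal and Context
methods.  Purely as an illustration (not part of the patch), a short sketch of
the operations they document, using the public decimal API:

    from decimal import Decimal, Context

    ctx = Context(prec=6)

    # shift/rotate act on the coefficient, padded with zeros to 'prec' digits.
    print(ctx.shift(Decimal("12"), 2))          # 1200
    print(ctx.rotate(Decimal("123456"), 2))     # 345612

    # scaleb adds the second operand to the exponent: 7 * 10**2.
    print(ctx.scaleb(Decimal(7), Decimal(2)))   # 7E+2

    # to_eng_string keeps the exponent a multiple of 3.
    print(Decimal("123E+1").to_eng_string())    # 1.23E+3

    # Three-argument power: integral operands, modulo nonzero and < 10**prec.
    print(ctx.power(Decimal(3), Decimal(7), Decimal(10)))  # 7
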
diff --git a/Modules/_heapqmodule.c b/Modules/_heapqmodule.c
index eee56a0..ad190df 100644
--- a/Modules/_heapqmodule.c
+++ b/Modules/_heapqmodule.c
@@ -11,10 +11,9 @@
 static int
 _siftdown(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
 {
-    PyObject *newitem, *parent, *olditem;
+    PyObject *newitem, *parent;
+    Py_ssize_t parentpos, size;
     int cmp;
-    Py_ssize_t parentpos;
-    Py_ssize_t size;
 
     assert(PyList_Check(heap));
     size = PyList_GET_SIZE(heap);
@@ -23,39 +22,28 @@
         return -1;
     }
 
-    newitem = PyList_GET_ITEM(heap, pos);
-    Py_INCREF(newitem);
     /* Follow the path to the root, moving parents down until finding
        a place newitem fits. */
-    while (pos > startpos){
+    newitem = PyList_GET_ITEM(heap, pos);
+    while (pos > startpos) {
         parentpos = (pos - 1) >> 1;
         parent = PyList_GET_ITEM(heap, parentpos);
         cmp = PyObject_RichCompareBool(newitem, parent, Py_LT);
-        if (cmp == -1) {
-            Py_DECREF(newitem);
+        if (cmp == -1)
             return -1;
-        }
         if (size != PyList_GET_SIZE(heap)) {
-            Py_DECREF(newitem);
             PyErr_SetString(PyExc_RuntimeError,
                             "list changed size during iteration");
             return -1;
         }
         if (cmp == 0)
             break;
-        Py_INCREF(parent);
-        olditem = PyList_GET_ITEM(heap, pos);
+        parent = PyList_GET_ITEM(heap, parentpos);
+        newitem = PyList_GET_ITEM(heap, pos);
+        PyList_SET_ITEM(heap, parentpos, newitem);
         PyList_SET_ITEM(heap, pos, parent);
-        Py_DECREF(olditem);
         pos = parentpos;
-        if (size != PyList_GET_SIZE(heap)) {
-            PyErr_SetString(PyExc_RuntimeError,
-                            "list changed size during iteration");
-            return -1;
-        }
     }
-    Py_DECREF(PyList_GET_ITEM(heap, pos));
-    PyList_SET_ITEM(heap, pos, newitem);
     return 0;
 }
 
@@ -63,20 +51,16 @@
 _siftup(PyListObject *heap, Py_ssize_t pos)
 {
     Py_ssize_t startpos, endpos, childpos, rightpos, limit;
+    PyObject *tmp1, *tmp2;
     int cmp;
-    PyObject *newitem, *tmp, *olditem;
-    Py_ssize_t size;
 
     assert(PyList_Check(heap));
-    size = PyList_GET_SIZE(heap);
-    endpos = size;
+    endpos = PyList_GET_SIZE(heap);
     startpos = pos;
     if (pos >= endpos) {
         PyErr_SetString(PyExc_IndexError, "index out of range");
         return -1;
     }
-    newitem = PyList_GET_ITEM(heap, pos);
-    Py_INCREF(newitem);
 
     /* Bubble up the smaller child until hitting a leaf. */
     limit = endpos / 2;          /* smallest pos that has no child */
@@ -89,37 +73,24 @@
                 PyList_GET_ITEM(heap, childpos),
                 PyList_GET_ITEM(heap, rightpos),
                 Py_LT);
-            if (cmp == -1) {
-                Py_DECREF(newitem);
+            if (cmp == -1)
                 return -1;
-            }
             if (cmp == 0)
                 childpos = rightpos;
-        }
-        if (size != PyList_GET_SIZE(heap)) {
-            Py_DECREF(newitem);
-            PyErr_SetString(PyExc_RuntimeError,
-                            "list changed size during iteration");
-            return -1;
+            if (endpos != PyList_GET_SIZE(heap)) {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "list changed size during iteration");
+                return -1;
+            }
         }
         /* Move the smaller child up. */
-        tmp = PyList_GET_ITEM(heap, childpos);
-        Py_INCREF(tmp);
-        olditem = PyList_GET_ITEM(heap, pos);
-        PyList_SET_ITEM(heap, pos, tmp);
-        Py_DECREF(olditem);
+        tmp1 = PyList_GET_ITEM(heap, childpos);
+        tmp2 = PyList_GET_ITEM(heap, pos);
+        PyList_SET_ITEM(heap, childpos, tmp2);
+        PyList_SET_ITEM(heap, pos, tmp1);
         pos = childpos;
-        if (size != PyList_GET_SIZE(heap)) {
-            PyErr_SetString(PyExc_RuntimeError,
-                            "list changed size during iteration");
-            return -1;
-        }
     }
-
-    /* The leaf at pos is empty now.  Put newitem there, and bubble
-       it up to its final resting place (by sifting its parents down). */
-    Py_DECREF(PyList_GET_ITEM(heap, pos));
-    PyList_SET_ITEM(heap, pos, newitem);
+    /* Bubble it up to its final resting place (by sifting its parents down). */
     return _siftdown(heap, startpos, pos);
 }
 
@@ -296,123 +267,42 @@
 PyDoc_STRVAR(heapify_doc,
 "Transform list into a heap, in-place, in O(len(heap)) time.");
 
-static PyObject *
-nlargest(PyObject *self, PyObject *args)
-{
-    PyObject *heap=NULL, *elem, *iterable, *sol, *it, *oldelem;
-    Py_ssize_t i, n;
-    int cmp;
-
-    if (!PyArg_ParseTuple(args, "nO:nlargest", &n, &iterable))
-        return NULL;
-
-    it = PyObject_GetIter(iterable);
-    if (it == NULL)
-        return NULL;
-
-    heap = PyList_New(0);
-    if (heap == NULL)
-        goto fail;
-
-    for (i=0 ; i<n ; i++ ){
-        elem = PyIter_Next(it);
-        if (elem == NULL) {
-            if (PyErr_Occurred())
-                goto fail;
-            else
-                goto sortit;
-        }
-        if (PyList_Append(heap, elem) == -1) {
-            Py_DECREF(elem);
-            goto fail;
-        }
-        Py_DECREF(elem);
-    }
-    if (PyList_GET_SIZE(heap) == 0)
-        goto sortit;
-
-    for (i=n/2-1 ; i>=0 ; i--)
-        if(_siftup((PyListObject *)heap, i) == -1)
-            goto fail;
-
-    sol = PyList_GET_ITEM(heap, 0);
-    while (1) {
-        elem = PyIter_Next(it);
-        if (elem == NULL) {
-            if (PyErr_Occurred())
-                goto fail;
-            else
-                goto sortit;
-        }
-        cmp = PyObject_RichCompareBool(sol, elem, Py_LT);
-        if (cmp == -1) {
-            Py_DECREF(elem);
-            goto fail;
-        }
-        if (cmp == 0) {
-            Py_DECREF(elem);
-            continue;
-        }
-        oldelem = PyList_GET_ITEM(heap, 0);
-        PyList_SET_ITEM(heap, 0, elem);
-        Py_DECREF(oldelem);
-        if (_siftup((PyListObject *)heap, 0) == -1)
-            goto fail;
-        sol = PyList_GET_ITEM(heap, 0);
-    }
-sortit:
-    if (PyList_Sort(heap) == -1)
-        goto fail;
-    if (PyList_Reverse(heap) == -1)
-        goto fail;
-    Py_DECREF(it);
-    return heap;
-
-fail:
-    Py_DECREF(it);
-    Py_XDECREF(heap);
-    return NULL;
-}
-
-PyDoc_STRVAR(nlargest_doc,
-"Find the n largest elements in a dataset.\n\
-\n\
-Equivalent to:  sorted(iterable, reverse=True)[:n]\n");
-
 static int
 _siftdownmax(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
 {
     PyObject *newitem, *parent;
+    Py_ssize_t parentpos, size;
     int cmp;
-    Py_ssize_t parentpos;
 
     assert(PyList_Check(heap));
-    if (pos >= PyList_GET_SIZE(heap)) {
+    size = PyList_GET_SIZE(heap);
+    if (pos >= size) {
         PyErr_SetString(PyExc_IndexError, "index out of range");
         return -1;
     }
 
-    newitem = PyList_GET_ITEM(heap, pos);
-    Py_INCREF(newitem);
     /* Follow the path to the root, moving parents down until finding
        a place newitem fits. */
-    while (pos > startpos){
+    newitem = PyList_GET_ITEM(heap, pos);
+    while (pos > startpos) {
         parentpos = (pos - 1) >> 1;
         parent = PyList_GET_ITEM(heap, parentpos);
         cmp = PyObject_RichCompareBool(parent, newitem, Py_LT);
-        if (cmp == -1) {
-            Py_DECREF(newitem);
+        if (cmp == -1)
+            return -1;
+        if (size != PyList_GET_SIZE(heap)) {
+            PyErr_SetString(PyExc_RuntimeError,
+                            "list changed size during iteration");
             return -1;
         }
         if (cmp == 0)
             break;
-        Py_INCREF(parent);
-        Py_DECREF(PyList_GET_ITEM(heap, pos));
+        parent = PyList_GET_ITEM(heap, parentpos);
+        newitem = PyList_GET_ITEM(heap, pos);
+        PyList_SET_ITEM(heap, parentpos, newitem);
         PyList_SET_ITEM(heap, pos, parent);
         pos = parentpos;
     }
-    Py_DECREF(PyList_GET_ITEM(heap, pos));
-    PyList_SET_ITEM(heap, pos, newitem);
     return 0;
 }
 
@@ -420,8 +310,8 @@
 _siftupmax(PyListObject *heap, Py_ssize_t pos)
 {
     Py_ssize_t startpos, endpos, childpos, rightpos, limit;
+    PyObject *tmp1, *tmp2;
     int cmp;
-    PyObject *newitem, *tmp;
 
     assert(PyList_Check(heap));
     endpos = PyList_GET_SIZE(heap);
@@ -430,8 +320,6 @@
         PyErr_SetString(PyExc_IndexError, "index out of range");
         return -1;
     }
-    newitem = PyList_GET_ITEM(heap, pos);
-    Py_INCREF(newitem);
 
     /* Bubble up the smaller child until hitting a leaf. */
     limit = endpos / 2;          /* smallest pos that has no child */
@@ -444,111 +332,56 @@
                 PyList_GET_ITEM(heap, rightpos),
                 PyList_GET_ITEM(heap, childpos),
                 Py_LT);
-            if (cmp == -1) {
-                Py_DECREF(newitem);
+            if (cmp == -1)
                 return -1;
-            }
             if (cmp == 0)
                 childpos = rightpos;
+            if (endpos != PyList_GET_SIZE(heap)) {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "list changed size during iteration");
+                return -1;
+            }
         }
         /* Move the smaller child up. */
-        tmp = PyList_GET_ITEM(heap, childpos);
-        Py_INCREF(tmp);
-        Py_DECREF(PyList_GET_ITEM(heap, pos));
-        PyList_SET_ITEM(heap, pos, tmp);
+        tmp1 = PyList_GET_ITEM(heap, childpos);
+        tmp2 = PyList_GET_ITEM(heap, pos);
+        PyList_SET_ITEM(heap, childpos, tmp2);
+        PyList_SET_ITEM(heap, pos, tmp1);
         pos = childpos;
     }
-
-    /* The leaf at pos is empty now.  Put newitem there, and bubble
-       it up to its final resting place (by sifting its parents down). */
-    Py_DECREF(PyList_GET_ITEM(heap, pos));
-    PyList_SET_ITEM(heap, pos, newitem);
+    /* Bubble it up to its final resting place (by sifting its parents down). */
     return _siftdownmax(heap, startpos, pos);
 }
 
 static PyObject *
-nsmallest(PyObject *self, PyObject *args)
+_heapreplace_max(PyObject *self, PyObject *args)
 {
-    PyObject *heap=NULL, *elem, *iterable, *los, *it, *oldelem;
-    Py_ssize_t i, n;
-    int cmp;
+    PyObject *heap, *item, *returnitem;
 
-    if (!PyArg_ParseTuple(args, "nO:nsmallest", &n, &iterable))
+    if (!PyArg_UnpackTuple(args, "_heapreplace_max", 2, 2, &heap, &item))
         return NULL;
 
-    it = PyObject_GetIter(iterable);
-    if (it == NULL)
+    if (!PyList_Check(heap)) {
+        PyErr_SetString(PyExc_TypeError, "heap argument must be a list");
         return NULL;
-
-    heap = PyList_New(0);
-    if (heap == NULL)
-        goto fail;
-
-    for (i=0 ; i<n ; i++ ){
-        elem = PyIter_Next(it);
-        if (elem == NULL) {
-            if (PyErr_Occurred())
-                goto fail;
-            else
-                goto sortit;
-        }
-        if (PyList_Append(heap, elem) == -1) {
-            Py_DECREF(elem);
-            goto fail;
-        }
-        Py_DECREF(elem);
-    }
-    n = PyList_GET_SIZE(heap);
-    if (n == 0)
-        goto sortit;
-
-    for (i=n/2-1 ; i>=0 ; i--)
-        if(_siftupmax((PyListObject *)heap, i) == -1)
-            goto fail;
-
-    los = PyList_GET_ITEM(heap, 0);
-    while (1) {
-        elem = PyIter_Next(it);
-        if (elem == NULL) {
-            if (PyErr_Occurred())
-                goto fail;
-            else
-                goto sortit;
-        }
-        cmp = PyObject_RichCompareBool(elem, los, Py_LT);
-        if (cmp == -1) {
-            Py_DECREF(elem);
-            goto fail;
-        }
-        if (cmp == 0) {
-            Py_DECREF(elem);
-            continue;
-        }
-
-        oldelem = PyList_GET_ITEM(heap, 0);
-        PyList_SET_ITEM(heap, 0, elem);
-        Py_DECREF(oldelem);
-        if (_siftupmax((PyListObject *)heap, 0) == -1)
-            goto fail;
-        los = PyList_GET_ITEM(heap, 0);
     }
 
-sortit:
-    if (PyList_Sort(heap) == -1)
-        goto fail;
-    Py_DECREF(it);
-    return heap;
+    if (PyList_GET_SIZE(heap) < 1) {
+        PyErr_SetString(PyExc_IndexError, "index out of range");
+        return NULL;
+    }
 
-fail:
-    Py_DECREF(it);
-    Py_XDECREF(heap);
-    return NULL;
+    returnitem = PyList_GET_ITEM(heap, 0);
+    Py_INCREF(item);
+    PyList_SET_ITEM(heap, 0, item);
+    if (_siftupmax((PyListObject *)heap, 0) == -1) {
+        Py_DECREF(returnitem);
+        return NULL;
+    }
+    return returnitem;
 }
 
-PyDoc_STRVAR(nsmallest_doc,
-"Find the n smallest elements in a dataset.\n\
-\n\
-Equivalent to:  sorted(iterable)[:n]\n");
+PyDoc_STRVAR(heapreplace_max_doc, "Maxheap variant of heapreplace");
 
 static PyMethodDef heapq_methods[] = {
     {"heappush",        (PyCFunction)heappush,
@@ -561,10 +394,8 @@
         METH_VARARGS,           heapreplace_doc},
     {"heapify",         (PyCFunction)heapify,
         METH_O,                 heapify_doc},
-    {"nlargest",        (PyCFunction)nlargest,
-        METH_VARARGS,           nlargest_doc},
-    {"nsmallest",       (PyCFunction)nsmallest,
-        METH_VARARGS,           nsmallest_doc},
+    {"_heapreplace_max",(PyCFunction)_heapreplace_max,
+        METH_VARARGS,           heapreplace_max_doc},
     {NULL,              NULL}           /* sentinel */
 };
 
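For reference only (not part of the patch): the new _heapreplace_max primitive
corresponds to the maxheap helpers kept in pure Python in Lib/heapq.py.  A
minimal sketch of that logic, assuming the usual list-based heap layout:

    # Pure-Python sketch of the maxheap primitive exported by the C module
    # as _heapreplace_max (mirrors the private helpers in Lib/heapq.py).
    def _siftdown_max(heap, startpos, pos):
        # Move parents down while they compare smaller than the new item.
        newitem = heap[pos]
        while pos > startpos:
            parentpos = (pos - 1) >> 1
            parent = heap[parentpos]
            if parent < newitem:
                heap[pos] = parent
                pos = parentpos
                continue
            break
        heap[pos] = newitem

    def _siftup_max(heap, pos):
        # Bubble the larger child up until hitting a leaf, then sift down.
        endpos = len(heap)
        startpos = pos
        newitem = heap[pos]
        childpos = 2 * pos + 1
        while childpos < endpos:
            rightpos = childpos + 1
            if rightpos < endpos and not heap[rightpos] < heap[childpos]:
                childpos = rightpos
            heap[pos] = heap[childpos]
            pos = childpos
            childpos = 2 * pos + 1
        heap[pos] = newitem
        _siftdown_max(heap, startpos, pos)

    def heapreplace_max(heap, item):
        # Pop the current maximum and push item in one O(log n) step.
        returnitem = heap[0]
        heap[0] = item
        _siftup_max(heap, 0)
        return returnitem

The pure-Python nsmallest() uses exactly this operation to maintain a bounded
maxheap of the best candidates seen so far.
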
diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c
index ba5789d..24c7b45 100644
--- a/Modules/_io/textio.c
+++ b/Modules/_io/textio.c
@@ -1440,6 +1440,7 @@
     PyObject *dec_buffer = NULL;
     PyObject *dec_flags = NULL;
     PyObject *input_chunk = NULL;
+    Py_buffer input_chunk_buf;
     PyObject *decoded_chars, *chunk_size;
     Py_ssize_t nbytes, nchars;
     int eof;
@@ -1471,6 +1472,15 @@
             Py_DECREF(state);
             return -1;
         }
+
+        if (!PyBytes_Check(dec_buffer)) {
+            PyErr_Format(PyExc_TypeError,
+                         "decoder getstate() should have returned a bytes "
+                         "object, not '%.200s'",
+                         Py_TYPE(dec_buffer)->tp_name);
+            Py_DECREF(state);
+            return -1;
+        }
         Py_INCREF(dec_buffer);
         Py_INCREF(dec_flags);
         Py_DECREF(state);
@@ -1483,23 +1493,24 @@
     chunk_size = PyLong_FromSsize_t(Py_MAX(self->chunk_size, size_hint));
     if (chunk_size == NULL)
         goto fail;
+
     input_chunk = PyObject_CallMethodObjArgs(self->buffer,
         (self->has_read1 ? _PyIO_str_read1: _PyIO_str_read),
         chunk_size, NULL);
     Py_DECREF(chunk_size);
     if (input_chunk == NULL)
         goto fail;
-    if (!PyBytes_Check(input_chunk)) {
+
+    if (PyObject_GetBuffer(input_chunk, &input_chunk_buf, 0) != 0) {
         PyErr_Format(PyExc_TypeError,
-                     "underlying %s() should have returned a bytes object, "
+                     "underlying %s() should have returned a bytes-like object, "
                      "not '%.200s'", (self->has_read1 ? "read1": "read"),
                      Py_TYPE(input_chunk)->tp_name);
         goto fail;
     }
 
-    nbytes = PyBytes_Size(input_chunk);
+    nbytes = input_chunk_buf.len;
     eof = (nbytes == 0);
-
     if (Py_TYPE(self->decoder) == &PyIncrementalNewlineDecoder_Type) {
         decoded_chars = _PyIncrementalNewlineDecoder_decode(
             self->decoder, input_chunk, eof);
@@ -1508,6 +1519,7 @@
         decoded_chars = PyObject_CallMethodObjArgs(self->decoder,
             _PyIO_str_decode, input_chunk, eof ? Py_True : Py_False, NULL);
     }
+    PyBuffer_Release(&input_chunk_buf);
 
     if (check_decoded(decoded_chars) < 0)
         goto fail;
@@ -1524,18 +1536,12 @@
         /* At the snapshot point, len(dec_buffer) bytes before the read, the
          * next input to be decoded is dec_buffer + input_chunk.
          */
-        PyObject *next_input = PyNumber_Add(dec_buffer, input_chunk);
-        if (next_input == NULL)
-            goto fail;
-        if (!PyBytes_Check(next_input)) {
-            PyErr_Format(PyExc_TypeError,
-                         "decoder getstate() should have returned a bytes "
-                         "object, not '%.200s'",
-                         Py_TYPE(next_input)->tp_name);
-            Py_DECREF(next_input);
+        PyObject *next_input = dec_buffer;
+        PyBytes_Concat(&next_input, input_chunk);
+        if (next_input == NULL) {
+            dec_buffer = NULL; /* Reference lost to PyBytes_Concat */
             goto fail;
         }
-        Py_DECREF(dec_buffer);
         Py_CLEAR(self->snapshot);
         self->snapshot = Py_BuildValue("NN", dec_flags, next_input);
     }
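
The hunk above lets the C TextIOWrapper accept any bytes-like object (anything
supporting the buffer protocol) from the underlying read()/read1(), instead of
requiring exact bytes.  A minimal sketch of the behaviour this enables; the
ByteArrayReader class below is a hypothetical stand-in, not part of the patch:

    import io

    class ByteArrayReader:
        # Hypothetical raw stream whose read1() returns bytearray, not bytes.
        def __init__(self, data):
            self._data = bytearray(data)
        def readable(self): return True
        def writable(self): return False
        def seekable(self): return False
        @property
        def closed(self): return False
        def close(self): pass
        def read1(self, size=-1):
            if size is None or size < 0:
                size = len(self._data)
            chunk = self._data[:size]
            del self._data[:size]
            return chunk                 # bytes-like, but not bytes

    wrapper = io.TextIOWrapper(ByteArrayReader(b"hello\nworld\n"), encoding="ascii")
    print(wrapper.readline())            # hello
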
diff --git a/Modules/_operator.c b/Modules/_operator.c
index adeb99e..9c5c0d2 100644
--- a/Modules/_operator.c
+++ b/Modules/_operator.c
@@ -69,6 +69,7 @@
 spam2(op_add           , PyNumber_Add)
 spam2(op_sub           , PyNumber_Subtract)
 spam2(op_mul           , PyNumber_Multiply)
+spam2(op_matmul        , PyNumber_MatrixMultiply)
 spam2(op_floordiv      , PyNumber_FloorDivide)
 spam2(op_truediv       , PyNumber_TrueDivide)
 spam2(op_mod           , PyNumber_Remainder)
@@ -86,6 +87,7 @@
 spam2(op_iadd          , PyNumber_InPlaceAdd)
 spam2(op_isub          , PyNumber_InPlaceSubtract)
 spam2(op_imul          , PyNumber_InPlaceMultiply)
+spam2(op_imatmul       , PyNumber_InPlaceMatrixMultiply)
 spam2(op_ifloordiv     , PyNumber_InPlaceFloorDivide)
 spam2(op_itruediv      , PyNumber_InPlaceTrueDivide)
 spam2(op_imod          , PyNumber_InPlaceRemainder)
@@ -343,6 +345,7 @@
 spam2(add, "add(a, b) -- Same as a + b.")
 spam2(sub, "sub(a, b) -- Same as a - b.")
 spam2(mul, "mul(a, b) -- Same as a * b.")
+spam2(matmul, "matmul(a, b) -- Same as a @ b.")
 spam2(floordiv, "floordiv(a, b) -- Same as a // b.")
 spam2(truediv, "truediv(a, b) -- Same as a / b.")
 spam2(mod, "mod(a, b) -- Same as a % b.")
@@ -360,6 +363,7 @@
 spam2(iadd, "a = iadd(a, b) -- Same as a += b.")
 spam2(isub, "a = isub(a, b) -- Same as a -= b.")
 spam2(imul, "a = imul(a, b) -- Same as a *= b.")
+spam2(imatmul, "a = imatmul(a, b) -- Same as a @= b.")
 spam2(ifloordiv, "a = ifloordiv(a, b) -- Same as a //= b.")
 spam2(itruediv, "a = itruediv(a, b) -- Same as a /= b")
 spam2(imod, "a = imod(a, b) -- Same as a %= b.")
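
The new matmul/imatmul wrappers behave like the other binary-operator helpers.
A short illustration (not part of the patch) with a toy class that merely
records its operands:

    import operator

    class M:
        # Toy operand type: the @ operators just report what was combined.
        def __init__(self, name):
            self.name = name
        def __matmul__(self, other):
            return ("matmul", self.name, other.name)
        def __imatmul__(self, other):
            return ("imatmul", self.name, other.name)

    a, b = M("a"), M("b")
    print(operator.matmul(a, b))     # ('matmul', 'a', 'b'), same as a @ b
    print(operator.imatmul(a, b))    # ('imatmul', 'a', 'b'), same as a @= b
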
diff --git a/Modules/_ssl.c b/Modules/_ssl.c
index 5031476..3b7226d 100644
--- a/Modules/_ssl.c
+++ b/Modules/_ssl.c
@@ -249,10 +249,8 @@
 } timeout_state;
 
 /* Wrap error strings with filename and line # */
-#define STRINGIFY1(x) #x
-#define STRINGIFY2(x) STRINGIFY1(x)
 #define ERRSTR1(x,y,z) (x ":" y ": " z)
-#define ERRSTR(x) ERRSTR1("_ssl.c", STRINGIFY2(__LINE__), x)
+#define ERRSTR(x) ERRSTR1("_ssl.c", Py_STRINGIFY(__LINE__), x)
 
 
 /*
diff --git a/Modules/_struct.c b/Modules/_struct.c
index 1de94e4..4941fc8 100644
--- a/Modules/_struct.c
+++ b/Modules/_struct.c
@@ -85,8 +85,6 @@
 #define BOOL_ALIGN 0
 #endif
 
-#define STRINGIFY(x)    #x
-
 #ifdef __powerc
 #pragma options align=reset
 #endif
@@ -546,8 +544,8 @@
         return -1;
     if (x < SHRT_MIN || x > SHRT_MAX){
         PyErr_SetString(StructError,
-                        "short format requires " STRINGIFY(SHRT_MIN)
-                        " <= number <= " STRINGIFY(SHRT_MAX));
+                        "short format requires " Py_STRINGIFY(SHRT_MIN)
+                        " <= number <= " Py_STRINGIFY(SHRT_MAX));
         return -1;
     }
     y = (short)x;
@@ -564,7 +562,8 @@
         return -1;
     if (x < 0 || x > USHRT_MAX){
         PyErr_SetString(StructError,
-                        "ushort format requires 0 <= number <= " STRINGIFY(USHRT_MAX));
+                        "ushort format requires 0 <= number <= "
+                        Py_STRINGIFY(USHRT_MAX));
         return -1;
     }
     y = (unsigned short)x;
diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c
index db2376d..a755004 100644
--- a/Modules/_testcapimodule.c
+++ b/Modules/_testcapimodule.c
@@ -2710,6 +2710,20 @@
 {
     void *ptr;
 
+    ptr = PyMem_RawMalloc(0);
+    if (ptr == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "PyMem_RawMalloc(0) returns NULL");
+        return NULL;
+    }
+    PyMem_RawFree(ptr);
+
+    ptr = PyMem_RawCalloc(0, 0);
+    if (ptr == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "PyMem_RawCalloc(0, 0) returns NULL");
+        return NULL;
+    }
+    PyMem_RawFree(ptr);
+
     ptr = PyMem_Malloc(0);
     if (ptr == NULL) {
         PyErr_SetString(PyExc_RuntimeError, "PyMem_Malloc(0) returns NULL");
@@ -2717,6 +2731,13 @@
     }
     PyMem_Free(ptr);
 
+    ptr = PyMem_Calloc(0, 0);
+    if (ptr == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "PyMem_Calloc(0, 0) returns NULL");
+        return NULL;
+    }
+    PyMem_Free(ptr);
+
     ptr = PyObject_Malloc(0);
     if (ptr == NULL) {
         PyErr_SetString(PyExc_RuntimeError, "PyObject_Malloc(0) returns NULL");
@@ -2724,6 +2745,13 @@
     }
     PyObject_Free(ptr);
 
+    ptr = PyObject_Calloc(0, 0);
+    if (ptr == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "PyObject_Calloc(0, 0) returns NULL");
+        return NULL;
+    }
+    PyObject_Free(ptr);
+
     Py_RETURN_NONE;
 }
 
@@ -2731,6 +2759,8 @@
     PyMemAllocator alloc;
 
     size_t malloc_size;
+    size_t calloc_nelem;
+    size_t calloc_elsize;
     void *realloc_ptr;
     size_t realloc_new_size;
     void *free_ptr;
@@ -2743,6 +2773,14 @@
     return hook->alloc.malloc(hook->alloc.ctx, size);
 }
 
+static void* hook_calloc (void* ctx, size_t nelem, size_t elsize)
+{
+    alloc_hook_t *hook = (alloc_hook_t *)ctx;
+    hook->calloc_nelem = nelem;
+    hook->calloc_elsize = elsize;
+    return hook->alloc.calloc(hook->alloc.ctx, nelem, elsize);
+}
+
 static void* hook_realloc (void* ctx, void* ptr, size_t new_size)
 {
     alloc_hook_t *hook = (alloc_hook_t *)ctx;
@@ -2765,16 +2803,14 @@
     const char *error_msg;
     alloc_hook_t hook;
     PyMemAllocator alloc;
-    size_t size, size2;
+    size_t size, size2, nelem, elsize;
     void *ptr, *ptr2;
 
-    hook.malloc_size = 0;
-    hook.realloc_ptr = NULL;
-    hook.realloc_new_size = 0;
-    hook.free_ptr = NULL;
+    memset(&hook, 0, sizeof(hook));
 
     alloc.ctx = &hook;
     alloc.malloc = &hook_malloc;
+    alloc.calloc = &hook_calloc;
     alloc.realloc = &hook_realloc;
     alloc.free = &hook_free;
     PyMem_GetAllocator(domain, &hook.alloc);
@@ -2831,6 +2867,33 @@
         goto fail;
     }
 
+    nelem = 2;
+    elsize = 5;
+    switch(domain)
+    {
+    case PYMEM_DOMAIN_RAW: ptr = PyMem_RawCalloc(nelem, elsize); break;
+    case PYMEM_DOMAIN_MEM: ptr = PyMem_Calloc(nelem, elsize); break;
+    case PYMEM_DOMAIN_OBJ: ptr = PyObject_Calloc(nelem, elsize); break;
+    default: ptr = NULL; break;
+    }
+
+    if (ptr == NULL) {
+        error_msg = "calloc failed";
+        goto fail;
+    }
+
+    if (hook.calloc_nelem != nelem || hook.calloc_elsize != elsize) {
+        error_msg = "calloc invalid nelem or elsize";
+        goto fail;
+    }
+
+    switch(domain)
+    {
+    case PYMEM_DOMAIN_RAW: PyMem_RawFree(ptr); break;
+    case PYMEM_DOMAIN_MEM: PyMem_Free(ptr); break;
+    case PYMEM_DOMAIN_OBJ: PyObject_Free(ptr); break;
+    }
+
     Py_INCREF(Py_None);
     res = Py_None;
     goto finally;
@@ -3298,6 +3361,109 @@
 };
 
 
+typedef struct {
+    PyObject_HEAD
+} matmulObject;
+
+static PyObject *
+matmulType_matmul(PyObject *self, PyObject *other)
+{
+    return Py_BuildValue("(sOO)", "matmul", self, other);
+}
+
+static PyObject *
+matmulType_imatmul(PyObject *self, PyObject *other)
+{
+    return Py_BuildValue("(sOO)", "imatmul", self, other);
+}
+
+static void
+matmulType_dealloc(PyObject *self)
+{
+    Py_TYPE(self)->tp_free(self);
+}
+
+static PyNumberMethods matmulType_as_number = {
+    0,                          /* nb_add */
+    0,                          /* nb_subtract */
+    0,                          /* nb_multiply */
+    0,                          /* nb_remainder */
+    0,                          /* nb_divmod */
+    0,                          /* nb_power */
+    0,                          /* nb_negative */
+    0,                          /* nb_positive */
+    0,                          /* nb_absolute */
+    0,                          /* nb_bool */
+    0,                          /* nb_invert */
+    0,                          /* nb_lshift */
+    0,                          /* nb_rshift */
+    0,                          /* nb_and */
+    0,                          /* nb_xor */
+    0,                          /* nb_or */
+    0,                          /* nb_int */
+    0,                          /* nb_reserved */
+    0,                          /* nb_float */
+    0,                          /* nb_inplace_add */
+    0,                          /* nb_inplace_subtract */
+    0,                          /* nb_inplace_multiply */
+    0,                          /* nb_inplace_remainder */
+    0,                          /* nb_inplace_power */
+    0,                          /* nb_inplace_lshift */
+    0,                          /* nb_inplace_rshift */
+    0,                          /* nb_inplace_and */
+    0,                          /* nb_inplace_xor */
+    0,                          /* nb_inplace_or */
+    0,                          /* nb_floor_divide */
+    0,                          /* nb_true_divide */
+    0,                          /* nb_inplace_floor_divide */
+    0,                          /* nb_inplace_true_divide */
+    0,                          /* nb_index */
+    matmulType_matmul,        /* nb_matrix_multiply */
+    matmulType_imatmul        /* nb_matrix_inplace_multiply */
+};
+
+static PyTypeObject matmulType = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "matmulType",
+    sizeof(matmulObject),               /* tp_basicsize */
+    0,                                  /* tp_itemsize */
+    matmulType_dealloc,                 /* destructor tp_dealloc */
+    0,                                  /* tp_print */
+    0,                                  /* tp_getattr */
+    0,                                  /* tp_setattr */
+    0,                                  /* tp_reserved */
+    0,                                  /* tp_repr */
+    &matmulType_as_number,              /* tp_as_number */
+    0,                                  /* tp_as_sequence */
+    0,                                  /* tp_as_mapping */
+    0,                                  /* tp_hash */
+    0,                                  /* tp_call */
+    0,                                  /* tp_str */
+    PyObject_GenericGetAttr,            /* tp_getattro */
+    PyObject_GenericSetAttr,            /* tp_setattro */
+    0,                                  /* tp_as_buffer */
+    0,                                  /* tp_flags */
+    "C level type with matrix operations defined",
+    0,                                  /* traverseproc tp_traverse */
+    0,                                  /* tp_clear */
+    0,                                  /* tp_richcompare */
+    0,                                  /* tp_weaklistoffset */
+    0,                                  /* tp_iter */
+    0,                                  /* tp_iternext */
+    0,                                  /* tp_methods */
+    0,                                  /* tp_members */
+    0,                                  /* tp_getset */
+    0,                                  /* tp_base */
+    0,                                  /* tp_dict */
+    0,                                  /* tp_descr_get */
+    0,                                  /* tp_descr_set */
+    0,                                  /* tp_dictoffset */
+    0,                                  /* tp_init */
+    0,                                  /* tp_alloc */
+    PyType_GenericNew,                  /* tp_new */
+    PyObject_Del,                       /* tp_free */
+};
+
 
 static struct PyModuleDef _testcapimodule = {
     PyModuleDef_HEAD_INIT,
@@ -3327,6 +3493,10 @@
     /* don't use a name starting with "test", since we don't want
        test_capi to automatically call this */
     PyModule_AddObject(m, "_test_structmembersType", (PyObject *)&test_structmembersType);
+    if (PyType_Ready(&matmulType) < 0)
+        return NULL;
+    Py_INCREF(&matmulType);
+    PyModule_AddObject(m, "matmulType", (PyObject *)&matmulType);
 
     PyModule_AddObject(m, "CHAR_MAX", PyLong_FromLong(CHAR_MAX));
     PyModule_AddObject(m, "CHAR_MIN", PyLong_FromLong(CHAR_MIN));
diff --git a/Modules/_testembed.c b/Modules/_testembed.c
index a21d251..39ff097 100644
--- a/Modules/_testembed.c
+++ b/Modules/_testembed.c
@@ -109,11 +109,11 @@
     printf("--- Use defaults ---\n");
     check_stdio_details(NULL, NULL);
     printf("--- Set errors only ---\n");
-    check_stdio_details(NULL, "surrogateescape");
+    check_stdio_details(NULL, "ignore");
     printf("--- Set encoding only ---\n");
     check_stdio_details("latin-1", NULL);
     printf("--- Set encoding and errors ---\n");
-    check_stdio_details("latin-1", "surrogateescape");
+    check_stdio_details("latin-1", "replace");
 
     /* Check calling after initialization fails */
     Py_Initialize();
diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c
index af430fb..e47e8f3 100644
--- a/Modules/_tkinter.c
+++ b/Modules/_tkinter.c
@@ -457,6 +457,26 @@
             return result;
         /* Fall through, returning arg. */
     }
+    else if (PyList_Check(arg)) {
+        int i, size;
+        PyObject *elem, *newelem, *result;
+
+        size = PyList_GET_SIZE(arg);
+        result = PyTuple_New(size);
+        if (!result)
+            return NULL;
+        /* Recursively invoke SplitObj for all list items. */
+        for(i = 0; i < size; i++) {
+            elem = PyList_GET_ITEM(arg, i);
+            newelem = SplitObj(elem);
+            if (!newelem) {
+                Py_XDECREF(result);
+                return NULL;
+            }
+            PyTuple_SetItem(result, i, newelem);
+        }
+        return result;
+    }
     else if (PyUnicode_Check(arg)) {
         int argc;
         char **argv;
@@ -882,21 +902,23 @@
     }
     else if (PyFloat_Check(value))
         return Tcl_NewDoubleObj(PyFloat_AS_DOUBLE(value));
-    else if (PyTuple_Check(value)) {
+    else if (PyTuple_Check(value) || PyList_Check(value)) {
         Tcl_Obj **argv;
         Py_ssize_t size, i;
 
-        size = PyTuple_Size(value);
+        size = PySequence_Fast_GET_SIZE(value);
         if (!CHECK_SIZE(size, sizeof(Tcl_Obj *))) {
-            PyErr_SetString(PyExc_OverflowError, "tuple is too long");
+            PyErr_SetString(PyExc_OverflowError,
+                            PyTuple_Check(value) ? "tuple is too long" :
+                                                   "list is too long");
             return NULL;
         }
         argv = (Tcl_Obj **) ckalloc(((size_t)size) * sizeof(Tcl_Obj *));
         if(!argv)
           return 0;
         for (i = 0; i < size; i++)
-          argv[i] = AsObj(PyTuple_GetItem(value,i));
-        result = Tcl_NewListObj(PyTuple_Size(value), argv);
+          argv[i] = AsObj(PySequence_Fast_GET_ITEM(value,i));
+        result = Tcl_NewListObj(size, argv);
         ckfree(FREECAST argv);
         return result;
     }
@@ -1071,7 +1093,7 @@
     if (args == NULL)
         /* do nothing */;
 
-    else if (!PyTuple_Check(args)) {
+    else if (!(PyTuple_Check(args) || PyList_Check(args))) {
         objv[0] = AsObj(args);
         if (objv[0] == 0)
             goto finally;
@@ -1079,11 +1101,13 @@
         Tcl_IncrRefCount(objv[0]);
     }
     else {
-        objc = PyTuple_Size(args);
+        objc = PySequence_Fast_GET_SIZE(args);
 
         if (objc > ARGSZ) {
             if (!CHECK_SIZE(objc, sizeof(Tcl_Obj *))) {
-                PyErr_SetString(PyExc_OverflowError, "tuple is too long");
+                PyErr_SetString(PyExc_OverflowError,
+                                PyTuple_Check(args) ? "tuple is too long" :
+                                                      "list is too long");
                 return NULL;
             }
             objv = (Tcl_Obj **)ckalloc(((size_t)objc) * sizeof(Tcl_Obj *));
@@ -1095,7 +1119,7 @@
         }
 
         for (i = 0; i < objc; i++) {
-            PyObject *v = PyTuple_GetItem(args, i);
+            PyObject *v = PySequence_Fast_GET_ITEM(args, i);
             if (v == Py_None) {
                 objc = i;
                 break;
@@ -1834,6 +1858,9 @@
         Py_INCREF(arg);
         return arg;
     }
+    if (PyList_Check(arg)) {
+        return PySequence_Tuple(arg);
+    }
 
     if (!PyArg_ParseTuple(args, "et:splitlist", "utf-8", &list))
         return NULL;
@@ -1894,7 +1921,7 @@
         }
         return v;
     }
-    if (PyTuple_Check(arg))
+    if (PyTuple_Check(arg) || PyList_Check(arg))
         return SplitObj(arg);
 
     if (!PyArg_ParseTuple(args, "et:split", "utf-8", &list))
@@ -2684,35 +2711,15 @@
         PyErr_SetString(PyExc_ValueError,
                         "nesting too deep in _flatten");
         return 0;
-    } else if (PyList_Check(item)) {
-        size = PyList_GET_SIZE(item);
+    } else if (PyTuple_Check(item) || PyList_Check(item)) {
+        size = PySequence_Fast_GET_SIZE(item);
         /* preallocate (assume no nesting) */
         if (context->size + size > context->maxsize &&
             !_bump(context, size))
             return 0;
         /* copy items to output tuple */
         for (i = 0; i < size; i++) {
-            PyObject *o = PyList_GET_ITEM(item, i);
-            if (PyList_Check(o) || PyTuple_Check(o)) {
-                if (!_flatten1(context, o, depth + 1))
-                    return 0;
-            } else if (o != Py_None) {
-                if (context->size + 1 > context->maxsize &&
-                    !_bump(context, 1))
-                    return 0;
-                Py_INCREF(o);
-                PyTuple_SET_ITEM(context->tuple,
-                                 context->size++, o);
-            }
-        }
-    } else if (PyTuple_Check(item)) {
-        /* same, for tuples */
-        size = PyTuple_GET_SIZE(item);
-        if (context->size + size > context->maxsize &&
-            !_bump(context, size))
-            return 0;
-        for (i = 0; i < size; i++) {
-            PyObject *o = PyTuple_GET_ITEM(item, i);
+            PyObject *o = PySequence_Fast_GET_ITEM(item, i);
             if (PyList_Check(o) || PyTuple_Check(o)) {
                 if (!_flatten1(context, o, depth + 1))
                     return 0;
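
With these changes the Tcl conversion helpers treat lists exactly like tuples.
One place this is easy to observe without a running Tk interpreter is tkinter's
_flatten helper, whose C implementation is the _flatten1 routine above
(illustration only, assuming tkinter is available):

    from tkinter import _flatten

    # Lists and tuples are handled uniformly; None items are dropped.
    print(_flatten([(1, 2), [3, 4], None, [5]]))    # (1, 2, 3, 4, 5)
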
diff --git a/Modules/_tracemalloc.c b/Modules/_tracemalloc.c
index 780e8ed..429b209 100644
--- a/Modules/_tracemalloc.c
+++ b/Modules/_tracemalloc.c
@@ -16,9 +16,6 @@
 #  define TRACE_DEBUG
 #endif
 
-#define _STR(VAL) #VAL
-#define STR(VAL) _STR(VAL)
-
 /* Protected by the GIL */
 static struct {
     PyMemAllocator mem;
@@ -476,17 +473,22 @@
 }
 
 static void*
-tracemalloc_malloc(void *ctx, size_t size)
+tracemalloc_alloc(int use_calloc, void *ctx, size_t nelem, size_t elsize)
 {
     PyMemAllocator *alloc = (PyMemAllocator *)ctx;
     void *ptr;
 
-    ptr = alloc->malloc(alloc->ctx, size);
+    assert(nelem <= PY_SIZE_MAX / elsize);
+
+    if (use_calloc)
+        ptr = alloc->calloc(alloc->ctx, nelem, elsize);
+    else
+        ptr = alloc->malloc(alloc->ctx, nelem * elsize);
     if (ptr == NULL)
         return NULL;
 
     TABLES_LOCK();
-    if (tracemalloc_add_trace(ptr, size) < 0) {
+    if (tracemalloc_add_trace(ptr, nelem * elsize) < 0) {
         /* Failed to allocate a trace for the new memory block */
         TABLES_UNLOCK();
         alloc->free(alloc->ctx, ptr);
@@ -560,13 +562,16 @@
 }
 
 static void*
-tracemalloc_malloc_gil(void *ctx, size_t size)
+tracemalloc_alloc_gil(int use_calloc, void *ctx, size_t nelem, size_t elsize)
 {
     void *ptr;
 
     if (get_reentrant()) {
         PyMemAllocator *alloc = (PyMemAllocator *)ctx;
-        return alloc->malloc(alloc->ctx, size);
+        if (use_calloc)
+            return alloc->calloc(alloc->ctx, nelem, elsize);
+        else
+            return alloc->malloc(alloc->ctx, nelem * elsize);
     }
 
     /* Ignore reentrant call. PyObject_Malloc() calls PyMem_Malloc() for
@@ -574,13 +579,25 @@
        allocation twice. */
     set_reentrant(1);
 
-    ptr = tracemalloc_malloc(ctx, size);
+    ptr = tracemalloc_alloc(use_calloc, ctx, nelem, elsize);
 
     set_reentrant(0);
     return ptr;
 }
 
 static void*
+tracemalloc_malloc_gil(void *ctx, size_t size)
+{
+    return tracemalloc_alloc_gil(0, ctx, 1, size);
+}
+
+static void*
+tracemalloc_calloc_gil(void *ctx, size_t nelem, size_t elsize)
+{
+    return tracemalloc_alloc_gil(1, ctx, nelem, elsize);
+}
+
+static void*
 tracemalloc_realloc_gil(void *ctx, void *ptr, size_t new_size)
 {
     void *ptr2;
@@ -614,7 +631,7 @@
 
 #ifdef TRACE_RAW_MALLOC
 static void*
-tracemalloc_raw_malloc(void *ctx, size_t size)
+tracemalloc_raw_alloc(int use_calloc, void *ctx, size_t nelem, size_t elsize)
 {
 #ifdef WITH_THREAD
     PyGILState_STATE gil_state;
@@ -623,7 +640,10 @@
 
     if (get_reentrant()) {
         PyMemAllocator *alloc = (PyMemAllocator *)ctx;
-        return alloc->malloc(alloc->ctx, size);
+        if (use_calloc)
+            return alloc->calloc(alloc->ctx, nelem, elsize);
+        else
+            return alloc->malloc(alloc->ctx, nelem * elsize);
     }
 
     /* Ignore reentrant call. PyGILState_Ensure() may call PyMem_RawMalloc()
@@ -633,10 +653,10 @@
 
 #ifdef WITH_THREAD
     gil_state = PyGILState_Ensure();
-    ptr = tracemalloc_malloc(ctx, size);
+    ptr = tracemalloc_alloc(use_calloc, ctx, nelem, elsize);
     PyGILState_Release(gil_state);
 #else
-    ptr = tracemalloc_malloc(ctx, size);
+    ptr = tracemalloc_alloc(use_calloc, ctx, nelem, elsize);
 #endif
 
     set_reentrant(0);
@@ -644,6 +664,18 @@
 }
 
 static void*
+tracemalloc_raw_malloc(void *ctx, size_t size)
+{
+    return tracemalloc_raw_alloc(0, ctx, 1, size);
+}
+
+static void*
+tracemalloc_raw_calloc(void *ctx, size_t nelem, size_t elsize)
+{
+    return tracemalloc_raw_alloc(1, ctx, nelem, elsize);
+}
+
+static void*
 tracemalloc_raw_realloc(void *ctx, void *ptr, size_t new_size)
 {
 #ifdef WITH_THREAD
@@ -856,6 +888,7 @@
 
 #ifdef TRACE_RAW_MALLOC
     alloc.malloc = tracemalloc_raw_malloc;
+    alloc.calloc = tracemalloc_raw_calloc;
     alloc.realloc = tracemalloc_raw_realloc;
     alloc.free = tracemalloc_free;
 
@@ -865,6 +898,7 @@
 #endif
 
     alloc.malloc = tracemalloc_malloc_gil;
+    alloc.calloc = tracemalloc_calloc_gil;
     alloc.realloc = tracemalloc_realloc_gil;
     alloc.free = tracemalloc_free;
 
diff --git a/Modules/_winapi.c b/Modules/_winapi.c
index b755178..f118436 100644
--- a/Modules/_winapi.c
+++ b/Modules/_winapi.c
@@ -40,6 +40,7 @@
 #define WINDOWS_LEAN_AND_MEAN
 #include "windows.h"
 #include <crtdbg.h>
+#include "winreparse.h"
 
 #if defined(MS_WIN32) && !defined(MS_WIN64)
 #define HANDLE_TO_PYNUM(handle) \
@@ -401,6 +402,140 @@
 }
 
 static PyObject *
+winapi_CreateJunction(PyObject *self, PyObject *args)
+{
+    /* Input arguments */
+    LPWSTR src_path = NULL;
+    LPWSTR dst_path = NULL;
+
+    /* Privilege adjustment */
+    HANDLE token = NULL;
+    TOKEN_PRIVILEGES tp;
+
+    /* Reparse data buffer */
+    const USHORT prefix_len = 4;
+    USHORT print_len = 0;
+    USHORT rdb_size = 0;
+    PREPARSE_DATA_BUFFER rdb = NULL;
+
+    /* Junction point creation */
+    HANDLE junction = NULL;
+    DWORD ret = 0;
+
+    if (!PyArg_ParseTuple(args, "uu", &src_path, &dst_path))
+        return NULL;
+
+    if (src_path == NULL || dst_path == NULL)
+        return PyErr_SetFromWindowsErr(ERROR_INVALID_PARAMETER);
+
+    if (wcsncmp(src_path, L"\\??\\", prefix_len) == 0)
+        return PyErr_SetFromWindowsErr(ERROR_INVALID_PARAMETER);
+
+    /* Adjust privileges to allow rewriting directory entry as a
+       junction point. */
+    if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
+        goto cleanup;
+
+    if (!LookupPrivilegeValue(NULL, SE_RESTORE_NAME, &tp.Privileges[0].Luid))
+        goto cleanup;
+
+    tp.PrivilegeCount = 1;
+    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+    if (!AdjustTokenPrivileges(token, FALSE, &tp, sizeof(TOKEN_PRIVILEGES),
+                               NULL, NULL))
+        goto cleanup;
+
+    if (GetFileAttributesW(src_path) == INVALID_FILE_ATTRIBUTES)
+        goto cleanup;
+
+    /* Store the absolute link target path length in print_len. */
+    print_len = (USHORT)GetFullPathNameW(src_path, 0, NULL, NULL);
+    if (print_len == 0)
+        goto cleanup;
+
+    /* NUL terminator should not be part of print_len. */
+    --print_len;
+
+    /* REPARSE_DATA_BUFFER usage is heavily under-documented, especially for
+       junction points. Here's what I've learned along the way:
+       - A junction point has two components: a print name and a substitute
+         name. They both describe the link target, but the substitute name is
+         the physical target and the print name is shown in directory listings.
+       - The substitute name must be a native path, prefixed with "\??\".
+       - Both names are stored after each other in the same buffer (the
+         PathBuffer) and both must be NUL-terminated.
+       - There are four members defining their respective offset and length
+         inside PathBuffer: SubstituteNameOffset, SubstituteNameLength,
+         PrintNameOffset and PrintNameLength.
+       - The total size we need to allocate for the REPARSE_DATA_BUFFER, thus,
+         is the sum of:
+         - the fixed header size (REPARSE_DATA_BUFFER_HEADER_SIZE)
+         - the size of the MountPointReparseBuffer member without the PathBuffer
+         - the size of the prefix ("\??\") in bytes
+         - the size of the print name in bytes
+         - the size of the substitute name in bytes
+         - the size of two NUL terminators in bytes */
+    rdb_size = REPARSE_DATA_BUFFER_HEADER_SIZE +
+        sizeof(rdb->MountPointReparseBuffer) -
+        sizeof(rdb->MountPointReparseBuffer.PathBuffer) +
+        /* Two +1's for NUL terminators. */
+        (prefix_len + print_len + 1 + print_len + 1) * sizeof(WCHAR);
+    rdb = (PREPARSE_DATA_BUFFER)PyMem_RawMalloc(rdb_size);
+    if (rdb == NULL)
+        goto cleanup;
+
+    memset(rdb, 0, rdb_size);
+    rdb->ReparseTag = IO_REPARSE_TAG_MOUNT_POINT;
+    rdb->ReparseDataLength = rdb_size - REPARSE_DATA_BUFFER_HEADER_SIZE;
+    rdb->MountPointReparseBuffer.SubstituteNameOffset = 0;
+    rdb->MountPointReparseBuffer.SubstituteNameLength =
+        (prefix_len + print_len) * sizeof(WCHAR);
+    rdb->MountPointReparseBuffer.PrintNameOffset =
+        rdb->MountPointReparseBuffer.SubstituteNameLength + sizeof(WCHAR);
+    rdb->MountPointReparseBuffer.PrintNameLength = print_len * sizeof(WCHAR);
+
+    /* Store the full native path of link target at the substitute name
+       offset (0). */
+    wcscpy(rdb->MountPointReparseBuffer.PathBuffer, L"\\??\\");
+    if (GetFullPathNameW(src_path, print_len + 1,
+                         rdb->MountPointReparseBuffer.PathBuffer + prefix_len,
+                         NULL) == 0)
+        goto cleanup;
+
+    /* Copy everything but the native prefix to the print name offset. */
+    wcscpy(rdb->MountPointReparseBuffer.PathBuffer +
+             prefix_len + print_len + 1,
+             rdb->MountPointReparseBuffer.PathBuffer + prefix_len);
+
+    /* Create a directory for the junction point. */
+    if (!CreateDirectoryW(dst_path, NULL))
+        goto cleanup;
+
+    junction = CreateFileW(dst_path, GENERIC_READ | GENERIC_WRITE, 0, NULL,
+        OPEN_EXISTING,
+        FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, NULL);
+    if (junction == INVALID_HANDLE_VALUE)
+        goto cleanup;
+
+    /* Make the directory entry a junction point. */
+    if (!DeviceIoControl(junction, FSCTL_SET_REPARSE_POINT, rdb, rdb_size,
+                         NULL, 0, &ret, NULL))
+        goto cleanup;
+
+cleanup:
+    ret = GetLastError();
+
+    CloseHandle(token);
+    CloseHandle(junction);
+    PyMem_RawFree(rdb);
+
+    if (ret != 0)
+        return PyErr_SetFromWindowsErr(ret);
+
+    Py_RETURN_NONE;
+}
+
+static PyObject *
 winapi_CreateNamedPipe(PyObject *self, PyObject *args)
 {
     LPCTSTR lpName;
@@ -1225,6 +1360,8 @@
      METH_VARARGS | METH_KEYWORDS, ""},
     {"CreateFile", winapi_CreateFile, METH_VARARGS,
      ""},
+    {"CreateJunction", winapi_CreateJunction, METH_VARARGS,
+     ""},
     {"CreateNamedPipe", winapi_CreateNamedPipe, METH_VARARGS,
      ""},
     {"CreatePipe", winapi_CreatePipe, METH_VARARGS,
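To make the rdb_size formula in winapi_CreateJunction() concrete, here is a worked example (illustrative numbers only, assuming 2-byte WCHARs and no unusual struct packing). For a link target whose full path is C:\Temp\target, GetFullPathNameW() reports 14 characters once the trailing NUL is dropped, so print_len == 14, and:

    PathBuffer  = (4 + 14 + 1 + 14 + 1) * sizeof(WCHAR) = 34 * 2 = 68 bytes
    fixed reparse header (REPARSE_DATA_BUFFER_HEADER_SIZE)       =  8 bytes
    fixed MountPointReparseBuffer fields (four USHORTs)          =  8 bytes
    rdb_size                                                     = 84 bytes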
diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c
index 9bb3666..5e8e17b 100644
--- a/Modules/gcmodule.c
+++ b/Modules/gcmodule.c
@@ -25,6 +25,7 @@
 
 #include "Python.h"
 #include "frameobject.h"        /* for PyFrame_ClearFreeList */
+#include "pytime.h"           /* for _PyTime_gettimeofday, _PyTime_INTERVAL */
 
 /* Get an object's GC head */
 #define AS_GC(o) ((PyGC_Head *)(o)-1)
@@ -166,7 +167,6 @@
                 DEBUG_UNCOLLECTABLE | \
                 DEBUG_SAVEALL
 static int debug;
-static PyObject *tmod = NULL;
 
 /* Running stats per generation */
 struct gc_generation_stats {
@@ -894,26 +894,6 @@
     (void)PySet_ClearFreeList();
 }
 
-static double
-get_time(void)
-{
-    double result = 0;
-    if (tmod != NULL) {
-        _Py_IDENTIFIER(time);
-
-        PyObject *f = _PyObject_CallMethodId(tmod, &PyId_time, NULL);
-        if (f == NULL) {
-            PyErr_Clear();
-        }
-        else {
-            if (PyFloat_Check(f))
-                result = PyFloat_AsDouble(f);
-            Py_DECREF(f);
-        }
-    }
-    return result;
-}
-
 /* This is the main function.  Read this to understand how the
  * collection process works. */
 static Py_ssize_t
@@ -928,7 +908,8 @@
     PyGC_Head unreachable; /* non-problematic unreachable trash */
     PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
     PyGC_Head *gc;
-    double t1 = 0.0;
+    _PyTime_timeval t1;
+
     struct gc_generation_stats *stats = &generation_stats[generation];
 
     if (debug & DEBUG_STATS) {
@@ -936,9 +917,10 @@
                           generation);
         PySys_WriteStderr("gc: objects in each generation:");
         for (i = 0; i < NUM_GENERATIONS; i++)
-            PySys_WriteStderr(" %" PY_FORMAT_SIZE_T "d",
+            PySys_FormatStderr(" %zd",
                               gc_list_size(GEN_HEAD(i)));
-        t1 = get_time();
+        _PyTime_gettimeofday(&t1);
+
         PySys_WriteStderr("\n");
     }
 
@@ -1042,19 +1024,16 @@
             debug_cycle("uncollectable", FROM_GC(gc));
     }
     if (debug & DEBUG_STATS) {
-        double t2 = get_time();
+        _PyTime_timeval t2;
+        _PyTime_gettimeofday(&t2);
+
         if (m == 0 && n == 0)
             PySys_WriteStderr("gc: done");
         else
-            PySys_WriteStderr(
-                "gc: done, "
-                "%" PY_FORMAT_SIZE_T "d unreachable, "
-                "%" PY_FORMAT_SIZE_T "d uncollectable",
+            PySys_FormatStderr(
+                "gc: done, %zd unreachable, %zd uncollectable",
                 n+m, n);
-        if (t1 && t2) {
-            PySys_WriteStderr(", %.4fs elapsed", t2-t1);
-        }
-        PySys_WriteStderr(".\n");
+        PySys_WriteStderr(", %.4fs elapsed\n", _PyTime_INTERVAL(t1, t2));
     }
 
     /* Append instances in the uncollectable set to a Python
@@ -1581,18 +1560,6 @@
     if (PyModule_AddObject(m, "callbacks", callbacks) < 0)
         return NULL;
 
-    /* Importing can't be done in collect() because collect()
-     * can be called via PyGC_Collect() in Py_Finalize().
-     * This wouldn't be a problem, except that <initialized> is
-     * reset to 0 before calling collect which trips up
-     * the import and triggers an assertion.
-     */
-    if (tmod == NULL) {
-        tmod = PyImport_ImportModuleNoBlock("time");
-        if (tmod == NULL)
-            PyErr_Clear();
-    }
-
 #define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
     ADD_INT(DEBUG_STATS);
     ADD_INT(DEBUG_COLLECTABLE);
@@ -1681,7 +1648,6 @@
 _PyGC_Fini(void)
 {
     Py_CLEAR(callbacks);
-    Py_CLEAR(tmod);
 }
 
 /* for debugging */
@@ -1715,15 +1681,19 @@
         _PyObject_GC_UNTRACK(op);
 }
 
-PyObject *
-_PyObject_GC_Malloc(size_t basicsize)
+static PyObject *
+_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
 {
     PyObject *op;
     PyGC_Head *g;
+    size_t size;
     if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
         return PyErr_NoMemory();
-    g = (PyGC_Head *)PyObject_MALLOC(
-        sizeof(PyGC_Head) + basicsize);
+    size = sizeof(PyGC_Head) + basicsize;
+    if (use_calloc)
+        g = (PyGC_Head *)PyObject_Calloc(1, size);
+    else
+        g = (PyGC_Head *)PyObject_Malloc(size);
     if (g == NULL)
         return PyErr_NoMemory();
     g->gc.gc_refs = 0;
@@ -1743,6 +1713,18 @@
 }
 
 PyObject *
+_PyObject_GC_Malloc(size_t basicsize)
+{
+    return _PyObject_GC_Alloc(0, basicsize);
+}
+
+PyObject *
+_PyObject_GC_Calloc(size_t basicsize)
+{
+    return _PyObject_GC_Alloc(1, basicsize);
+}
+
+PyObject *
 _PyObject_GC_New(PyTypeObject *tp)
 {
     PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
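The DEBUG_STATS timing above now uses the private pytime.h helpers instead of importing the time module at gc startup. A minimal sketch of the same measurement pattern (internal API, so these names can change between CPython versions; the helper is hypothetical):

#include "Python.h"
#include "pytime.h"

/* Return the wall-clock seconds spent in work(), using the same helpers the
   collect() instrumentation above uses. */
static double
timed_call(void (*work)(void))
{
    _PyTime_timeval t1, t2;

    _PyTime_gettimeofday(&t1);
    work();
    _PyTime_gettimeofday(&t2);
    return _PyTime_INTERVAL(t1, t2);   /* elapsed time as a C double */
}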
diff --git a/Modules/main.c b/Modules/main.c
index 87a21d7..1c25326 100644
--- a/Modules/main.c
+++ b/Modules/main.c
@@ -343,6 +343,8 @@
     int version = 0;
     int saw_unbuffered_flag = 0;
     PyCompilerFlags cf;
+    PyObject *warning_option = NULL;
+    PyObject *warning_options = NULL;
 
     cf.cf_flags = 0;
 
@@ -465,7 +467,15 @@
             break;
 
         case 'W':
-            PySys_AddWarnOption(_PyOS_optarg);
+            if (warning_options == NULL)
+                warning_options = PyList_New(0);
+            if (warning_options == NULL)
+                Py_FatalError("failure in handling of -W argument");
+            warning_option = PyUnicode_FromWideChar(_PyOS_optarg, -1);
+            if (warning_option == NULL)
+                Py_FatalError("failure in handling of -W argument");
+            PyList_Append(warning_options, warning_option);
+            Py_DECREF(warning_option);
             break;
 
         case 'X':
@@ -559,6 +569,12 @@
         PyMem_RawFree(buf);
     }
 #endif
+    if (warning_options != NULL) {
+        Py_ssize_t i;
+        for (i = 0; i < PyList_GET_SIZE(warning_options); i++) {
+            PySys_AddWarnOptionUnicode(PyList_GET_ITEM(warning_options, i));
+        }
+    }
 
     if (command == NULL && module == NULL && _PyOS_optind < argc &&
         wcscmp(argv[_PyOS_optind], L"-") != 0)
@@ -652,6 +668,7 @@
     Py_SetProgramName(argv[0]);
 #endif
     Py_Initialize();
+    Py_XDECREF(warning_options);
 
     if (!Py_QuietFlag && (Py_VerboseFlag ||
                         (command == NULL && filename == NULL &&
diff --git a/Modules/mathmodule.c b/Modules/mathmodule.c
index 7f094ff..7f525ea 100644
--- a/Modules/mathmodule.c
+++ b/Modules/mathmodule.c
@@ -1408,6 +1408,7 @@
 math_factorial(PyObject *self, PyObject *arg)
 {
     long x;
+    int overflow;
     PyObject *result, *odd_part, *two_valuation;
 
     if (PyFloat_Check(arg)) {
@@ -1421,15 +1422,22 @@
         lx = PyLong_FromDouble(dx);
         if (lx == NULL)
             return NULL;
-        x = PyLong_AsLong(lx);
+        x = PyLong_AsLongAndOverflow(lx, &overflow);
         Py_DECREF(lx);
     }
     else
-        x = PyLong_AsLong(arg);
+        x = PyLong_AsLongAndOverflow(arg, &overflow);
 
-    if (x == -1 && PyErr_Occurred())
+    if (x == -1 && PyErr_Occurred()) {
         return NULL;
-    if (x < 0) {
+    }
+    else if (overflow == 1) {
+        PyErr_Format(PyExc_OverflowError,
+                     "factorial() argument should not exceed %ld",
+                     LONG_MAX);
+        return NULL;
+    }
+    else if (overflow == -1 || x < 0) {
         PyErr_SetString(PyExc_ValueError,
                         "factorial() not defined for negative values");
         return NULL;
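The factorial() fix relies on PyLong_AsLongAndOverflow() reporting "too big for a C long" separately from other conversion failures. A small sketch of that calling convention (the helper is hypothetical):

#include "Python.h"

/* Convert obj to a C long, distinguishing overflow from other errors. */
static int
get_long_checked(PyObject *obj, long *out)
{
    int overflow;
    long value = PyLong_AsLongAndOverflow(obj, &overflow);

    if (value == -1 && PyErr_Occurred())
        return -1;                     /* a real error, e.g. TypeError */
    if (overflow) {
        PyErr_SetString(PyExc_OverflowError,
                        "value does not fit in a C long");
        return -1;
    }
    *out = value;
    return 0;
}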
diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c
index 8cd5485..916be81 100644
--- a/Modules/posixmodule.c
+++ b/Modules/posixmodule.c
@@ -27,6 +27,8 @@
 #include "Python.h"
 #ifndef MS_WINDOWS
 #include "posixmodule.h"
+#else
+#include "winreparse.h"
 #endif
 
 #ifdef __cplusplus
@@ -301,6 +303,9 @@
 #ifndef IO_REPARSE_TAG_SYMLINK
 #define IO_REPARSE_TAG_SYMLINK (0xA000000CL)
 #endif
+#ifndef IO_REPARSE_TAG_MOUNT_POINT
+#define IO_REPARSE_TAG_MOUNT_POINT (0xA0000003L)
+#endif
 #include "osdefs.h"
 #include <malloc.h>
 #include <windows.h>
@@ -1109,41 +1114,6 @@
 #endif
 
 #ifdef MS_WINDOWS
-/* The following structure was copied from
-   http://msdn.microsoft.com/en-us/library/ms791514.aspx as the required
-   include doesn't seem to be present in the Windows SDK (at least as included
-   with Visual Studio Express). */
-typedef struct _REPARSE_DATA_BUFFER {
-    ULONG ReparseTag;
-    USHORT ReparseDataLength;
-    USHORT Reserved;
-    union {
-        struct {
-            USHORT SubstituteNameOffset;
-            USHORT SubstituteNameLength;
-            USHORT PrintNameOffset;
-            USHORT PrintNameLength;
-            ULONG Flags;
-            WCHAR PathBuffer[1];
-        } SymbolicLinkReparseBuffer;
-
-        struct {
-            USHORT SubstituteNameOffset;
-            USHORT  SubstituteNameLength;
-            USHORT  PrintNameOffset;
-            USHORT  PrintNameLength;
-            WCHAR  PathBuffer[1];
-        } MountPointReparseBuffer;
-
-        struct {
-            UCHAR  DataBuffer[1];
-        } GenericReparseBuffer;
-    };
-} REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER;
-
-#define REPARSE_DATA_BUFFER_HEADER_SIZE  FIELD_OFFSET(REPARSE_DATA_BUFFER,\
-                                                      GenericReparseBuffer)
-#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE  ( 16 * 1024 )
 
 static int
 win32_get_reparse_tag(HANDLE reparse_point_handle, ULONG *reparse_tag)
@@ -4492,7 +4462,10 @@
             find_data_handle = FindFirstFileW(lpFileName, &find_data);
 
             if(find_data_handle != INVALID_HANDLE_VALUE) {
-                is_link = find_data.dwReserved0 == IO_REPARSE_TAG_SYMLINK;
+                /* IO_REPARSE_TAG_SYMLINK if it is a symlink and
+                   IO_REPARSE_TAG_MOUNT_POINT if it is a junction point. */
+                is_link = find_data.dwReserved0 == IO_REPARSE_TAG_SYMLINK ||
+                          find_data.dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT;
                 FindClose(find_data_handle);
             }
         }
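For reference, the dwReserved0 test that the posixmodule stat code above now applies, pulled out into a stand-alone, Windows-only predicate (the helper is hypothetical):

#include <windows.h>

#ifndef IO_REPARSE_TAG_SYMLINK
#define IO_REPARSE_TAG_SYMLINK (0xA000000CL)
#endif
#ifndef IO_REPARSE_TAG_MOUNT_POINT
#define IO_REPARSE_TAG_MOUNT_POINT (0xA0000003L)
#endif

/* True when a directory-scan entry should be treated as a link: either a
   symlink or a junction point.  dwReserved0 only holds the reparse tag when
   the entry really is a reparse point. */
static int
is_symlink_or_junction(const WIN32_FIND_DATAW *data)
{
    if (!(data->dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT))
        return 0;
    return data->dwReserved0 == IO_REPARSE_TAG_SYMLINK ||
           data->dwReserved0 == IO_REPARSE_TAG_MOUNT_POINT;
}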
diff --git a/Modules/signalmodule.c b/Modules/signalmodule.c
index fedaddf..cf4ba61 100644
--- a/Modules/signalmodule.c
+++ b/Modules/signalmodule.c
@@ -956,7 +956,7 @@
 
 static struct PyModuleDef signalmodule = {
     PyModuleDef_HEAD_INIT,
-    "signal",
+    "_signal",
     module_doc,
     -1,
     signal_methods,
@@ -967,7 +967,7 @@
 };
 
 PyMODINIT_FUNC
-PyInit_signal(void)
+PyInit__signal(void)
 {
     PyObject *m, *d, *x;
     int i;
@@ -1380,7 +1380,7 @@
 void
 PyOS_InitInterrupts(void)
 {
-    PyObject *m = PyImport_ImportModule("signal");
+    PyObject *m = PyImport_ImportModule("_signal");
     if (m) {
         Py_DECREF(m);
     }
diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c
index d0149dd..5a2893c 100644
--- a/Modules/socketmodule.c
+++ b/Modules/socketmodule.c
@@ -121,7 +121,7 @@
 getsockname() -- return local address\n\
 getsockopt(level, optname[, buflen]) -- get socket options\n\
 gettimeout() -- return timeout or None\n\
-listen(n) -- start listening for incoming connections\n\
+listen([n]) -- start listening for incoming connections\n\
 recv(buflen[, flags]) -- receive data\n\
 recv_into(buffer[, nbytes[, flags]]) -- receive data (into a buffer)\n\
 recvfrom(buflen[, flags]) -- receive data and sender\'s address\n\
@@ -2534,14 +2534,16 @@
 /* s.listen(n) method */
 
 static PyObject *
-sock_listen(PySocketSockObject *s, PyObject *arg)
+sock_listen(PySocketSockObject *s, PyObject *args)
 {
-    int backlog;
+    /* We try to choose a default backlog high enough to avoid connection drops
+     * for common workloads, yet not too high, to limit resource usage. */
+    int backlog = Py_MIN(SOMAXCONN, 128);
     int res;
 
-    backlog = _PyLong_AsInt(arg);
-    if (backlog == -1 && PyErr_Occurred())
+    if (!PyArg_ParseTuple(args, "|i:listen", &backlog))
         return NULL;
+
     Py_BEGIN_ALLOW_THREADS
     /* To avoid problems on systems that don't allow a negative backlog
      * (which doesn't make sense anyway) we force a minimum value of 0. */
@@ -2556,12 +2558,12 @@
 }
 
 PyDoc_STRVAR(listen_doc,
-"listen(backlog)\n\
+"listen([backlog])\n\
 \n\
-Enable a server to accept connections.  The backlog argument must be at\n\
-least 0 (if it is lower, it is set to 0); it specifies the number of\n\
+Enable a server to accept connections.  If backlog is specified, it must be\n\
+at least 0 (if it is lower, it is set to 0); it specifies the number of\n\
 unaccepted connections that the system will allow before refusing new\n\
-connections.");
+connections. If not specified, a reasonable default value is chosen.");
 
 
 /*
@@ -3795,7 +3797,7 @@
     {"share",         (PyCFunction)sock_share, METH_VARARGS,
                       sock_share_doc},
 #endif
-    {"listen",            (PyCFunction)sock_listen, METH_O,
+    {"listen",            (PyCFunction)sock_listen, METH_VARARGS,
                       listen_doc},
     {"recv",              (PyCFunction)sock_recv, METH_VARARGS,
                       recv_doc},
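Switching sock_listen() from METH_O to METH_VARARGS is what makes backlog optional: with the "|i" format, PyArg_ParseTuple() leaves the C variable untouched when the argument is omitted. A minimal sketch of the idiom (hypothetical method, not the real sock_listen):

#include "Python.h"

static PyObject *
example_listen(PyObject *self, PyObject *args)
{
    int backlog = 128;                       /* default used when omitted */

    if (!PyArg_ParseTuple(args, "|i:listen", &backlog))
        return NULL;
    if (backlog < 0)
        backlog = 0;                         /* mirror the clamping above */
    return PyLong_FromLong(backlog);
}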
diff --git a/Modules/winreparse.h b/Modules/winreparse.h
new file mode 100644
index 0000000..66f7775
--- /dev/null
+++ b/Modules/winreparse.h
@@ -0,0 +1,53 @@
+#ifndef Py_WINREPARSE_H
+#define Py_WINREPARSE_H
+
+#ifdef MS_WINDOWS
+#include <Windows.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The following structure was copied from
+   http://msdn.microsoft.com/en-us/library/ff552012.aspx as the required
+   include doesn't seem to be present in the Windows SDK (at least as included
+   with Visual Studio Express). */
+typedef struct _REPARSE_DATA_BUFFER {
+    ULONG ReparseTag;
+    USHORT ReparseDataLength;
+    USHORT Reserved;
+    union {
+        struct {
+            USHORT SubstituteNameOffset;
+            USHORT SubstituteNameLength;
+            USHORT PrintNameOffset;
+            USHORT PrintNameLength;
+            ULONG Flags;
+            WCHAR PathBuffer[1];
+        } SymbolicLinkReparseBuffer;
+
+        struct {
+            USHORT SubstituteNameOffset;
+            USHORT  SubstituteNameLength;
+            USHORT  PrintNameOffset;
+            USHORT  PrintNameLength;
+            WCHAR  PathBuffer[1];
+        } MountPointReparseBuffer;
+
+        struct {
+            UCHAR  DataBuffer[1];
+        } GenericReparseBuffer;
+    };
+} REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER;
+
+#define REPARSE_DATA_BUFFER_HEADER_SIZE  FIELD_OFFSET(REPARSE_DATA_BUFFER,\
+                                                      GenericReparseBuffer)
+#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE  ( 16 * 1024 )
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MS_WINDOWS */
+
+#endif /* !Py_WINREPARSE_H */
diff --git a/Objects/abstract.c b/Objects/abstract.c
index 38ddb0f..aeb8634 100644
--- a/Objects/abstract.c
+++ b/Objects/abstract.c
@@ -932,6 +932,12 @@
 }
 
 PyObject *
+PyNumber_MatrixMultiply(PyObject *v, PyObject *w)
+{
+    return binary_op(v, w, NB_SLOT(nb_matrix_multiply), "@");
+}
+
+PyObject *
 PyNumber_FloorDivide(PyObject *v, PyObject *w)
 {
     return binary_op(v, w, NB_SLOT(nb_floor_divide), "//");
@@ -1012,6 +1018,7 @@
 INPLACE_BINOP(PyNumber_InPlaceLshift, nb_inplace_lshift, nb_lshift, "<<=")
 INPLACE_BINOP(PyNumber_InPlaceRshift, nb_inplace_rshift, nb_rshift, ">>=")
 INPLACE_BINOP(PyNumber_InPlaceSubtract, nb_inplace_subtract, nb_subtract, "-=")
+INPLACE_BINOP(PyNumber_InMatrixMultiply, nb_inplace_matrix_multiply, nb_matrix_multiply, "@=")
 
 PyObject *
 PyNumber_InPlaceFloorDivide(PyObject *v, PyObject *w)
@@ -1078,6 +1085,13 @@
 }
 
 PyObject *
+PyNumber_InPlaceMatrixMultiply(PyObject *v, PyObject *w)
+{
+    return binary_iop(v, w, NB_SLOT(nb_inplace_matrix_multiply),
+                      NB_SLOT(nb_matrix_multiply), "@=");
+}
+
+PyObject *
 PyNumber_InPlaceRemainder(PyObject *v, PyObject *w)
 {
     return binary_iop(v, w, NB_SLOT(nb_inplace_remainder),
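An extension type opts in to the new "@" operator through the two PyNumberMethods slots wired up above. A sketch (the matrix_matmul function and its containing type are hypothetical):

#include "Python.h"

/* Placeholder: a real implementation would multiply the two operands. */
static PyObject *
matrix_matmul(PyObject *left, PyObject *right)
{
    (void)right;
    Py_INCREF(left);
    return left;
}

static PyNumberMethods matrix_as_number;   /* static storage: all slots NULL */

static void
init_matrix_number_slots(void)
{
    /* Assign by name rather than by position, since the slot table grew. */
    matrix_as_number.nb_matrix_multiply = matrix_matmul;
    /* Leaving nb_inplace_matrix_multiply NULL makes "a @= b" fall back to
       nb_matrix_multiply through binary_iop(), as wired up above. */
}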
diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c
index 5b75705..68b9c4a 100644
--- a/Objects/bytearrayobject.c
+++ b/Objects/bytearrayobject.c
@@ -813,9 +813,21 @@
     }
     else {
         if (count > 0) {
-            if (PyByteArray_Resize((PyObject *)self, count))
+            void *sval;
+            Py_ssize_t alloc;
+
+            assert (Py_SIZE(self) == 0);
+
+            alloc = count + 1;
+            sval = PyObject_Calloc(1, alloc);
+            if (sval == NULL)
                 return -1;
-            memset(PyByteArray_AS_STRING(self), 0, count);
+
+            PyObject_Free(self->ob_bytes);
+
+            self->ob_bytes = self->ob_start = sval;
+            Py_SIZE(self) = count;
+            self->ob_alloc = alloc;
         }
         return 0;
     }
diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c
index b93b9ef..911a93b 100644
--- a/Objects/bytesobject.c
+++ b/Objects/bytesobject.c
@@ -71,15 +71,11 @@
    PyBytes_FromStringAndSize()) or the length of the string in the `str'
    parameter (for PyBytes_FromString()).
 */
-PyObject *
-PyBytes_FromStringAndSize(const char *str, Py_ssize_t size)
+static PyObject *
+_PyBytes_FromSize(Py_ssize_t size, int use_calloc)
 {
     PyBytesObject *op;
-    if (size < 0) {
-        PyErr_SetString(PyExc_SystemError,
-            "Negative size passed to PyBytes_FromStringAndSize");
-        return NULL;
-    }
+    assert(size >= 0);
     if (size == 0 && (op = nullstring) != NULL) {
 #ifdef COUNT_ALLOCS
         null_strings++;
@@ -87,15 +83,6 @@
         Py_INCREF(op);
         return (PyObject *)op;
     }
-    if (size == 1 && str != NULL &&
-        (op = characters[*str & UCHAR_MAX]) != NULL)
-    {
-#ifdef COUNT_ALLOCS
-        one_strings++;
-#endif
-        Py_INCREF(op);
-        return (PyObject *)op;
-    }
 
     if (size > PY_SSIZE_T_MAX - PyBytesObject_SIZE) {
         PyErr_SetString(PyExc_OverflowError,
@@ -104,19 +91,52 @@
     }
 
     /* Inline PyObject_NewVar */
-    op = (PyBytesObject *)PyObject_MALLOC(PyBytesObject_SIZE + size);
+    if (use_calloc)
+        op = (PyBytesObject *)PyObject_Calloc(1, PyBytesObject_SIZE + size);
+    else
+        op = (PyBytesObject *)PyObject_Malloc(PyBytesObject_SIZE + size);
     if (op == NULL)
         return PyErr_NoMemory();
     (void)PyObject_INIT_VAR(op, &PyBytes_Type, size);
     op->ob_shash = -1;
-    if (str != NULL)
-        Py_MEMCPY(op->ob_sval, str, size);
-    op->ob_sval[size] = '\0';
-    /* share short strings */
+    if (!use_calloc)
+        op->ob_sval[size] = '\0';
+    /* empty byte string singleton */
     if (size == 0) {
         nullstring = op;
         Py_INCREF(op);
-    } else if (size == 1 && str != NULL) {
+    }
+    return (PyObject *) op;
+}
+
+PyObject *
+PyBytes_FromStringAndSize(const char *str, Py_ssize_t size)
+{
+    PyBytesObject *op;
+    if (size < 0) {
+        PyErr_SetString(PyExc_SystemError,
+            "Negative size passed to PyBytes_FromStringAndSize");
+        return NULL;
+    }
+    if (size == 1 && str != NULL &&
+        (op = characters[*str & UCHAR_MAX]) != NULL)
+    {
+#ifdef COUNT_ALLOCS
+        one_strings++;
+#endif
+        Py_INCREF(op);
+        return (PyObject *)op;
+    }
+
+    op = (PyBytesObject *)_PyBytes_FromSize(size, 0);
+    if (op == NULL)
+        return NULL;
+    if (str == NULL)
+        return (PyObject *) op;
+
+    Py_MEMCPY(op->ob_sval, str, size);
+    /* share short strings */
+    if (size == 1) {
         characters[*str & UCHAR_MAX] = op;
         Py_INCREF(op);
     }
@@ -2482,7 +2502,7 @@
                             "argument");
             return NULL;
         }
-        return PyBytes_FromString("");
+        return PyBytes_FromStringAndSize(NULL, 0);
     }
 
     if (PyUnicode_Check(x)) {
@@ -2532,11 +2552,9 @@
         return NULL;
     }
     else {
-        new = PyBytes_FromStringAndSize(NULL, size);
+        new = _PyBytes_FromSize(size, 1);
         if (new == NULL)
             return NULL;
-        if (size > 0)
-            memset(((PyBytesObject*)new)->ob_sval, 0, size);
         return new;
     }
 
@@ -2781,7 +2799,6 @@
 void
 PyBytes_Concat(PyObject **pv, PyObject *w)
 {
-    PyObject *v;
     assert(pv != NULL);
     if (*pv == NULL)
         return;
@@ -2789,9 +2806,45 @@
         Py_CLEAR(*pv);
         return;
     }
-    v = bytes_concat(*pv, w);
-    Py_DECREF(*pv);
-    *pv = v;
+
+    if (Py_REFCNT(*pv) == 1 && PyBytes_CheckExact(*pv)) {
+        /* Only one reference, so we can resize in place */
+        Py_ssize_t oldsize;
+        Py_buffer wb;
+
+        wb.len = -1;
+        if (_getbuffer(w, &wb) < 0) {
+            PyErr_Format(PyExc_TypeError, "can't concat %.100s to %.100s",
+                         Py_TYPE(w)->tp_name, Py_TYPE(*pv)->tp_name);
+            Py_CLEAR(*pv);
+            return;
+        }
+
+        oldsize = PyBytes_GET_SIZE(*pv);
+        if (oldsize > PY_SSIZE_T_MAX - wb.len) {
+            PyErr_NoMemory();
+            goto error;
+        }
+        if (_PyBytes_Resize(pv, oldsize + wb.len) < 0)
+            goto error;
+
+        memcpy(PyBytes_AS_STRING(*pv) + oldsize, wb.buf, wb.len);
+        PyBuffer_Release(&wb);
+        return;
+
+      error:
+        PyBuffer_Release(&wb);
+        Py_CLEAR(*pv);
+        return;
+    }
+
+    else {
+        /* Multiple references, need to create new object */
+        PyObject *v;
+        v = bytes_concat(*pv, w);
+        Py_DECREF(*pv);
+        *pv = v;
+    }
 }
 
 void
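The fast path added to PyBytes_Concat() matters for callers that append repeatedly while holding the only reference to the result. A usage sketch (the helper is hypothetical):

#include "Python.h"

/* Concatenate three bytes-like objects.  PyBytes_Concat() clears *result and
   sets an exception on failure, and later calls return immediately once
   *result is NULL, so the error check can be left to the caller. */
static PyObject *
join_three(PyObject *a, PyObject *b, PyObject *c)
{
    PyObject *result = PyBytes_FromStringAndSize(NULL, 0);

    if (result == NULL)
        return NULL;
    PyBytes_Concat(&result, a);
    PyBytes_Concat(&result, b);   /* resized in place once refcount is 1 */
    PyBytes_Concat(&result, c);
    return result;                /* may be NULL with an exception set */
}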
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index 1ccea6e..6c78b94 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -1101,6 +1101,44 @@
     return *value_addr;
 }
 
+PyObject *
+_PyDict_GetItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash)
+{
+    PyDictObject *mp = (PyDictObject *)op;
+    PyDictKeyEntry *ep;
+    PyThreadState *tstate;
+    PyObject **value_addr;
+
+    if (!PyDict_Check(op))
+        return NULL;
+
+    /* We can arrive here with a NULL tstate during initialization: try
+       running "python -Wi" for an example related to string interning.
+       Let's just hope that no exception occurs then...  This must be
+       _PyThreadState_Current and not PyThreadState_GET() because in debug
+       mode, the latter complains if tstate is NULL. */
+    tstate = (PyThreadState*)_Py_atomic_load_relaxed(
+        &_PyThreadState_Current);
+    if (tstate != NULL && tstate->curexc_type != NULL) {
+        /* preserve the existing exception */
+        PyObject *err_type, *err_value, *err_tb;
+        PyErr_Fetch(&err_type, &err_value, &err_tb);
+        ep = (mp->ma_keys->dk_lookup)(mp, key, hash, &value_addr);
+        /* ignore errors */
+        PyErr_Restore(err_type, err_value, err_tb);
+        if (ep == NULL)
+            return NULL;
+    }
+    else {
+        ep = (mp->ma_keys->dk_lookup)(mp, key, hash, &value_addr);
+        if (ep == NULL) {
+            PyErr_Clear();
+            return NULL;
+        }
+    }
+    return *value_addr;
+}
+
 /* Variant of PyDict_GetItem() that doesn't suppress exceptions.
    This returns NULL *with* an exception set if an exception occurred.
    It returns NULL *without* an exception set if the key wasn't present.
@@ -1208,6 +1246,24 @@
 }
 
 int
+_PyDict_SetItem_KnownHash(PyObject *op, PyObject *key, PyObject *value,
+                         Py_hash_t hash)
+{
+    PyDictObject *mp;
+
+    if (!PyDict_Check(op)) {
+        PyErr_BadInternalCall();
+        return -1;
+    }
+    assert(key);
+    assert(value);
+    mp = (PyDictObject *)op;
+
+    /* insertdict() handles any resizing that might be necessary */
+    return insertdict(mp, key, hash, value);
+}
+
+int
 PyDict_DelItem(PyObject *op, PyObject *key)
 {
     PyDictObject *mp;
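The new KnownHash entry points let a caller hash the key once and reuse that hash for both the lookup and the store. A sketch of that pattern (these are private, underscore-prefixed APIs, and the counter helper is hypothetical):

#include "Python.h"

/* Increment counts[key], treating a missing key as 0.  Note that
   _PyDict_GetItem_KnownHash() follows PyDict_GetItem(): it returns a borrowed
   reference, or NULL without necessarily setting an exception. */
static int
increment_counter(PyObject *counts, PyObject *key)
{
    PyObject *old, *new_value;
    long value = 0;
    Py_hash_t hash = PyObject_Hash(key);

    if (hash == -1)
        return -1;
    old = _PyDict_GetItem_KnownHash(counts, key, hash);
    if (old != NULL) {
        value = PyLong_AsLong(old);
        if (value == -1 && PyErr_Occurred())
            return -1;
    }
    new_value = PyLong_FromLong(value + 1);
    if (new_value == NULL)
        return -1;
    if (_PyDict_SetItem_KnownHash(counts, key, new_value, hash) < 0) {
        Py_DECREF(new_value);
        return -1;
    }
    Py_DECREF(new_value);
    return 0;
}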
diff --git a/Objects/longobject.c b/Objects/longobject.c
index 7036c0e..c1416a0 100644
--- a/Objects/longobject.c
+++ b/Objects/longobject.c
@@ -21,7 +21,6 @@
          Py_SIZE(x) < 0 ? -(sdigit)(x)->ob_digit[0] :   \
              (Py_SIZE(x) == 0 ? (sdigit)0 :                             \
               (sdigit)(x)->ob_digit[0]))
-#define ABS(x) ((x) < 0 ? -(x) : (x))
 
 #if NSMALLNEGINTS + NSMALLPOSINTS > 0
 /* Small integers are preallocated in this array so that they
@@ -57,7 +56,7 @@
 static PyLongObject *
 maybe_small_long(PyLongObject *v)
 {
-    if (v && ABS(Py_SIZE(v)) <= 1) {
+    if (v && Py_ABS(Py_SIZE(v)) <= 1) {
         sdigit ival = MEDIUM_VALUE(v);
         if (-NSMALLNEGINTS <= ival && ival < NSMALLPOSINTS) {
             Py_DECREF(v);
@@ -114,7 +113,7 @@
 static PyLongObject *
 long_normalize(PyLongObject *v)
 {
-    Py_ssize_t j = ABS(Py_SIZE(v));
+    Py_ssize_t j = Py_ABS(Py_SIZE(v));
     Py_ssize_t i = j;
 
     while (i > 0 && v->ob_digit[i-1] == 0)
@@ -718,7 +717,7 @@
 
     assert(v != NULL);
     assert(PyLong_Check(v));
-    ndigits = ABS(Py_SIZE(v));
+    ndigits = Py_ABS(Py_SIZE(v));
     assert(ndigits == 0 || v->ob_digit[ndigits - 1] != 0);
     if (ndigits > 0) {
         digit msd = v->ob_digit[ndigits - 1];
@@ -1565,7 +1564,7 @@
 static PyLongObject *
 divrem1(PyLongObject *a, digit n, digit *prem)
 {
-    const Py_ssize_t size = ABS(Py_SIZE(a));
+    const Py_ssize_t size = Py_ABS(Py_SIZE(a));
     PyLongObject *z;
 
     assert(n > 0 && n <= PyLong_MASK);
@@ -1597,7 +1596,7 @@
         PyErr_BadInternalCall();
         return -1;
     }
-    size_a = ABS(Py_SIZE(a));
+    size_a = Py_ABS(Py_SIZE(a));
     negative = Py_SIZE(a) < 0;
 
     /* quick and dirty upper bound for the number of digits
@@ -1766,7 +1765,7 @@
         PyErr_BadInternalCall();
         return -1;
     }
-    size_a = ABS(Py_SIZE(a));
+    size_a = Py_ABS(Py_SIZE(a));
     negative = Py_SIZE(a) < 0;
 
     /* Compute a rough upper bound for the length of the string */
@@ -2380,7 +2379,7 @@
 long_divrem(PyLongObject *a, PyLongObject *b,
             PyLongObject **pdiv, PyLongObject **prem)
 {
-    Py_ssize_t size_a = ABS(Py_SIZE(a)), size_b = ABS(Py_SIZE(b));
+    Py_ssize_t size_a = Py_ABS(Py_SIZE(a)), size_b = Py_ABS(Py_SIZE(b));
     PyLongObject *z;
 
     if (size_b == 0) {
@@ -2439,7 +2438,7 @@
 }
 
 /* Unsigned int division with remainder -- the algorithm.  The arguments v1
-   and w1 should satisfy 2 <= ABS(Py_SIZE(w1)) <= ABS(Py_SIZE(v1)). */
+   and w1 should satisfy 2 <= Py_ABS(Py_SIZE(w1)) <= Py_ABS(Py_SIZE(v1)). */
 
 static PyLongObject *
 x_divrem(PyLongObject *v1, PyLongObject *w1, PyLongObject **prem)
@@ -2459,8 +2458,8 @@
        that won't overflow a digit. */
 
     /* allocate space; w will also be used to hold the final remainder */
-    size_v = ABS(Py_SIZE(v1));
-    size_w = ABS(Py_SIZE(w1));
+    size_v = Py_ABS(Py_SIZE(v1));
+    size_w = Py_ABS(Py_SIZE(w1));
     assert(size_v >= size_w && size_w >= 2); /* Assert checks by div() */
     v = _PyLong_New(size_v+1);
     if (v == NULL) {
@@ -2591,7 +2590,7 @@
        multiple of 4, rounding ties to a multiple of 8. */
     static const int half_even_correction[8] = {0, -1, -2, 1, 0, -1, 2, 1};
 
-    a_size = ABS(Py_SIZE(a));
+    a_size = Py_ABS(Py_SIZE(a));
     if (a_size == 0) {
         /* Special case for 0: significand 0.0, exponent 0. */
         *e = 0;
@@ -2732,7 +2731,7 @@
         sign = Py_SIZE(a) - Py_SIZE(b);
     }
     else {
-        Py_ssize_t i = ABS(Py_SIZE(a));
+        Py_ssize_t i = Py_ABS(Py_SIZE(a));
         while (--i >= 0 && a->ob_digit[i] == b->ob_digit[i])
             ;
         if (i < 0)
@@ -2850,7 +2849,7 @@
 static PyLongObject *
 x_add(PyLongObject *a, PyLongObject *b)
 {
-    Py_ssize_t size_a = ABS(Py_SIZE(a)), size_b = ABS(Py_SIZE(b));
+    Py_ssize_t size_a = Py_ABS(Py_SIZE(a)), size_b = Py_ABS(Py_SIZE(b));
     PyLongObject *z;
     Py_ssize_t i;
     digit carry = 0;
@@ -2884,7 +2883,7 @@
 static PyLongObject *
 x_sub(PyLongObject *a, PyLongObject *b)
 {
-    Py_ssize_t size_a = ABS(Py_SIZE(a)), size_b = ABS(Py_SIZE(b));
+    Py_ssize_t size_a = Py_ABS(Py_SIZE(a)), size_b = Py_ABS(Py_SIZE(b));
     PyLongObject *z;
     Py_ssize_t i;
     int sign = 1;
@@ -2944,7 +2943,7 @@
 
     CHECK_BINOP(a, b);
 
-    if (ABS(Py_SIZE(a)) <= 1 && ABS(Py_SIZE(b)) <= 1) {
+    if (Py_ABS(Py_SIZE(a)) <= 1 && Py_ABS(Py_SIZE(b)) <= 1) {
         PyObject *result = PyLong_FromLong(MEDIUM_VALUE(a) +
                                           MEDIUM_VALUE(b));
         return result;
@@ -2974,7 +2973,7 @@
 
     CHECK_BINOP(a, b);
 
-    if (ABS(Py_SIZE(a)) <= 1 && ABS(Py_SIZE(b)) <= 1) {
+    if (Py_ABS(Py_SIZE(a)) <= 1 && Py_ABS(Py_SIZE(b)) <= 1) {
         PyObject* r;
         r = PyLong_FromLong(MEDIUM_VALUE(a)-MEDIUM_VALUE(b));
         return r;
@@ -3003,8 +3002,8 @@
 x_mul(PyLongObject *a, PyLongObject *b)
 {
     PyLongObject *z;
-    Py_ssize_t size_a = ABS(Py_SIZE(a));
-    Py_ssize_t size_b = ABS(Py_SIZE(b));
+    Py_ssize_t size_a = Py_ABS(Py_SIZE(a));
+    Py_ssize_t size_b = Py_ABS(Py_SIZE(b));
     Py_ssize_t i;
 
     z = _PyLong_New(size_a + size_b);
@@ -3098,7 +3097,7 @@
 {
     PyLongObject *hi, *lo;
     Py_ssize_t size_lo, size_hi;
-    const Py_ssize_t size_n = ABS(Py_SIZE(n));
+    const Py_ssize_t size_n = Py_ABS(Py_SIZE(n));
 
     size_lo = Py_MIN(size_n, size);
     size_hi = size_n - size_lo;
@@ -3127,8 +3126,8 @@
 static PyLongObject *
 k_mul(PyLongObject *a, PyLongObject *b)
 {
-    Py_ssize_t asize = ABS(Py_SIZE(a));
-    Py_ssize_t bsize = ABS(Py_SIZE(b));
+    Py_ssize_t asize = Py_ABS(Py_SIZE(a));
+    Py_ssize_t bsize = Py_ABS(Py_SIZE(b));
     PyLongObject *ah = NULL;
     PyLongObject *al = NULL;
     PyLongObject *bh = NULL;
@@ -3348,8 +3347,8 @@
 static PyLongObject *
 k_lopsided_mul(PyLongObject *a, PyLongObject *b)
 {
-    const Py_ssize_t asize = ABS(Py_SIZE(a));
-    Py_ssize_t bsize = ABS(Py_SIZE(b));
+    const Py_ssize_t asize = Py_ABS(Py_SIZE(a));
+    Py_ssize_t bsize = Py_ABS(Py_SIZE(b));
     Py_ssize_t nbdone;          /* # of b digits already multiplied */
     PyLongObject *ret;
     PyLongObject *bslice = NULL;
@@ -3407,7 +3406,7 @@
     CHECK_BINOP(a, b);
 
     /* fast path for single-digit multiplication */
-    if (ABS(Py_SIZE(a)) <= 1 && ABS(Py_SIZE(b)) <= 1) {
+    if (Py_ABS(Py_SIZE(a)) <= 1 && Py_ABS(Py_SIZE(b)) <= 1) {
         stwodigits v = (stwodigits)(MEDIUM_VALUE(a)) * MEDIUM_VALUE(b);
 #ifdef HAVE_LONG_LONG
         return PyLong_FromLongLong((PY_LONG_LONG)v);
@@ -3614,8 +3613,8 @@
     */
 
     /* Reduce to case where a and b are both positive. */
-    a_size = ABS(Py_SIZE(a));
-    b_size = ABS(Py_SIZE(b));
+    a_size = Py_ABS(Py_SIZE(a));
+    b_size = Py_ABS(Py_SIZE(b));
     negate = (Py_SIZE(a) < 0) ^ (Py_SIZE(b) < 0);
     if (b_size == 0) {
         PyErr_SetString(PyExc_ZeroDivisionError,
@@ -3731,7 +3730,7 @@
             inexact = 1;
         Py_DECREF(rem);
     }
-    x_size = ABS(Py_SIZE(x));
+    x_size = Py_ABS(Py_SIZE(x));
     assert(x_size > 0); /* result of division is never zero */
     x_bits = (x_size-1)*PyLong_SHIFT+bits_in_digit(x->ob_digit[x_size-1]);
 
@@ -3841,7 +3840,7 @@
 
     if (Py_SIZE(b) < 0) {  /* if exponent is negative */
         if (c) {
-            PyErr_SetString(PyExc_TypeError, "pow() 2nd argument "
+            PyErr_SetString(PyExc_ValueError, "pow() 2nd argument "
                             "cannot be negative when 3rd argument specified");
             goto Error;
         }
@@ -4003,7 +4002,7 @@
     /* Implement ~x as -(x+1) */
     PyLongObject *x;
     PyLongObject *w;
-    if (ABS(Py_SIZE(v)) <=1)
+    if (Py_ABS(Py_SIZE(v)) <=1)
         return PyLong_FromLong(-(MEDIUM_VALUE(v)+1));
     w = (PyLongObject *)PyLong_FromLong(1L);
     if (w == NULL)
@@ -4020,7 +4019,7 @@
 long_neg(PyLongObject *v)
 {
     PyLongObject *z;
-    if (ABS(Py_SIZE(v)) <= 1)
+    if (Py_ABS(Py_SIZE(v)) <= 1)
         return PyLong_FromLong(-MEDIUM_VALUE(v));
     z = (PyLongObject *)_PyLong_Copy(v);
     if (z != NULL)
@@ -4075,7 +4074,7 @@
             goto rshift_error;
         }
         wordshift = shiftby / PyLong_SHIFT;
-        newsize = ABS(Py_SIZE(a)) - wordshift;
+        newsize = Py_ABS(Py_SIZE(a)) - wordshift;
         if (newsize <= 0)
             return PyLong_FromLong(0);
         loshift = shiftby % PyLong_SHIFT;
@@ -4122,7 +4121,7 @@
     wordshift = shiftby / PyLong_SHIFT;
     remshift  = shiftby - wordshift * PyLong_SHIFT;
 
-    oldsize = ABS(Py_SIZE(a));
+    oldsize = Py_ABS(Py_SIZE(a));
     newsize = oldsize + wordshift;
     if (remshift)
         ++newsize;
@@ -4183,7 +4182,7 @@
        result back to sign-magnitude at the end. */
 
     /* If a is negative, replace it by its two's complement. */
-    size_a = ABS(Py_SIZE(a));
+    size_a = Py_ABS(Py_SIZE(a));
     nega = Py_SIZE(a) < 0;
     if (nega) {
         z = _PyLong_New(size_a);
@@ -4197,7 +4196,7 @@
         Py_INCREF(a);
 
     /* Same for b. */
-    size_b = ABS(Py_SIZE(b));
+    size_b = Py_ABS(Py_SIZE(b));
     negb = Py_SIZE(b) < 0;
     if (negb) {
         z = _PyLong_New(size_b);
@@ -4630,7 +4629,7 @@
 {
     Py_ssize_t res;
 
-    res = offsetof(PyLongObject, ob_digit) + ABS(Py_SIZE(v))*sizeof(digit);
+    res = offsetof(PyLongObject, ob_digit) + Py_ABS(Py_SIZE(v))*sizeof(digit);
     return PyLong_FromSsize_t(res);
 }
 
@@ -4644,7 +4643,7 @@
     assert(v != NULL);
     assert(PyLong_Check(v));
 
-    ndigits = ABS(Py_SIZE(v));
+    ndigits = Py_ABS(Py_SIZE(v));
     if (ndigits == 0)
         return PyLong_FromLong(0);
 
@@ -4849,7 +4848,7 @@
     if (type != &PyLong_Type && PyType_IsSubtype(type, &PyLong_Type)) {
         PyLongObject *newobj;
         int i;
-        Py_ssize_t n = ABS(Py_SIZE(long_obj));
+        Py_ssize_t n = Py_ABS(Py_SIZE(long_obj));
 
         newobj = (PyLongObject *)type->tp_alloc(type, n);
         if (newobj == NULL) {
diff --git a/Objects/memoryobject.c b/Objects/memoryobject.c
index cb644b8..5148ce6 100644
--- a/Objects/memoryobject.c
+++ b/Objects/memoryobject.c
@@ -48,9 +48,6 @@
 */
 
 
-#define XSTRINGIZE(v) #v
-#define STRINGIZE(v) XSTRINGIZE(v)
-
 #define CHECK_MBUF_RELEASED(mbuf) \
     if (((_PyManagedBufferObject *)mbuf)->flags&_Py_MANAGED_BUFFER_RELEASED) { \
         PyErr_SetString(PyExc_ValueError,                                      \
@@ -223,7 +220,7 @@
 
 
 PyDoc_STRVAR(memory_doc,
-"memoryview(object)\n\
+"memoryview($module, object)\n--\n\
 \n\
 Create a new memoryview object which references the given object.");
 
@@ -660,7 +657,7 @@
     if (src->ndim > PyBUF_MAX_NDIM) {
         PyErr_SetString(PyExc_ValueError,
             "memoryview: number of dimensions must not exceed "
-            STRINGIZE(PyBUF_MAX_NDIM));
+            Py_STRINGIFY(PyBUF_MAX_NDIM));
         return NULL;
     }
 
@@ -1341,7 +1338,7 @@
         if (ndim > PyBUF_MAX_NDIM) {
             PyErr_SetString(PyExc_ValueError,
                 "memoryview: number of dimensions must not exceed "
-                STRINGIZE(PyBUF_MAX_NDIM));
+                Py_STRINGIFY(PyBUF_MAX_NDIM));
             return NULL;
         }
         if (self->view.ndim != 1 && ndim != 1) {
@@ -2900,6 +2897,7 @@
 PyDoc_STRVAR(memory_contiguous_doc,
              "A bool indicating whether the memory is contiguous.");
 
+
 static PyGetSetDef memory_getsetlist[] = {
     {"obj",             (getter)memory_obj_get,        NULL, memory_obj_doc},
     {"nbytes",          (getter)memory_nbytes_get,     NULL, memory_nbytes_doc},
@@ -2917,19 +2915,19 @@
 };
 
 PyDoc_STRVAR(memory_release_doc,
-"M.release() -> None\n\
+"release($self, /)\n--\n\
 \n\
 Release the underlying buffer exposed by the memoryview object.");
 PyDoc_STRVAR(memory_tobytes_doc,
-"M.tobytes() -> bytes\n\
+"tobytes($self, /)\n--\n\
 \n\
 Return the data in the buffer as a byte string.");
 PyDoc_STRVAR(memory_tolist_doc,
-"M.tolist() -> list\n\
+"tolist($self, /)\n--\n\
 \n\
 Return the data in the buffer as a list of elements.");
 PyDoc_STRVAR(memory_cast_doc,
-"M.cast(format[, shape]) -> memoryview\n\
+"cast($self, /, format, *, shape)\n--\n\
 \n\
 Cast a memoryview to a new format or shape.");
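Py_STRINGIFY() from pymacro.h replaces the per-file XSTRINGIZE/STRINGIZE helpers removed above; it expands its argument first, so a numeric macro can be spliced into an error message at compile time. A small sketch (MAX_DEPTH is a made-up constant):

#include "Python.h"

#define MAX_DEPTH 64

static void
set_depth_error(void)
{
    /* The two string literals are concatenated by the compiler, producing
       "nesting must not exceed 64". */
    PyErr_SetString(PyExc_ValueError,
                    "nesting must not exceed " Py_STRINGIFY(MAX_DEPTH));
}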
 
diff --git a/Objects/moduleobject.c b/Objects/moduleobject.c
index f509932..441e731 100644
--- a/Objects/moduleobject.c
+++ b/Objects/moduleobject.c
@@ -32,20 +32,26 @@
 module_init_dict(PyModuleObject *mod, PyObject *md_dict,
                  PyObject *name, PyObject *doc)
 {
+    _Py_IDENTIFIER(__name__);
+    _Py_IDENTIFIER(__doc__);
+    _Py_IDENTIFIER(__package__);
+    _Py_IDENTIFIER(__loader__);
+    _Py_IDENTIFIER(__spec__);
+
     if (md_dict == NULL)
         return -1;
     if (doc == NULL)
         doc = Py_None;
 
-    if (PyDict_SetItemString(md_dict, "__name__", name) != 0)
+    if (_PyDict_SetItemId(md_dict, &PyId___name__, name) != 0)
         return -1;
-    if (PyDict_SetItemString(md_dict, "__doc__", doc) != 0)
+    if (_PyDict_SetItemId(md_dict, &PyId___doc__, doc) != 0)
         return -1;
-    if (PyDict_SetItemString(md_dict, "__package__", Py_None) != 0)
+    if (_PyDict_SetItemId(md_dict, &PyId___package__, Py_None) != 0)
         return -1;
-    if (PyDict_SetItemString(md_dict, "__loader__", Py_None) != 0)
+    if (_PyDict_SetItemId(md_dict, &PyId___loader__, Py_None) != 0)
         return -1;
-    if (PyDict_SetItemString(md_dict, "__spec__", Py_None) != 0)
+    if (_PyDict_SetItemId(md_dict, &PyId___spec__, Py_None) != 0)
         return -1;
     if (PyUnicode_CheckExact(name)) {
         Py_INCREF(name);
@@ -184,8 +190,9 @@
         Py_DECREF(n);
     }
     if (module->m_doc != NULL) {
+        _Py_IDENTIFIER(__doc__);
         v = PyUnicode_FromString(module->m_doc);
-        if (v == NULL || PyDict_SetItemString(d, "__doc__", v) != 0) {
+        if (v == NULL || _PyDict_SetItemId(d, &PyId___doc__, v) != 0) {
             Py_XDECREF(v);
             Py_DECREF(m);
             return NULL;
@@ -214,6 +221,7 @@
 PyObject*
 PyModule_GetNameObject(PyObject *m)
 {
+    _Py_IDENTIFIER(__name__);
     PyObject *d;
     PyObject *name;
     if (!PyModule_Check(m)) {
@@ -222,7 +230,7 @@
     }
     d = ((PyModuleObject *)m)->md_dict;
     if (d == NULL ||
-        (name = PyDict_GetItemString(d, "__name__")) == NULL ||
+        (name = _PyDict_GetItemId(d, &PyId___name__)) == NULL ||
         !PyUnicode_Check(name))
     {
         PyErr_SetString(PyExc_SystemError, "nameless module");
@@ -245,6 +253,7 @@
 PyObject*
 PyModule_GetFilenameObject(PyObject *m)
 {
+    _Py_IDENTIFIER(__file__);
     PyObject *d;
     PyObject *fileobj;
     if (!PyModule_Check(m)) {
@@ -253,7 +262,7 @@
     }
     d = ((PyModuleObject *)m)->md_dict;
     if (d == NULL ||
-        (fileobj = PyDict_GetItemString(d, "__file__")) == NULL ||
+        (fileobj = _PyDict_GetItemId(d, &PyId___file__)) == NULL ||
         !PyUnicode_Check(fileobj))
     {
         PyErr_SetString(PyExc_SystemError, "module filename missing");
@@ -411,6 +420,31 @@
     return PyObject_CallMethod(interp->importlib, "_module_repr", "O", m);
 }
 
+static PyObject*
+module_getattro(PyModuleObject *m, PyObject *name)
+{
+    PyObject *attr, *mod_name;
+    attr = PyObject_GenericGetAttr((PyObject *)m, name);
+    if (attr || !PyErr_ExceptionMatches(PyExc_AttributeError))
+        return attr;
+    PyErr_Clear();
+    if (m->md_dict) {
+        _Py_IDENTIFIER(__name__);
+        mod_name = _PyDict_GetItemId(m->md_dict, &PyId___name__);
+        if (mod_name) {
+            PyErr_Format(PyExc_AttributeError,
+                        "module '%U' has no attribute '%U'", mod_name, name);
+            return NULL;
+        }
+        else if (PyErr_Occurred()) {
+            PyErr_Clear();
+        }
+    }
+    PyErr_Format(PyExc_AttributeError,
+                "module has no attribute '%U'", name);
+    return NULL;
+}
+
 static int
 module_traverse(PyModuleObject *m, visitproc visit, void *arg)
 {
@@ -464,7 +498,6 @@
     {0}
 };
 
-
 PyDoc_STRVAR(module_doc,
 "module(name[, doc])\n\
 \n\
@@ -488,7 +521,7 @@
     0,                                          /* tp_hash */
     0,                                          /* tp_call */
     0,                                          /* tp_str */
-    PyObject_GenericGetAttr,                    /* tp_getattro */
+    (getattrofunc)module_getattro,              /* tp_getattro */
     PyObject_GenericSetAttr,                    /* tp_setattro */
     0,                                          /* tp_as_buffer */
     Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
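The moduleobject.c changes above switch from string-keyed dict calls to the _Py_IDENTIFIER machinery, which interns each literal once on first use and reuses it afterwards. A sketch of the idiom (private API; the helper is hypothetical):

#include "Python.h"

static int
mark_initialized(PyObject *dict)
{
    _Py_IDENTIFIER(initialized);   /* static PyId_initialized, interned lazily */

    return _PyDict_SetItemId(dict, &PyId_initialized, Py_True);
}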
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 004cfaa..af2bab0 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -5,6 +5,7 @@
 #ifdef PYMALLOC_DEBUG   /* WITH_PYMALLOC && PYMALLOC_DEBUG */
 /* Forward declaration */
 static void* _PyMem_DebugMalloc(void *ctx, size_t size);
+static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
 static void _PyMem_DebugFree(void *ctx, void *p);
 static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
 
@@ -43,6 +44,7 @@
 
 /* Forward declaration */
 static void* _PyObject_Malloc(void *ctx, size_t size);
+static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
 static void _PyObject_Free(void *ctx, void *p);
 static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
 #endif
@@ -51,7 +53,7 @@
 static void *
 _PyMem_RawMalloc(void *ctx, size_t size)
 {
-    /* PyMem_Malloc(0) means malloc(1). Some systems would return NULL
+    /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
        for malloc(0), which would be treated as an error. Some platforms would
        return a pointer with no memory behind it, which would break pymalloc.
        To solve these problems, allocate an extra byte. */
@@ -61,6 +63,20 @@
 }
 
 static void *
+_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
+{
+    /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
+       for calloc(0, 0), which would be treated as an error. Some platforms
+       would return a pointer with no memory behind it, which would break
+       pymalloc.  To solve these problems, allocate an extra byte. */
+    if (nelem == 0 || elsize == 0) {
+        nelem = 1;
+        elsize = 1;
+    }
+    return calloc(nelem, elsize);
+}
+
+static void *
 _PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
 {
     if (size == 0)
@@ -123,9 +139,9 @@
 #endif
 
 
-#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawRealloc, _PyMem_RawFree
+#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree
 #ifdef WITH_PYMALLOC
-#  define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Realloc, _PyObject_Free
+#  define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free
 #else
 #  define PYOBJ_FUNCS PYRAW_FUNCS
 #endif
@@ -147,7 +163,7 @@
     {'o', {NULL, PYOBJ_FUNCS}}
     };
 
-#define PYDBG_FUNCS _PyMem_DebugMalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
+#define PYDBG_FUNCS _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
 #endif
 
 static PyMemAllocator _PyMem_Raw = {
@@ -196,6 +212,7 @@
     PyMemAllocator alloc;
 
     alloc.malloc = _PyMem_DebugMalloc;
+    alloc.calloc = _PyMem_DebugCalloc;
     alloc.realloc = _PyMem_DebugRealloc;
     alloc.free = _PyMem_DebugFree;
 
@@ -228,9 +245,10 @@
     case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
     case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
     default:
-        /* unknown domain */
+        /* unknown domain: set all attributes to NULL */
         allocator->ctx = NULL;
         allocator->malloc = NULL;
+        allocator->calloc = NULL;
         allocator->realloc = NULL;
         allocator->free = NULL;
     }
@@ -272,10 +290,18 @@
      */
     if (size > (size_t)PY_SSIZE_T_MAX)
         return NULL;
-
     return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
 }
 
+void *
+PyMem_RawCalloc(size_t nelem, size_t elsize)
+{
+    /* see PyMem_RawMalloc() */
+    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
+        return NULL;
+    return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
+}
+
 void*
 PyMem_RawRealloc(void *ptr, size_t new_size)
 {
@@ -300,6 +326,15 @@
 }
 
 void *
+PyMem_Calloc(size_t nelem, size_t elsize)
+{
+    /* see PyMem_RawMalloc() */
+    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
+        return NULL;
+    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
+}
+
+void *
 PyMem_Realloc(void *ptr, size_t new_size)
 {
     /* see PyMem_RawMalloc() */
@@ -352,6 +387,15 @@
 }
 
 void *
+PyObject_Calloc(size_t nelem, size_t elsize)
+{
+    /* see PyMem_RawMalloc() */
+    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
+        return NULL;
+    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
+}
+
+void *
 PyObject_Realloc(void *ptr, size_t new_size)
 {
     /* see PyMem_RawMalloc() */
@@ -1122,8 +1166,9 @@
  */
 
 static void *
-_PyObject_Malloc(void *ctx, size_t nbytes)
+_PyObject_Alloc(int use_calloc, void *ctx, size_t nelem, size_t elsize)
 {
+    size_t nbytes;
     block *bp;
     poolp pool;
     poolp next;
@@ -1131,6 +1176,9 @@
 
     _Py_AllocatedBlocks++;
 
+    assert(elsize == 0 || nelem <= PY_SSIZE_T_MAX / elsize);
+    nbytes = nelem * elsize;
+
 #ifdef WITH_VALGRIND
     if (UNLIKELY(running_on_valgrind == -1))
         running_on_valgrind = RUNNING_ON_VALGRIND;
@@ -1138,9 +1186,9 @@
         goto redirect;
 #endif
 
-    /*
-     * This implicitly redirects malloc(0).
-     */
+    if (nelem == 0 || elsize == 0)
+        goto redirect;
+
     if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
         LOCK();
         /*
@@ -1158,6 +1206,8 @@
             assert(bp != NULL);
             if ((pool->freeblock = *(block **)bp) != NULL) {
                 UNLOCK();
+                if (use_calloc)
+                    memset(bp, 0, nbytes);
                 return (void *)bp;
             }
             /*
@@ -1170,6 +1220,8 @@
                 pool->nextoffset += INDEX2SIZE(size);
                 *(block **)(pool->freeblock) = NULL;
                 UNLOCK();
+                if (use_calloc)
+                    memset(bp, 0, nbytes);
                 return (void *)bp;
             }
             /* Pool is full, unlink from used pools. */
@@ -1178,6 +1230,8 @@
             next->prevpool = pool;
             pool->nextpool = next;
             UNLOCK();
+            if (use_calloc)
+                memset(bp, 0, nbytes);
             return (void *)bp;
         }
 
@@ -1257,6 +1311,8 @@
                 assert(bp != NULL);
                 pool->freeblock = *(block **)bp;
                 UNLOCK();
+                if (use_calloc)
+                    memset(bp, 0, nbytes);
                 return (void *)bp;
             }
             /*
@@ -1272,6 +1328,8 @@
             pool->freeblock = bp + size;
             *(block **)(pool->freeblock) = NULL;
             UNLOCK();
+            if (use_calloc)
+                memset(bp, 0, nbytes);
             return (void *)bp;
         }
 
@@ -1311,13 +1369,29 @@
      * has been reached.
      */
     {
-        void *result = PyMem_RawMalloc(nbytes);
+        void *result;
+        if (use_calloc)
+            result = PyMem_RawCalloc(nelem, elsize);
+        else
+            result = PyMem_RawMalloc(nbytes);
         if (!result)
             _Py_AllocatedBlocks--;
         return result;
     }
 }
 
+static void *
+_PyObject_Malloc(void *ctx, size_t nbytes)
+{
+    return _PyObject_Alloc(0, ctx, 1, nbytes);
+}
+
+static void *
+_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
+{
+    return _PyObject_Alloc(1, ctx, nelem, elsize);
+}
+
 /* free */
 
 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
@@ -1561,7 +1635,7 @@
 #endif
 
     if (p == NULL)
-        return _PyObject_Malloc(ctx, nbytes);
+        return _PyObject_Alloc(0, ctx, 1, nbytes);
 
 #ifdef WITH_VALGRIND
     /* Treat running_on_valgrind == -1 the same as 0 */
@@ -1589,7 +1663,7 @@
             }
             size = nbytes;
         }
-        bp = _PyObject_Malloc(ctx, nbytes);
+        bp = _PyObject_Alloc(0, ctx, 1, nbytes);
         if (bp != NULL) {
             memcpy(bp, p, size);
             _PyObject_Free(ctx, p);
@@ -1745,7 +1819,7 @@
 */
 
 static void *
-_PyMem_DebugMalloc(void *ctx, size_t nbytes)
+_PyMem_DebugAlloc(int use_calloc, void *ctx, size_t nbytes)
 {
     debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
     uchar *p;           /* base address of malloc'ed block */
@@ -1758,7 +1832,10 @@
         /* overflow:  can't represent total as a size_t */
         return NULL;
 
-    p = (uchar *)api->alloc.malloc(api->alloc.ctx, total);
+    if (use_calloc)
+        p = (uchar *)api->alloc.calloc(api->alloc.ctx, 1, total);
+    else
+        p = (uchar *)api->alloc.malloc(api->alloc.ctx, total);
     if (p == NULL)
         return NULL;
 
@@ -1767,7 +1844,7 @@
     p[SST] = (uchar)api->api_id;
     memset(p + SST + 1, FORBIDDENBYTE, SST-1);
 
-    if (nbytes > 0)
+    if (nbytes > 0 && !use_calloc)
         memset(p + 2*SST, CLEANBYTE, nbytes);
 
     /* at tail, write pad (SST bytes) and serialno (SST bytes) */
@@ -1778,6 +1855,21 @@
     return p + 2*SST;
 }
 
+static void *
+_PyMem_DebugMalloc(void *ctx, size_t nbytes)
+{
+    return _PyMem_DebugAlloc(0, ctx, nbytes);
+}
+
+static void *
+_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
+{
+    size_t nbytes;
+    assert(elsize == 0 || nelem <= PY_SSIZE_T_MAX / elsize);
+    nbytes = nelem * elsize;
+    return _PyMem_DebugAlloc(1, ctx, nbytes);
+}
+
 /* The debug free first checks the 2*SST bytes on each end for sanity (in
    particular, that the FORBIDDENBYTEs with the api ID are still intact).
    Then fills the original bytes with DEADBYTE.
@@ -1811,7 +1903,7 @@
     int i;
 
     if (p == NULL)
-        return _PyMem_DebugMalloc(ctx, nbytes);
+        return _PyMem_DebugAlloc(0, ctx, nbytes);
 
     _PyMem_DebugCheckAddress(api->api_id, p);
     bumpserialno();
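
Two details in the debug hooks above are easy to miss: _PyMem_DebugCalloc() asserts
nelem <= PY_SSIZE_T_MAX / elsize before multiplying, guarding elsize == 0 so the
division itself is safe, and _PyMem_DebugAlloc() skips the CLEANBYTE fill when
use_calloc is set because the underlying calloc already zeroed the payload. The
overflow guard in isolation, sketched with SIZE_MAX standing in for PY_SSIZE_T_MAX:

    #include <stddef.h>
    #include <stdint.h>

    /* Return 1 if nelem * elsize fits in size_t, 0 otherwise.  Testing
     * elsize == 0 first keeps the division from ever being by zero. */
    static int
    mul_fits(size_t nelem, size_t elsize)
    {
        return elsize == 0 || nelem <= SIZE_MAX / elsize;
    }
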
diff --git a/Objects/setobject.c b/Objects/setobject.c
index 34e43b9..dff1597 100644
--- a/Objects/setobject.c
+++ b/Objects/setobject.c
@@ -4,7 +4,7 @@
    Written and maintained by Raymond D. Hettinger <python@rcn.com>
    Derived from Lib/sets.py and Objects/dictobject.c.
 
-   Copyright (c) 2003-2013 Python Software Foundation.
+   Copyright (c) 2003-2014 Python Software Foundation.
    All rights reserved.
 
    The basic lookup function used by all operations.
@@ -67,16 +67,16 @@
     while (1) {
         if (entry->key == key)
             return entry;
-        if (entry->hash == hash && entry->key != dummy) {
+        if (entry->hash == hash && entry->key != dummy) {         /* dummy match unlikely */
             PyObject *startkey = entry->key;
             Py_INCREF(startkey);
             cmp = PyObject_RichCompareBool(startkey, key, Py_EQ);
             Py_DECREF(startkey);
-            if (cmp < 0)
+            if (cmp < 0)                                          /* unlikely */
                 return NULL;
-            if (table != so->table || entry->key != startkey)
+            if (table != so->table || entry->key != startkey)     /* unlikely */
                 return set_lookkey(so, key, hash);
-            if (cmp > 0)
+            if (cmp > 0)                                          /* likely */
                 return entry;
         }
         if (entry->key == dummy && freeslot == NULL)
@@ -135,7 +135,7 @@
        including subclasses of str; e.g., one reason to subclass
        strings is to override __eq__, and for speed we don't cater to
        that here. */
-    if (!PyUnicode_CheckExact(key)) {
+    if (!PyUnicode_CheckExact(key)) {                             /* unlikely */
         so->lookup = set_lookkey;
         return set_lookkey(so, key, hash);
     }
@@ -147,8 +147,8 @@
     while (1) {
         if (entry->key == key
             || (entry->hash == hash
-                && entry->key != dummy
-                && unicode_eq(entry->key, key)))
+                && entry->key != dummy                            /* unlikely */
+                && unicode_eq(entry->key, key)))                  /* likely */
             return entry;
         if (entry->key == dummy && freeslot == NULL)
             freeslot = entry;
@@ -267,6 +267,7 @@
     assert(minused >= 0);
 
     /* Find the smallest table size > minused. */
+    /* XXX speed-up with intrinsics */
     for (newsize = PySet_MINSIZE;
          newsize <= minused && newsize > 0;
          newsize <<= 1)
@@ -1014,6 +1015,12 @@
 PyDoc_STRVAR(update_doc,
 "Update a set with the union of itself and others.");
 
+/* XXX Todo:
+   If aligned memory allocations become available, make the
+   set object 64 byte aligned so that most of the fields
+   can be retrieved or updated in a single cache line.
+*/
+
 static PyObject *
 make_new_set(PyTypeObject *type, PyObject *iterable)
 {
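
The resize loop above finds the smallest power-of-two table size strictly greater
than minused by repeated shifting, and the new XXX note suggests an intrinsic
(e.g. count-leading-zeros) could do it in constant time. A portable sketch of that
idea using only shifts and ORs; this illustrates the hinted optimization and is not
code from the patch:

    #include <stdint.h>

    /* Smallest power of two strictly greater than n (so 8 -> 16, 5 -> 8),
     * matching the "newsize <= minused" loop condition above. */
    static uint64_t
    next_pow2_above(uint64_t n)
    {
        n |= n >> 1;
        n |= n >> 2;
        n |= n >> 4;
        n |= n >> 8;
        n |= n >> 16;
        n |= n >> 32;
        return n + 1;
    }

A real replacement would still clamp the result to at least PySet_MINSIZE, as the
existing loop's starting value does.
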
diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index ba106a139..c759204 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -4469,6 +4469,8 @@
         COPYNUM(nb_inplace_true_divide);
         COPYNUM(nb_inplace_floor_divide);
         COPYNUM(nb_index);
+        COPYNUM(nb_matrix_multiply);
+        COPYNUM(nb_inplace_matrix_multiply);
     }
 
     if (type->tp_as_sequence != NULL && base->tp_as_sequence != NULL) {
@@ -5605,6 +5607,7 @@
 SLOT1BIN(slot_nb_add, nb_add, "__add__", "__radd__")
 SLOT1BIN(slot_nb_subtract, nb_subtract, "__sub__", "__rsub__")
 SLOT1BIN(slot_nb_multiply, nb_multiply, "__mul__", "__rmul__")
+SLOT1BIN(slot_nb_matrix_multiply, nb_matrix_multiply, "__matmul__", "__rmatmul__")
 SLOT1BIN(slot_nb_remainder, nb_remainder, "__mod__", "__rmod__")
 SLOT1BIN(slot_nb_divmod, nb_divmod, "__divmod__", "__rdivmod__")
 
@@ -5698,6 +5701,7 @@
 SLOT1(slot_nb_inplace_add, "__iadd__", PyObject *, "O")
 SLOT1(slot_nb_inplace_subtract, "__isub__", PyObject *, "O")
 SLOT1(slot_nb_inplace_multiply, "__imul__", PyObject *, "O")
+SLOT1(slot_nb_inplace_matrix_multiply, "__imatmul__", PyObject *, "O")
 SLOT1(slot_nb_inplace_remainder, "__imod__", PyObject *, "O")
 /* Can't use SLOT1 here, because nb_inplace_power is ternary */
 static PyObject *
@@ -6278,6 +6282,12 @@
            "__index__($self, /)\n--\n\n"
            "Return self converted to an integer, if self is suitable "
            "for use as an index into a list."),
+    BINSLOT("__matmul__", nb_matrix_multiply, slot_nb_matrix_multiply,
+            "@"),
+    RBINSLOT("__rmatmul__", nb_matrix_multiply, slot_nb_matrix_multiply,
+             "@"),
+    IBSLOT("__imatmul__", nb_inplace_matrix_multiply, slot_nb_inplace_matrix_multiply,
+           wrap_binaryfunc, "@="),
     MPSLOT("__len__", mp_length, slot_mp_length, wrap_lenfunc,
            "__len__($self, /)\n--\n\nReturn len(self)."),
     MPSLOT("__getitem__", mp_subscript, slot_mp_subscript,
diff --git a/Objects/typeslots.inc b/Objects/typeslots.inc
index caa1e03..2ed99d8 100644
--- a/Objects/typeslots.inc
+++ b/Objects/typeslots.inc
@@ -73,3 +73,5 @@
 offsetof(PyHeapTypeObject, ht_type.tp_members),
 offsetof(PyHeapTypeObject, ht_type.tp_getset),
 offsetof(PyHeapTypeObject, ht_type.tp_free),
+offsetof(PyHeapTypeObject, as_number.nb_matrix_multiply),
+offsetof(PyHeapTypeObject, as_number.nb_inplace_matrix_multiply),
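
Together, the typeobject.c and typeslots.inc hunks wire the new @ operator into the
type machinery: nb_matrix_multiply and nb_inplace_matrix_multiply are inherited by
subclasses, exposed through the __matmul__/__rmatmul__/__imatmul__ slot wrappers,
and made addressable for heap types. A hedged sketch of how a hypothetical extension
type (names and behaviour invented for illustration) could fill the new slot:

    #include <Python.h>

    /* Hypothetical @ implementation: simply returns the right operand. */
    static PyObject *
    demo_matmul(PyObject *self, PyObject *other)
    {
        Py_INCREF(other);
        return other;
    }

    static PyNumberMethods demo_as_number = {
        .nb_matrix_multiply = demo_matmul,   /* field added by this patch */
    };

    static PyTypeObject DemoType = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "demo.Demo",
        .tp_basicsize = sizeof(PyObject),
        .tp_flags = Py_TPFLAGS_DEFAULT,
        .tp_as_number = &demo_as_number,
    };
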
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index ec22239..af70ede 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -6817,28 +6817,6 @@
     return PyBytes_AS_STRING(*obj);
 }
 
-static int
-is_dbcs_lead_byte(UINT code_page, const char *s, int offset)
-{
-    const char *curr = s + offset;
-    const char *prev;
-
-    if (!IsDBCSLeadByteEx(code_page, *curr))
-        return 0;
-
-    prev = CharPrevExA(code_page, s, curr, 0);
-    if (prev == curr)
-        return 1;
-    /* FIXME: This code is limited to "true" double-byte encodings,
-       as it assumes an incomplete character consists of a single
-       byte. */
-    if (curr - prev == 2)
-        return 1;
-    if (!IsDBCSLeadByteEx(code_page, *prev))
-        return 1;
-    return 0;
-}
-
 static DWORD
 decode_code_page_flags(UINT code_page)
 {
@@ -6913,7 +6891,7 @@
 decode_code_page_errors(UINT code_page,
                         PyObject **v,
                         const char *in, const int size,
-                        const char *errors)
+                        const char *errors, int final)
 {
     const char *startin = in;
     const char *endin = in + size;
@@ -6940,7 +6918,7 @@
     if (encoding == NULL)
         return -1;
 
-    if (errors == NULL || strcmp(errors, "strict") == 0) {
+    if ((errors == NULL || strcmp(errors, "strict") == 0) && final) {
         /* The last error was ERROR_NO_UNICODE_TRANSLATION, then we raise a
            UnicodeDecodeError. */
         make_decode_exception(&exc, encoding, in, size, 0, 0, reason);
@@ -7003,6 +6981,10 @@
         if (outsize <= 0) {
             Py_ssize_t startinpos, endinpos, outpos;
 
+            /* last character in partial decode? */
+            if (in + insize >= endin && !final)
+                break;
+
             startinpos = in - startin;
             endinpos = startinpos + 1;
             outpos = out - PyUnicode_AS_UNICODE(*v);
@@ -7031,7 +7013,7 @@
     assert(outsize <= PyUnicode_WSTR_LENGTH(*v));
     if (unicode_resize(v, outsize) < 0)
         goto error;
-    ret = size;
+    ret = in - startin;
 
 error:
     Py_XDECREF(encoding_obj);
@@ -7072,24 +7054,19 @@
             done = 1;
         }
 
-        /* Skip trailing lead-byte unless 'final' is set */
-        if (!final && is_dbcs_lead_byte(code_page, s, chunk_size - 1))
-            --chunk_size;
-
         if (chunk_size == 0 && done) {
             if (v != NULL)
                 break;
             _Py_RETURN_UNICODE_EMPTY();
         }
 
-
         converted = decode_code_page_strict(code_page, &v,
                                             s, chunk_size);
         if (converted == -2)
             converted = decode_code_page_errors(code_page, &v,
                                                 s, chunk_size,
-                                                errors);
-        assert(converted != 0);
+                                                errors, final);
+        assert(converted != 0 || done);
 
         if (converted < 0) {
             Py_XDECREF(v);
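
The decode_code_page_errors() changes above make the MBCS decoder incremental: it
now takes a final flag, returns the number of bytes actually consumed (in - startin)
rather than the full chunk size, and leaves a possibly-incomplete sequence at the end
of a non-final chunk unconsumed instead of raising, which is what lets the old DBCS
lead-byte heuristic be dropped. A toy, Windows-free sketch of that consumed-bytes
contract, assuming an invented two-byte encoding:

    #include <stddef.h>

    /* Toy stateless decoder: bytes < 0x80 stand alone, bytes >= 0x80 need one
     * continuation byte.  Returns the number of input bytes consumed; with
     * final == 0 a trailing lead byte is left unconsumed for the next call
     * instead of being treated as an error. */
    static size_t
    decode_chunk(const unsigned char *in, size_t size, int final, int *error)
    {
        size_t i = 0;
        *error = 0;
        while (i < size) {
            if (in[i] < 0x80) {
                i += 1;             /* single-byte character */
            }
            else if (i + 1 < size) {
                i += 2;             /* lead byte + continuation byte */
            }
            else if (!final) {
                break;              /* partial character: wait for more data */
            }
            else {
                *error = 1;         /* truncated character at end of input */
                break;
            }
        }
        return i;
    }
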
@@ -8496,10 +8473,10 @@
     }
     else if (PyLong_Check(x)) {
         long value = PyLong_AS_LONG(x);
-        long max = PyUnicode_GetMax();
-        if (value < 0 || value > max) {
-            PyErr_Format(PyExc_TypeError,
-                         "character mapping must be in range(0x%x)", max+1);
+        if (value < 0 || value > MAX_UNICODE) {
+            PyErr_Format(PyExc_ValueError,
+                         "character mapping must be in range(0x%x)",
+                         MAX_UNICODE+1);
             Py_DECREF(x);
             return -1;
         }
@@ -8518,76 +8495,168 @@
         return -1;
     }
 }
-/* ensure that *outobj is at least requiredsize characters long,
-   if not reallocate and adjust various state variables.
-   Return 0 on success, -1 on error */
+
+/* Look up the character and write the result into the writer.
+   Return 1 if the result was written into the writer, return 0 if the mapping
+   was undefined; raise an exception and return -1 on error. */
 static int
-charmaptranslate_makespace(Py_UCS4 **outobj, Py_ssize_t *psize,
-                               Py_ssize_t requiredsize)
+charmaptranslate_output(Py_UCS4 ch, PyObject *mapping,
+                        _PyUnicodeWriter *writer)
 {
-    Py_ssize_t oldsize = *psize;
-    Py_UCS4 *new_outobj;
-    if (requiredsize > oldsize) {
-        /* exponentially overallocate to minimize reallocations */
-        if (requiredsize < 2 * oldsize)
-            requiredsize = 2 * oldsize;
-        new_outobj = PyMem_Realloc(*outobj, requiredsize * sizeof(Py_UCS4));
-        if (new_outobj == 0)
-            return -1;
-        *outobj = new_outobj;
-        *psize = requiredsize;
-    }
-    return 0;
-}
-/* lookup the character, put the result in the output string and adjust
-   various state variables. Return a new reference to the object that
-   was put in the output buffer in *result, or Py_None, if the mapping was
-   undefined (in which case no character was written).
-   The called must decref result.
-   Return 0 on success, -1 on error. */
-static int
-charmaptranslate_output(PyObject *input, Py_ssize_t ipos,
-                        PyObject *mapping, Py_UCS4 **output,
-                        Py_ssize_t *osize, Py_ssize_t *opos,
-                        PyObject **res)
-{
-    Py_UCS4 curinp = PyUnicode_READ_CHAR(input, ipos);
-    if (charmaptranslate_lookup(curinp, mapping, res))
+    PyObject *item;
+
+    if (charmaptranslate_lookup(ch, mapping, &item))
         return -1;
-    if (*res==NULL) {
+
+    if (item == NULL) {
         /* not found => default to 1:1 mapping */
-        (*output)[(*opos)++] = curinp;
-    }
-    else if (*res==Py_None)
-        ;
-    else if (PyLong_Check(*res)) {
-        /* no overflow check, because we know that the space is enough */
-        (*output)[(*opos)++] = (Py_UCS4)PyLong_AS_LONG(*res);
-    }
-    else if (PyUnicode_Check(*res)) {
-        Py_ssize_t repsize;
-        if (PyUnicode_READY(*res) == -1)
+        if (_PyUnicodeWriter_WriteCharInline(writer, ch) < 0) {
             return -1;
-        repsize = PyUnicode_GET_LENGTH(*res);
-        if (repsize==1) {
-            /* no overflow check, because we know that the space is enough */
-            (*output)[(*opos)++] = PyUnicode_READ_CHAR(*res, 0);
         }
-        else if (repsize!=0) {
-            /* more than one character */
-            Py_ssize_t requiredsize = *opos +
-                (PyUnicode_GET_LENGTH(input) - ipos) +
-                repsize - 1;
-            Py_ssize_t i;
-            if (charmaptranslate_makespace(output, osize, requiredsize))
-                return -1;
-            for(i = 0; i < repsize; i++)
-                (*output)[(*opos)++] = PyUnicode_READ_CHAR(*res, i);
-        }
+        return 1;
     }
-    else
+
+    if (item == Py_None) {
+        Py_DECREF(item);
+        return 0;
+    }
+
+    if (PyLong_Check(item)) {
+        long ch = (Py_UCS4)PyLong_AS_LONG(item);
+        /* PyLong_AS_LONG() cannot fail, charmaptranslate_lookup() already
+           used it */
+        if (_PyUnicodeWriter_WriteCharInline(writer, ch) < 0) {
+            Py_DECREF(item);
+            return -1;
+        }
+        Py_DECREF(item);
+        return 1;
+    }
+
+    if (!PyUnicode_Check(item)) {
+        Py_DECREF(item);
         return -1;
-    return 0;
+    }
+
+    if (_PyUnicodeWriter_WriteStr(writer, item) < 0) {
+        Py_DECREF(item);
+        return -1;
+    }
+
+    Py_DECREF(item);
+    return 1;
+}
+
+static int
+unicode_fast_translate_lookup(PyObject *mapping, Py_UCS1 ch,
+                              Py_UCS1 *translate)
+{
+    PyObject *item = NULL;
+    int ret = 0;
+
+    if (charmaptranslate_lookup(ch, mapping, &item)) {
+        return -1;
+    }
+
+    if (item == Py_None) {
+        /* deletion */
+        translate[ch] = 0xfe;
+    }
+    else if (item == NULL) {
+        /* not found => default to 1:1 mapping */
+        translate[ch] = ch;
+        return 1;
+    }
+    else if (PyLong_Check(item)) {
+        long replace = PyLong_AS_LONG(item);
+        /* PyLong_AS_LONG() cannot fail, charmaptranslate_lookup() already
+           used it */
+        if (127 < replace) {
+            /* invalid character or character outside ASCII:
+               skip the fast translate */
+            goto exit;
+        }
+        translate[ch] = (Py_UCS1)replace;
+    }
+    else if (PyUnicode_Check(item)) {
+        Py_UCS4 replace;
+
+        if (PyUnicode_READY(item) == -1) {
+            Py_DECREF(item);
+            return -1;
+        }
+        if (PyUnicode_GET_LENGTH(item) != 1)
+            goto exit;
+
+        replace = PyUnicode_READ_CHAR(item, 0);
+        if (replace > 127)
+            goto exit;
+        translate[ch] = (Py_UCS1)replace;
+    }
+    else {
+        /* not None, NULL, long or unicode */
+        goto exit;
+    }
+    ret = 1;
+
+  exit:
+    Py_DECREF(item);
+    return ret;
+}
+
+/* Fast path for ascii => ascii translation. Return 1 if the whole string
+   was translated into writer, return 0 if the input string was partially
+   translated into writer, raise an exception and return -1 on error. */
+static int
+unicode_fast_translate(PyObject *input, PyObject *mapping,
+                       _PyUnicodeWriter *writer, int ignore)
+{
+    Py_UCS1 ascii_table[128], ch, ch2;
+    Py_ssize_t len;
+    Py_UCS1 *in, *end, *out;
+    int res = 0;
+
+    if (PyUnicode_READY(input) == -1)
+        return -1;
+    if (!PyUnicode_IS_ASCII(input))
+        return 0;
+    len = PyUnicode_GET_LENGTH(input);
+
+    memset(ascii_table, 0xff, 128);
+
+    in = PyUnicode_1BYTE_DATA(input);
+    end = in + len;
+
+    assert(PyUnicode_IS_ASCII(writer->buffer));
+    assert(PyUnicode_GET_LENGTH(writer->buffer) == len);
+    out = PyUnicode_1BYTE_DATA(writer->buffer);
+
+    for (; in < end; in++) {
+        ch = *in;
+        ch2 = ascii_table[ch];
+        if (ch2 == 0xff) {
+            int translate = unicode_fast_translate_lookup(mapping, ch,
+                                                          ascii_table);
+            if (translate < 0)
+                return -1;
+            if (translate == 0)
+                goto exit;
+            ch2 = ascii_table[ch];
+        }
+        if (ch2 == 0xfe) {
+            if (ignore)
+                continue;
+            goto exit;
+        }
+        assert(ch2 < 128);
+        *out = ch2;
+        out++;
+    }
+    res = 1;
+
+exit:
+    writer->pos = out - PyUnicode_1BYTE_DATA(writer->buffer);
+    return res;
 }
 
 PyObject *
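
unicode_fast_translate() above covers the common ASCII-to-ASCII case with a lazily
filled 128-entry table, using 0xff for "not looked up yet" and 0xfe for "delete this
character", and bailing out to the general code whenever a mapping cannot be
expressed as a single ASCII byte. A standalone sketch of the same lazy-table idea,
with a plain C callback standing in for the Python mapping:

    #include <stddef.h>
    #include <string.h>

    typedef unsigned char uc;

    enum { NOT_CACHED = 0xff, DELETED = 0xfe };

    /* Translate ASCII bytes in place through a 128-entry cache filled on
     * first use.  lookup() returns the replacement byte, DELETED, or
     * NOT_CACHED when the mapping cannot be expressed as one ASCII byte.
     * *consumed tells the caller how far the fast path got. */
    static size_t
    fast_translate(uc *buf, size_t len, size_t *consumed, uc (*lookup)(uc))
    {
        uc table[128];
        size_t in, out = 0;

        memset(table, NOT_CACHED, sizeof(table));
        for (in = 0; in < len; in++) {
            uc ch = buf[in], repl;
            if (ch >= 128)
                break;              /* non-ASCII input: use the slow path */
            repl = table[ch];
            if (repl == NOT_CACHED)
                repl = table[ch] = lookup(ch);
            if (repl == NOT_CACHED)
                break;              /* mapping too complex for the table */
            if (repl == DELETED)
                continue;
            buf[out++] = repl;
        }
        *consumed = in;
        return out;                 /* number of bytes written */
    }
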
@@ -8596,22 +8665,17 @@
                             const char *errors)
 {
     /* input object */
-    char *idata;
+    char *data;
     Py_ssize_t size, i;
     int kind;
     /* output buffer */
-    Py_UCS4 *output = NULL;
-    Py_ssize_t osize;
-    PyObject *res;
-    /* current output position */
-    Py_ssize_t opos;
+    _PyUnicodeWriter writer;
+    /* error handler */
     char *reason = "character maps to <undefined>";
     PyObject *errorHandler = NULL;
     PyObject *exc = NULL;
-    /* the following variable is used for caching string comparisons
-     * -1=not initialized, 0=unknown, 1=strict, 2=replace,
-     * 3=ignore, 4=xmlcharrefreplace */
-    int known_errorHandler = -1;
+    int ignore;
+    int res;
 
     if (mapping == NULL) {
         PyErr_BadArgument();
@@ -8620,10 +8684,9 @@
 
     if (PyUnicode_READY(input) == -1)
         return NULL;
-    idata = (char*)PyUnicode_DATA(input);
+    data = (char*)PyUnicode_DATA(input);
     kind = PyUnicode_KIND(input);
     size = PyUnicode_GET_LENGTH(input);
-    i = 0;
 
     if (size == 0) {
         Py_INCREF(input);
@@ -8632,121 +8695,81 @@
 
     /* allocate enough for a simple 1:1 translation without
        replacements, if we need more, we'll resize */
-    osize = size;
-    output = PyMem_Malloc(osize * sizeof(Py_UCS4));
-    opos = 0;
-    if (output == NULL) {
-        PyErr_NoMemory();
+    _PyUnicodeWriter_Init(&writer);
+    if (_PyUnicodeWriter_Prepare(&writer, size, 127) == -1)
         goto onError;
-    }
 
+    ignore = (errors != NULL && strcmp(errors, "ignore") == 0);
+
+    res = unicode_fast_translate(input, mapping, &writer, ignore);
+    if (res < 0) {
+        _PyUnicodeWriter_Dealloc(&writer);
+        return NULL;
+    }
+    if (res == 1)
+        return _PyUnicodeWriter_Finish(&writer);
+
+    i = writer.pos;
     while (i<size) {
         /* try to encode it */
-        PyObject *x = NULL;
-        if (charmaptranslate_output(input, i, mapping,
-                                    &output, &osize, &opos, &x)) {
-            Py_XDECREF(x);
-            goto onError;
-        }
-        Py_XDECREF(x);
-        if (x!=Py_None) /* it worked => adjust input pointer */
-            ++i;
-        else { /* untranslatable character */
-            PyObject *repunicode = NULL; /* initialize to prevent gcc warning */
-            Py_ssize_t repsize;
-            Py_ssize_t newpos;
-            Py_ssize_t uni2;
-            /* startpos for collecting untranslatable chars */
-            Py_ssize_t collstart = i;
-            Py_ssize_t collend = i+1;
-            Py_ssize_t coll;
+        int translate;
+        PyObject *repunicode = NULL; /* initialize to prevent gcc warning */
+        Py_ssize_t newpos;
+        /* startpos for collecting untranslatable chars */
+        Py_ssize_t collstart;
+        Py_ssize_t collend;
+        Py_UCS4 ch;
 
-            /* find all untranslatable characters */
-            while (collend < size) {
-                if (charmaptranslate_lookup(PyUnicode_READ(kind,idata, collend), mapping, &x))
-                    goto onError;
-                Py_XDECREF(x);
-                if (x!=Py_None)
-                    break;
-                ++collend;
-            }
-            /* cache callback name lookup
-             * (if not done yet, i.e. it's the first error) */
-            if (known_errorHandler==-1) {
-                if ((errors==NULL) || (!strcmp(errors, "strict")))
-                    known_errorHandler = 1;
-                else if (!strcmp(errors, "replace"))
-                    known_errorHandler = 2;
-                else if (!strcmp(errors, "ignore"))
-                    known_errorHandler = 3;
-                else if (!strcmp(errors, "xmlcharrefreplace"))
-                    known_errorHandler = 4;
-                else
-                    known_errorHandler = 0;
-            }
-            switch (known_errorHandler) {
-            case 1: /* strict */
-                make_translate_exception(&exc,
-                                         input, collstart, collend, reason);
-                if (exc != NULL)
-                    PyCodec_StrictErrors(exc);
+        ch = PyUnicode_READ(kind, data, i);
+        translate = charmaptranslate_output(ch, mapping, &writer);
+        if (translate < 0)
+            goto onError;
+
+        if (translate != 0) {
+            /* it worked => adjust input pointer */
+            ++i;
+            continue;
+        }
+
+        /* untranslatable character */
+        collstart = i;
+        collend = i+1;
+
+        /* find all untranslatable characters */
+        while (collend < size) {
+            PyObject *x;
+            ch = PyUnicode_READ(kind, data, collend);
+            if (charmaptranslate_lookup(ch, mapping, &x))
                 goto onError;
-            case 2: /* replace */
-                /* No need to check for space, this is a 1:1 replacement */
-                for (coll = collstart; coll<collend; coll++)
-                    output[opos++] = '?';
-                /* fall through */
-            case 3: /* ignore */
-                i = collend;
+            Py_XDECREF(x);
+            if (x != Py_None)
                 break;
-            case 4: /* xmlcharrefreplace */
-                /* generate replacement (temporarily (mis)uses i) */
-                for (i = collstart; i < collend; ++i) {
-                    char buffer[2+29+1+1];
-                    char *cp;
-                    sprintf(buffer, "&#%d;", PyUnicode_READ(kind, idata, i));
-                    if (charmaptranslate_makespace(&output, &osize,
-                                                   opos+strlen(buffer)+(size-collend)))
-                        goto onError;
-                    for (cp = buffer; *cp; ++cp)
-                        output[opos++] = *cp;
-                }
-                i = collend;
-                break;
-            default:
-                repunicode = unicode_translate_call_errorhandler(errors, &errorHandler,
-                                                                 reason, input, &exc,
-                                                                 collstart, collend, &newpos);
-                if (repunicode == NULL)
-                    goto onError;
-                if (PyUnicode_READY(repunicode) == -1) {
-                    Py_DECREF(repunicode);
-                    goto onError;
-                }
-                /* generate replacement  */
-                repsize = PyUnicode_GET_LENGTH(repunicode);
-                if (charmaptranslate_makespace(&output, &osize,
-                                               opos+repsize+(size-collend))) {
-                    Py_DECREF(repunicode);
-                    goto onError;
-                }
-                for (uni2 = 0; repsize-->0; ++uni2)
-                    output[opos++] = PyUnicode_READ_CHAR(repunicode, uni2);
-                i = newpos;
+            ++collend;
+        }
+
+        if (ignore) {
+            i = collend;
+        }
+        else {
+            repunicode = unicode_translate_call_errorhandler(errors, &errorHandler,
+                                                             reason, input, &exc,
+                                                             collstart, collend, &newpos);
+            if (repunicode == NULL)
+                goto onError;
+            if (_PyUnicodeWriter_WriteStr(&writer, repunicode) < 0) {
                 Py_DECREF(repunicode);
+                goto onError;
             }
+            Py_DECREF(repunicode);
+            i = newpos;
         }
     }
-    res = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, output, opos);
-    if (!res)
-        goto onError;
-    PyMem_Free(output);
     Py_XDECREF(exc);
     Py_XDECREF(errorHandler);
-    return res;
+    return _PyUnicodeWriter_Finish(&writer);
 
   onError:
-    PyMem_Free(output);
+    _PyUnicodeWriter_Dealloc(&writer);
     Py_XDECREF(exc);
     Py_XDECREF(errorHandler);
     return NULL;
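
The rewritten slow path above translates one character at a time into a
_PyUnicodeWriter; when a character's mapping is None it collects the whole
untranslatable run before consulting the error handler once, and with
errors='ignore' it simply skips the run. The control flow, sketched with callbacks
in place of the mapping, the writer, and the error handler:

    #include <stddef.h>

    /* translate_one(ch, out) writes ch's translation into *out and returns 1,
     * or returns 0 if ch has no usable mapping.  on_error() receives the
     * half-open range of untranslatable characters and returns the index at
     * which to resume (collend to skip the run). */
    static void
    translate_all(const unsigned int *in, size_t size,
                  int (*translate_one)(unsigned int ch, unsigned int *out),
                  size_t (*on_error)(size_t collstart, size_t collend),
                  void (*emit)(unsigned int ch))
    {
        size_t i = 0;
        while (i < size) {
            unsigned int repl;
            if (translate_one(in[i], &repl)) {
                emit(repl);
                i++;
                continue;
            }
            /* Untranslatable: extend the run as far as possible, then let the
             * error handler decide where to resume. */
            size_t collstart = i, collend = i + 1;
            while (collend < size && !translate_one(in[collend], &repl))
                collend++;
            i = on_error(collstart, collend);
        }
    }
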
@@ -14009,24 +14032,14 @@
     if (!PyNumber_Check(v))
         goto wrongtype;
 
-    /* make sure number is a type of integer */
-    /* if not, issue deprecation warning for now */
+    /* make sure number is a type of integer for o, x, and X */
     if (!PyLong_Check(v)) {
         if (type == 'o' || type == 'x' || type == 'X') {
             iobj = PyNumber_Index(v);
             if (iobj == NULL) {
-                PyErr_Clear();
-                if (PyErr_WarnEx(PyExc_DeprecationWarning,
-                                 "automatic int conversions have been deprecated",
-                                 1)) {
-                    return -1;
-                }
-                iobj = PyNumber_Long(v);
-                if (iobj == NULL ) {
-                    if (PyErr_ExceptionMatches(PyExc_TypeError))
-                        goto wrongtype;
-                    return -1;
-                }
+                if (PyErr_ExceptionMatches(PyExc_TypeError))
+                    goto wrongtype;
+                return -1;
             }
         }
         else {
@@ -14087,10 +14100,23 @@
     return 0;
 
 wrongtype:
-    PyErr_Format(PyExc_TypeError,
-            "%%%c format: a number is required, "
-            "not %.200s",
-            type, Py_TYPE(v)->tp_name);
+    switch(type)
+    {
+        case 'o':
+        case 'x':
+        case 'X':
+            PyErr_Format(PyExc_TypeError,
+                    "%%%c format: an integer is required, "
+                    "not %.200s",
+                    type, Py_TYPE(v)->tp_name);
+            break;
+        default:
+            PyErr_Format(PyExc_TypeError,
+                    "%%%c format: a number is required, "
+                    "not %.200s",
+                    type, Py_TYPE(v)->tp_name);
+            break;
+    }
     return -1;
 }
 
@@ -14108,22 +14134,10 @@
         PyObject *iobj;
         long x;
         /* make sure number is a type of integer */
-        /* if not, issue deprecation warning for now */
         if (!PyLong_Check(v)) {
             iobj = PyNumber_Index(v);
             if (iobj == NULL) {
-                PyErr_Clear();
-                if (PyErr_WarnEx(PyExc_DeprecationWarning,
-                                 "automatic int conversions have been deprecated",
-                                 1)) {
-                    return -1;
-                }
-                iobj = PyNumber_Long(v);
-                if (iobj == NULL ) {
-                    if (PyErr_ExceptionMatches(PyExc_TypeError))
-                        goto onError;
-                    return -1;
-                }
+                goto onError;
             }
             v = iobj;
             Py_DECREF(iobj);
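
The formatting hunks drop the deprecation-era fallback to PyNumber_Long(): %o, %x
and %X (and %c below) now require an object supporting __index__ and otherwise raise
TypeError, with the message distinguishing "an integer is required" from the generic
"a number is required". A small sketch of that coercion using only public C API
calls; the helper name is invented:

    #include <Python.h>

    /* Coerce v to an exact int for an integer-only format code; returns a new
     * reference or NULL with TypeError set for non-index objects. */
    static PyObject *
    coerce_index(PyObject *v, char type)
    {
        if (PyLong_Check(v)) {
            Py_INCREF(v);
            return v;
        }
        PyObject *iobj = PyNumber_Index(v);   /* calls __index__ if present */
        if (iobj == NULL && PyErr_ExceptionMatches(PyExc_TypeError))
            PyErr_Format(PyExc_TypeError,
                         "%%%c format: an integer is required, not %.200s",
                         type, Py_TYPE(v)->tp_name);
        return iobj;
    }
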
diff --git a/PC/VS9.0/kill_python.c b/PC/VS9.0/kill_python.c
index 604731f..dbc9425 100644
--- a/PC/VS9.0/kill_python.c
+++ b/PC/VS9.0/kill_python.c
@@ -62,7 +62,7 @@
             continue;
 
         len = wcsnlen_s(me.szExePath, MAX_PATH) - KILL_PYTHON_EXE_LEN;
-        wcsncpy_s(path, MAX_PATH+1, me.szExePath, len); 
+        wcsncpy_s(path, MAX_PATH+1, me.szExePath, len);
 
         break;
 
@@ -80,8 +80,8 @@
      * looking for python processes.  When we find one, verify it lives
      * in the same directory we live in.  If it does, kill it.  If we're
      * unable to kill it, treat this as a fatal error and return 1.
-     * 
-     * The rationale behind this is that we're called at the start of the 
+     *
+     * The rationale behind this is that we're called at the start of the
      * build process on the basis that we'll take care of killing any
      * running instances, such that the build won't encounter permission
      * denied errors during linking. If we can't kill one of the processes,
@@ -104,11 +104,11 @@
     do {
 
         /*
-         * XXX TODO: if we really wanted to be fancy, we could check the 
+         * XXX TODO: if we really wanted to be fancy, we could check the
          * modules for all processes (not just the python[_d].exe ones)
-         * and see if any of our DLLs are loaded (i.e. python34[_d].dll),
+         * and see if any of our DLLs are loaded (i.e. python35[_d].dll),
          * as that would also inhibit our ability to rebuild the solution.
-         * Not worth loosing sleep over though; for now, a simple check 
+         * Not worth losing sleep over though; for now, a simple check
          * for just the python executable should be sufficient.
          */
 
@@ -119,7 +119,7 @@
         /* It's a python process, so figure out which directory it's in... */
         hsm = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pe.th32ProcessID);
         if (hsm == INVALID_HANDLE_VALUE)
-            /* 
+            /*
              * If our module snapshot fails (which will happen if we don't own
              * the process), just ignore it and continue.  (It seems different
              * versions of Windows return different values for GetLastError()
diff --git a/PC/VS9.0/pyproject.vsprops b/PC/VS9.0/pyproject.vsprops
index a909875..e2354bb 100644
--- a/PC/VS9.0/pyproject.vsprops
+++ b/PC/VS9.0/pyproject.vsprops
@@ -38,7 +38,7 @@
 	/>

 	<UserMacro

 		Name="PyDllName"

-		Value="python34"

+		Value="python35"

 	/>

 	<UserMacro

 		Name="PythonExe"

diff --git a/PC/config.c b/PC/config.c
index 72c9381..48dbcc0 100644
--- a/PC/config.c
+++ b/PC/config.c
@@ -19,7 +19,7 @@
 extern PyObject* PyInit__md5(void);
 extern PyObject* PyInit_nt(void);
 extern PyObject* PyInit__operator(void);
-extern PyObject* PyInit_signal(void);
+extern PyObject* PyInit__signal(void);
 extern PyObject* PyInit__sha1(void);
 extern PyObject* PyInit__sha256(void);
 extern PyObject* PyInit__sha512(void);
@@ -91,7 +91,7 @@
     {"math", PyInit_math},
     {"nt", PyInit_nt}, /* Use the NT os functions, not posix */
     {"_operator", PyInit__operator},
-    {"signal", PyInit_signal},
+    {"_signal", PyInit__signal},
     {"_md5", PyInit__md5},
     {"_sha1", PyInit__sha1},
     {"_sha256", PyInit__sha256},
diff --git a/PC/example_nt/example.vcproj b/PC/example_nt/example.vcproj
index df36341..d82f76e 100644
--- a/PC/example_nt/example.vcproj
+++ b/PC/example_nt/example.vcproj
@@ -39,7 +39,7 @@
 			<Tool

 				Name="VCLinkerTool"

 				AdditionalOptions="/export:initexample"

-				AdditionalDependencies="odbc32.lib odbccp32.lib python34.lib"

+				AdditionalDependencies="odbc32.lib odbccp32.lib python35.lib"

 				OutputFile=".\Release/example.pyd"

 				LinkIncremental="1"

 				SuppressStartupBanner="TRUE"

@@ -105,7 +105,7 @@
 			<Tool

 				Name="VCLinkerTool"

 				AdditionalOptions="/export:initexample"

-				AdditionalDependencies="odbc32.lib odbccp32.lib python34_d.lib"

+				AdditionalDependencies="odbc32.lib odbccp32.lib python35_d.lib"

 				OutputFile=".\Debug/example_d.pyd"

 				LinkIncremental="1"

 				SuppressStartupBanner="TRUE"

diff --git a/PC/pyconfig.h b/PC/pyconfig.h
index ccf75f3..c0f802b 100644
--- a/PC/pyconfig.h
+++ b/PC/pyconfig.h
@@ -322,11 +322,11 @@
 			their Makefile (other compilers are generally
 			taken care of by distutils.) */
 #			if defined(_DEBUG)
-#				pragma comment(lib,"python34_d.lib")
+#				pragma comment(lib,"python35_d.lib")
 #			elif defined(Py_LIMITED_API)
 #				pragma comment(lib,"python3.lib")
 #			else
-#				pragma comment(lib,"python34.lib")
+#				pragma comment(lib,"python35.lib")
 #			endif /* _DEBUG */
 #		endif /* _MSC_VER */
 #	endif /* Py_BUILD_CORE */
diff --git a/PC/python3.def b/PC/python3.def
index 37e454b..fad6448 100644
--- a/PC/python3.def
+++ b/PC/python3.def
@@ -1,701 +1,701 @@
-; When changing this file, run python34gen.py
+; When changing this file, run python35gen.py
 LIBRARY	"python3"
 EXPORTS
-  PyArg_Parse=python34.PyArg_Parse
-  PyArg_ParseTuple=python34.PyArg_ParseTuple
-  PyArg_ParseTupleAndKeywords=python34.PyArg_ParseTupleAndKeywords
-  PyArg_UnpackTuple=python34.PyArg_UnpackTuple
-  PyArg_VaParse=python34.PyArg_VaParse
-  PyArg_VaParseTupleAndKeywords=python34.PyArg_VaParseTupleAndKeywords
-  PyArg_ValidateKeywordArguments=python34.PyArg_ValidateKeywordArguments
-  PyBaseObject_Type=python34.PyBaseObject_Type DATA
-  PyBool_FromLong=python34.PyBool_FromLong
-  PyBool_Type=python34.PyBool_Type DATA
-  PyByteArrayIter_Type=python34.PyByteArrayIter_Type DATA
-  PyByteArray_AsString=python34.PyByteArray_AsString
-  PyByteArray_Concat=python34.PyByteArray_Concat
-  PyByteArray_FromObject=python34.PyByteArray_FromObject
-  PyByteArray_FromStringAndSize=python34.PyByteArray_FromStringAndSize
-  PyByteArray_Resize=python34.PyByteArray_Resize
-  PyByteArray_Size=python34.PyByteArray_Size
-  PyByteArray_Type=python34.PyByteArray_Type DATA
-  PyBytesIter_Type=python34.PyBytesIter_Type DATA
-  PyBytes_AsString=python34.PyBytes_AsString
-  PyBytes_AsStringAndSize=python34.PyBytes_AsStringAndSize
-  PyBytes_Concat=python34.PyBytes_Concat
-  PyBytes_ConcatAndDel=python34.PyBytes_ConcatAndDel
-  PyBytes_DecodeEscape=python34.PyBytes_DecodeEscape
-  PyBytes_FromFormat=python34.PyBytes_FromFormat
-  PyBytes_FromFormatV=python34.PyBytes_FromFormatV
-  PyBytes_FromObject=python34.PyBytes_FromObject
-  PyBytes_FromString=python34.PyBytes_FromString
-  PyBytes_FromStringAndSize=python34.PyBytes_FromStringAndSize
-  PyBytes_Repr=python34.PyBytes_Repr
-  PyBytes_Size=python34.PyBytes_Size
-  PyBytes_Type=python34.PyBytes_Type DATA
-  PyCFunction_Call=python34.PyCFunction_Call
-  PyCFunction_ClearFreeList=python34.PyCFunction_ClearFreeList
-  PyCFunction_GetFlags=python34.PyCFunction_GetFlags
-  PyCFunction_GetFunction=python34.PyCFunction_GetFunction
-  PyCFunction_GetSelf=python34.PyCFunction_GetSelf
-  PyCFunction_New=python34.PyCFunction_New
-  PyCFunction_NewEx=python34.PyCFunction_NewEx
-  PyCFunction_Type=python34.PyCFunction_Type DATA
-  PyCallIter_New=python34.PyCallIter_New
-  PyCallIter_Type=python34.PyCallIter_Type DATA
-  PyCallable_Check=python34.PyCallable_Check
-  PyCapsule_GetContext=python34.PyCapsule_GetContext
-  PyCapsule_GetDestructor=python34.PyCapsule_GetDestructor
-  PyCapsule_GetName=python34.PyCapsule_GetName
-  PyCapsule_GetPointer=python34.PyCapsule_GetPointer
-  PyCapsule_Import=python34.PyCapsule_Import
-  PyCapsule_IsValid=python34.PyCapsule_IsValid
-  PyCapsule_New=python34.PyCapsule_New
-  PyCapsule_SetContext=python34.PyCapsule_SetContext
-  PyCapsule_SetDestructor=python34.PyCapsule_SetDestructor
-  PyCapsule_SetName=python34.PyCapsule_SetName
-  PyCapsule_SetPointer=python34.PyCapsule_SetPointer
-  PyCapsule_Type=python34.PyCapsule_Type DATA
-  PyClassMethodDescr_Type=python34.PyClassMethodDescr_Type DATA
-  PyCodec_BackslashReplaceErrors=python34.PyCodec_BackslashReplaceErrors
-  PyCodec_Decode=python34.PyCodec_Decode
-  PyCodec_Decoder=python34.PyCodec_Decoder
-  PyCodec_Encode=python34.PyCodec_Encode
-  PyCodec_Encoder=python34.PyCodec_Encoder
-  PyCodec_IgnoreErrors=python34.PyCodec_IgnoreErrors
-  PyCodec_IncrementalDecoder=python34.PyCodec_IncrementalDecoder
-  PyCodec_IncrementalEncoder=python34.PyCodec_IncrementalEncoder
-  PyCodec_KnownEncoding=python34.PyCodec_KnownEncoding
-  PyCodec_LookupError=python34.PyCodec_LookupError
-  PyCodec_Register=python34.PyCodec_Register
-  PyCodec_RegisterError=python34.PyCodec_RegisterError
-  PyCodec_ReplaceErrors=python34.PyCodec_ReplaceErrors
-  PyCodec_StreamReader=python34.PyCodec_StreamReader
-  PyCodec_StreamWriter=python34.PyCodec_StreamWriter
-  PyCodec_StrictErrors=python34.PyCodec_StrictErrors
-  PyCodec_XMLCharRefReplaceErrors=python34.PyCodec_XMLCharRefReplaceErrors
-  PyComplex_FromDoubles=python34.PyComplex_FromDoubles
-  PyComplex_ImagAsDouble=python34.PyComplex_ImagAsDouble
-  PyComplex_RealAsDouble=python34.PyComplex_RealAsDouble
-  PyComplex_Type=python34.PyComplex_Type DATA
-  PyDescr_NewClassMethod=python34.PyDescr_NewClassMethod
-  PyDescr_NewGetSet=python34.PyDescr_NewGetSet
-  PyDescr_NewMember=python34.PyDescr_NewMember
-  PyDescr_NewMethod=python34.PyDescr_NewMethod
-  PyDictItems_Type=python34.PyDictItems_Type DATA
-  PyDictIterItem_Type=python34.PyDictIterItem_Type DATA
-  PyDictIterKey_Type=python34.PyDictIterKey_Type DATA
-  PyDictIterValue_Type=python34.PyDictIterValue_Type DATA
-  PyDictKeys_Type=python34.PyDictKeys_Type DATA
-  PyDictProxy_New=python34.PyDictProxy_New
-  PyDictProxy_Type=python34.PyDictProxy_Type DATA
-  PyDictValues_Type=python34.PyDictValues_Type DATA
-  PyDict_Clear=python34.PyDict_Clear
-  PyDict_Contains=python34.PyDict_Contains
-  PyDict_Copy=python34.PyDict_Copy
-  PyDict_DelItem=python34.PyDict_DelItem
-  PyDict_DelItemString=python34.PyDict_DelItemString
-  PyDict_GetItem=python34.PyDict_GetItem
-  PyDict_GetItemString=python34.PyDict_GetItemString
-  PyDict_GetItemWithError=python34.PyDict_GetItemWithError
-  PyDict_Items=python34.PyDict_Items
-  PyDict_Keys=python34.PyDict_Keys
-  PyDict_Merge=python34.PyDict_Merge
-  PyDict_MergeFromSeq2=python34.PyDict_MergeFromSeq2
-  PyDict_New=python34.PyDict_New
-  PyDict_Next=python34.PyDict_Next
-  PyDict_SetItem=python34.PyDict_SetItem
-  PyDict_SetItemString=python34.PyDict_SetItemString
-  PyDict_Size=python34.PyDict_Size
-  PyDict_Type=python34.PyDict_Type DATA
-  PyDict_Update=python34.PyDict_Update
-  PyDict_Values=python34.PyDict_Values
-  PyEllipsis_Type=python34.PyEllipsis_Type DATA
-  PyEnum_Type=python34.PyEnum_Type DATA
-  PyErr_BadArgument=python34.PyErr_BadArgument
-  PyErr_BadInternalCall=python34.PyErr_BadInternalCall
-  PyErr_CheckSignals=python34.PyErr_CheckSignals
-  PyErr_Clear=python34.PyErr_Clear
-  PyErr_Display=python34.PyErr_Display
-  PyErr_ExceptionMatches=python34.PyErr_ExceptionMatches
-  PyErr_Fetch=python34.PyErr_Fetch
-  PyErr_Format=python34.PyErr_Format
-  PyErr_GivenExceptionMatches=python34.PyErr_GivenExceptionMatches
-  PyErr_NewException=python34.PyErr_NewException
-  PyErr_NewExceptionWithDoc=python34.PyErr_NewExceptionWithDoc
-  PyErr_NoMemory=python34.PyErr_NoMemory
-  PyErr_NormalizeException=python34.PyErr_NormalizeException
-  PyErr_Occurred=python34.PyErr_Occurred
-  PyErr_Print=python34.PyErr_Print
-  PyErr_PrintEx=python34.PyErr_PrintEx
-  PyErr_ProgramText=python34.PyErr_ProgramText
-  PyErr_Restore=python34.PyErr_Restore
-  PyErr_SetFromErrno=python34.PyErr_SetFromErrno
-  PyErr_SetFromErrnoWithFilename=python34.PyErr_SetFromErrnoWithFilename
-  PyErr_SetFromErrnoWithFilenameObject=python34.PyErr_SetFromErrnoWithFilenameObject
-  PyErr_SetInterrupt=python34.PyErr_SetInterrupt
-  PyErr_SetNone=python34.PyErr_SetNone
-  PyErr_SetObject=python34.PyErr_SetObject
-  PyErr_SetString=python34.PyErr_SetString
-  PyErr_SyntaxLocation=python34.PyErr_SyntaxLocation
-  PyErr_WarnEx=python34.PyErr_WarnEx
-  PyErr_WarnExplicit=python34.PyErr_WarnExplicit
-  PyErr_WarnFormat=python34.PyErr_WarnFormat
-  PyErr_WriteUnraisable=python34.PyErr_WriteUnraisable
-  PyEval_AcquireLock=python34.PyEval_AcquireLock
-  PyEval_AcquireThread=python34.PyEval_AcquireThread
-  PyEval_CallFunction=python34.PyEval_CallFunction
-  PyEval_CallMethod=python34.PyEval_CallMethod
-  PyEval_CallObjectWithKeywords=python34.PyEval_CallObjectWithKeywords
-  PyEval_EvalCode=python34.PyEval_EvalCode
-  PyEval_EvalCodeEx=python34.PyEval_EvalCodeEx
-  PyEval_EvalFrame=python34.PyEval_EvalFrame
-  PyEval_EvalFrameEx=python34.PyEval_EvalFrameEx
-  PyEval_GetBuiltins=python34.PyEval_GetBuiltins
-  PyEval_GetCallStats=python34.PyEval_GetCallStats
-  PyEval_GetFrame=python34.PyEval_GetFrame
-  PyEval_GetFuncDesc=python34.PyEval_GetFuncDesc
-  PyEval_GetFuncName=python34.PyEval_GetFuncName
-  PyEval_GetGlobals=python34.PyEval_GetGlobals
-  PyEval_GetLocals=python34.PyEval_GetLocals
-  PyEval_InitThreads=python34.PyEval_InitThreads
-  PyEval_ReInitThreads=python34.PyEval_ReInitThreads
-  PyEval_ReleaseLock=python34.PyEval_ReleaseLock
-  PyEval_ReleaseThread=python34.PyEval_ReleaseThread
-  PyEval_RestoreThread=python34.PyEval_RestoreThread
-  PyEval_SaveThread=python34.PyEval_SaveThread
-  PyEval_ThreadsInitialized=python34.PyEval_ThreadsInitialized
-  PyExc_ArithmeticError=python34.PyExc_ArithmeticError DATA
-  PyExc_AssertionError=python34.PyExc_AssertionError DATA
-  PyExc_AttributeError=python34.PyExc_AttributeError DATA
-  PyExc_BaseException=python34.PyExc_BaseException DATA
-  PyExc_BufferError=python34.PyExc_BufferError DATA
-  PyExc_BytesWarning=python34.PyExc_BytesWarning DATA
-  PyExc_DeprecationWarning=python34.PyExc_DeprecationWarning DATA
-  PyExc_EOFError=python34.PyExc_EOFError DATA
-  PyExc_EnvironmentError=python34.PyExc_EnvironmentError DATA
-  PyExc_Exception=python34.PyExc_Exception DATA
-  PyExc_FloatingPointError=python34.PyExc_FloatingPointError DATA
-  PyExc_FutureWarning=python34.PyExc_FutureWarning DATA
-  PyExc_GeneratorExit=python34.PyExc_GeneratorExit DATA
-  PyExc_IOError=python34.PyExc_IOError DATA
-  PyExc_ImportError=python34.PyExc_ImportError DATA
-  PyExc_ImportWarning=python34.PyExc_ImportWarning DATA
-  PyExc_IndentationError=python34.PyExc_IndentationError DATA
-  PyExc_IndexError=python34.PyExc_IndexError DATA
-  PyExc_KeyError=python34.PyExc_KeyError DATA
-  PyExc_KeyboardInterrupt=python34.PyExc_KeyboardInterrupt DATA
-  PyExc_LookupError=python34.PyExc_LookupError DATA
-  PyExc_MemoryError=python34.PyExc_MemoryError DATA
-  PyExc_MemoryErrorInst=python34.PyExc_MemoryErrorInst DATA
-  PyExc_NameError=python34.PyExc_NameError DATA
-  PyExc_NotImplementedError=python34.PyExc_NotImplementedError DATA
-  PyExc_OSError=python34.PyExc_OSError DATA
-  PyExc_OverflowError=python34.PyExc_OverflowError DATA
-  PyExc_PendingDeprecationWarning=python34.PyExc_PendingDeprecationWarning DATA
-  PyExc_RecursionErrorInst=python34.PyExc_RecursionErrorInst DATA
-  PyExc_ReferenceError=python34.PyExc_ReferenceError DATA
-  PyExc_RuntimeError=python34.PyExc_RuntimeError DATA
-  PyExc_RuntimeWarning=python34.PyExc_RuntimeWarning DATA
-  PyExc_StopIteration=python34.PyExc_StopIteration DATA
-  PyExc_SyntaxError=python34.PyExc_SyntaxError DATA
-  PyExc_SyntaxWarning=python34.PyExc_SyntaxWarning DATA
-  PyExc_SystemError=python34.PyExc_SystemError DATA
-  PyExc_SystemExit=python34.PyExc_SystemExit DATA
-  PyExc_TabError=python34.PyExc_TabError DATA
-  PyExc_TypeError=python34.PyExc_TypeError DATA
-  PyExc_UnboundLocalError=python34.PyExc_UnboundLocalError DATA
-  PyExc_UnicodeDecodeError=python34.PyExc_UnicodeDecodeError DATA
-  PyExc_UnicodeEncodeError=python34.PyExc_UnicodeEncodeError DATA
-  PyExc_UnicodeError=python34.PyExc_UnicodeError DATA
-  PyExc_UnicodeTranslateError=python34.PyExc_UnicodeTranslateError DATA
-  PyExc_UnicodeWarning=python34.PyExc_UnicodeWarning DATA
-  PyExc_UserWarning=python34.PyExc_UserWarning DATA
-  PyExc_ValueError=python34.PyExc_ValueError DATA
-  PyExc_Warning=python34.PyExc_Warning DATA
-  PyExc_ZeroDivisionError=python34.PyExc_ZeroDivisionError DATA
-  PyException_GetCause=python34.PyException_GetCause
-  PyException_GetContext=python34.PyException_GetContext
-  PyException_GetTraceback=python34.PyException_GetTraceback
-  PyException_SetCause=python34.PyException_SetCause
-  PyException_SetContext=python34.PyException_SetContext
-  PyException_SetTraceback=python34.PyException_SetTraceback
-  PyFile_FromFd=python34.PyFile_FromFd
-  PyFile_GetLine=python34.PyFile_GetLine
-  PyFile_WriteObject=python34.PyFile_WriteObject
-  PyFile_WriteString=python34.PyFile_WriteString
-  PyFilter_Type=python34.PyFilter_Type DATA
-  PyFloat_AsDouble=python34.PyFloat_AsDouble
-  PyFloat_FromDouble=python34.PyFloat_FromDouble
-  PyFloat_FromString=python34.PyFloat_FromString
-  PyFloat_GetInfo=python34.PyFloat_GetInfo
-  PyFloat_GetMax=python34.PyFloat_GetMax
-  PyFloat_GetMin=python34.PyFloat_GetMin
-  PyFloat_Type=python34.PyFloat_Type DATA
-  PyFrozenSet_New=python34.PyFrozenSet_New
-  PyFrozenSet_Type=python34.PyFrozenSet_Type DATA
-  PyGC_Collect=python34.PyGC_Collect
-  PyGILState_Ensure=python34.PyGILState_Ensure
-  PyGILState_GetThisThreadState=python34.PyGILState_GetThisThreadState
-  PyGILState_Release=python34.PyGILState_Release
-  PyGetSetDescr_Type=python34.PyGetSetDescr_Type DATA
-  PyImport_AddModule=python34.PyImport_AddModule
-  PyImport_AppendInittab=python34.PyImport_AppendInittab
-  PyImport_Cleanup=python34.PyImport_Cleanup
-  PyImport_ExecCodeModule=python34.PyImport_ExecCodeModule
-  PyImport_ExecCodeModuleEx=python34.PyImport_ExecCodeModuleEx
-  PyImport_ExecCodeModuleWithPathnames=python34.PyImport_ExecCodeModuleWithPathnames
-  PyImport_GetImporter=python34.PyImport_GetImporter
-  PyImport_GetMagicNumber=python34.PyImport_GetMagicNumber
-  PyImport_GetMagicTag=python34.PyImport_GetMagicTag
-  PyImport_GetModuleDict=python34.PyImport_GetModuleDict
-  PyImport_Import=python34.PyImport_Import
-  PyImport_ImportFrozenModule=python34.PyImport_ImportFrozenModule
-  PyImport_ImportModule=python34.PyImport_ImportModule
-  PyImport_ImportModuleLevel=python34.PyImport_ImportModuleLevel
-  PyImport_ImportModuleNoBlock=python34.PyImport_ImportModuleNoBlock
-  PyImport_ReloadModule=python34.PyImport_ReloadModule
-  PyInterpreterState_Clear=python34.PyInterpreterState_Clear
-  PyInterpreterState_Delete=python34.PyInterpreterState_Delete
-  PyInterpreterState_New=python34.PyInterpreterState_New
-  PyIter_Next=python34.PyIter_Next
-  PyListIter_Type=python34.PyListIter_Type DATA
-  PyListRevIter_Type=python34.PyListRevIter_Type DATA
-  PyList_Append=python34.PyList_Append
-  PyList_AsTuple=python34.PyList_AsTuple
-  PyList_GetItem=python34.PyList_GetItem
-  PyList_GetSlice=python34.PyList_GetSlice
-  PyList_Insert=python34.PyList_Insert
-  PyList_New=python34.PyList_New
-  PyList_Reverse=python34.PyList_Reverse
-  PyList_SetItem=python34.PyList_SetItem
-  PyList_SetSlice=python34.PyList_SetSlice
-  PyList_Size=python34.PyList_Size
-  PyList_Sort=python34.PyList_Sort
-  PyList_Type=python34.PyList_Type DATA
-  PyLongRangeIter_Type=python34.PyLongRangeIter_Type DATA
-  PyLong_AsDouble=python34.PyLong_AsDouble
-  PyLong_AsLong=python34.PyLong_AsLong
-  PyLong_AsLongAndOverflow=python34.PyLong_AsLongAndOverflow
-  PyLong_AsLongLong=python34.PyLong_AsLongLong
-  PyLong_AsLongLongAndOverflow=python34.PyLong_AsLongLongAndOverflow
-  PyLong_AsSize_t=python34.PyLong_AsSize_t
-  PyLong_AsSsize_t=python34.PyLong_AsSsize_t
-  PyLong_AsUnsignedLong=python34.PyLong_AsUnsignedLong
-  PyLong_AsUnsignedLongLong=python34.PyLong_AsUnsignedLongLong
-  PyLong_AsUnsignedLongLongMask=python34.PyLong_AsUnsignedLongLongMask
-  PyLong_AsUnsignedLongMask=python34.PyLong_AsUnsignedLongMask
-  PyLong_AsVoidPtr=python34.PyLong_AsVoidPtr
-  PyLong_FromDouble=python34.PyLong_FromDouble
-  PyLong_FromLong=python34.PyLong_FromLong
-  PyLong_FromLongLong=python34.PyLong_FromLongLong
-  PyLong_FromSize_t=python34.PyLong_FromSize_t
-  PyLong_FromSsize_t=python34.PyLong_FromSsize_t
-  PyLong_FromString=python34.PyLong_FromString
-  PyLong_FromUnsignedLong=python34.PyLong_FromUnsignedLong
-  PyLong_FromUnsignedLongLong=python34.PyLong_FromUnsignedLongLong
-  PyLong_FromVoidPtr=python34.PyLong_FromVoidPtr
-  PyLong_GetInfo=python34.PyLong_GetInfo
-  PyLong_Type=python34.PyLong_Type DATA
-  PyMap_Type=python34.PyMap_Type DATA
-  PyMapping_Check=python34.PyMapping_Check
-  PyMapping_GetItemString=python34.PyMapping_GetItemString
-  PyMapping_HasKey=python34.PyMapping_HasKey
-  PyMapping_HasKeyString=python34.PyMapping_HasKeyString
-  PyMapping_Items=python34.PyMapping_Items
-  PyMapping_Keys=python34.PyMapping_Keys
-  PyMapping_Length=python34.PyMapping_Length
-  PyMapping_SetItemString=python34.PyMapping_SetItemString
-  PyMapping_Size=python34.PyMapping_Size
-  PyMapping_Values=python34.PyMapping_Values
-  PyMem_Free=python34.PyMem_Free
-  PyMem_Malloc=python34.PyMem_Malloc
-  PyMem_Realloc=python34.PyMem_Realloc
-  PyMemberDescr_Type=python34.PyMemberDescr_Type DATA
-  PyMemoryView_FromObject=python34.PyMemoryView_FromObject
-  PyMemoryView_GetContiguous=python34.PyMemoryView_GetContiguous
-  PyMemoryView_Type=python34.PyMemoryView_Type DATA
-  PyMethodDescr_Type=python34.PyMethodDescr_Type DATA
-  PyModule_AddIntConstant=python34.PyModule_AddIntConstant
-  PyModule_AddObject=python34.PyModule_AddObject
-  PyModule_AddStringConstant=python34.PyModule_AddStringConstant
-  PyModule_Create2=python34.PyModule_Create2
-  PyModule_GetDef=python34.PyModule_GetDef
-  PyModule_GetDict=python34.PyModule_GetDict
-  PyModule_GetFilename=python34.PyModule_GetFilename
-  PyModule_GetFilenameObject=python34.PyModule_GetFilenameObject
-  PyModule_GetName=python34.PyModule_GetName
-  PyModule_GetState=python34.PyModule_GetState
-  PyModule_New=python34.PyModule_New
-  PyModule_Type=python34.PyModule_Type DATA
-  PyNullImporter_Type=python34.PyNullImporter_Type DATA
-  PyNumber_Absolute=python34.PyNumber_Absolute
-  PyNumber_Add=python34.PyNumber_Add
-  PyNumber_And=python34.PyNumber_And
-  PyNumber_AsSsize_t=python34.PyNumber_AsSsize_t
-  PyNumber_Check=python34.PyNumber_Check
-  PyNumber_Divmod=python34.PyNumber_Divmod
-  PyNumber_Float=python34.PyNumber_Float
-  PyNumber_FloorDivide=python34.PyNumber_FloorDivide
-  PyNumber_InPlaceAdd=python34.PyNumber_InPlaceAdd
-  PyNumber_InPlaceAnd=python34.PyNumber_InPlaceAnd
-  PyNumber_InPlaceFloorDivide=python34.PyNumber_InPlaceFloorDivide
-  PyNumber_InPlaceLshift=python34.PyNumber_InPlaceLshift
-  PyNumber_InPlaceMultiply=python34.PyNumber_InPlaceMultiply
-  PyNumber_InPlaceOr=python34.PyNumber_InPlaceOr
-  PyNumber_InPlacePower=python34.PyNumber_InPlacePower
-  PyNumber_InPlaceRemainder=python34.PyNumber_InPlaceRemainder
-  PyNumber_InPlaceRshift=python34.PyNumber_InPlaceRshift
-  PyNumber_InPlaceSubtract=python34.PyNumber_InPlaceSubtract
-  PyNumber_InPlaceTrueDivide=python34.PyNumber_InPlaceTrueDivide
-  PyNumber_InPlaceXor=python34.PyNumber_InPlaceXor
-  PyNumber_Index=python34.PyNumber_Index
-  PyNumber_Invert=python34.PyNumber_Invert
-  PyNumber_Long=python34.PyNumber_Long
-  PyNumber_Lshift=python34.PyNumber_Lshift
-  PyNumber_Multiply=python34.PyNumber_Multiply
-  PyNumber_Negative=python34.PyNumber_Negative
-  PyNumber_Or=python34.PyNumber_Or
-  PyNumber_Positive=python34.PyNumber_Positive
-  PyNumber_Power=python34.PyNumber_Power
-  PyNumber_Remainder=python34.PyNumber_Remainder
-  PyNumber_Rshift=python34.PyNumber_Rshift
-  PyNumber_Subtract=python34.PyNumber_Subtract
-  PyNumber_ToBase=python34.PyNumber_ToBase
-  PyNumber_TrueDivide=python34.PyNumber_TrueDivide
-  PyNumber_Xor=python34.PyNumber_Xor
-  PyOS_AfterFork=python34.PyOS_AfterFork
-  PyOS_InitInterrupts=python34.PyOS_InitInterrupts
-  PyOS_InputHook=python34.PyOS_InputHook DATA
-  PyOS_InterruptOccurred=python34.PyOS_InterruptOccurred
-  PyOS_ReadlineFunctionPointer=python34.PyOS_ReadlineFunctionPointer DATA
-  PyOS_double_to_string=python34.PyOS_double_to_string
-  PyOS_getsig=python34.PyOS_getsig
-  PyOS_mystricmp=python34.PyOS_mystricmp
-  PyOS_mystrnicmp=python34.PyOS_mystrnicmp
-  PyOS_setsig=python34.PyOS_setsig
-  PyOS_snprintf=python34.PyOS_snprintf
-  PyOS_string_to_double=python34.PyOS_string_to_double
-  PyOS_strtol=python34.PyOS_strtol
-  PyOS_strtoul=python34.PyOS_strtoul
-  PyOS_vsnprintf=python34.PyOS_vsnprintf
-  PyObject_ASCII=python34.PyObject_ASCII
-  PyObject_AsCharBuffer=python34.PyObject_AsCharBuffer
-  PyObject_AsFileDescriptor=python34.PyObject_AsFileDescriptor
-  PyObject_AsReadBuffer=python34.PyObject_AsReadBuffer
-  PyObject_AsWriteBuffer=python34.PyObject_AsWriteBuffer
-  PyObject_Bytes=python34.PyObject_Bytes
-  PyObject_Call=python34.PyObject_Call
-  PyObject_CallFunction=python34.PyObject_CallFunction
-  PyObject_CallFunctionObjArgs=python34.PyObject_CallFunctionObjArgs
-  PyObject_CallMethod=python34.PyObject_CallMethod
-  PyObject_CallMethodObjArgs=python34.PyObject_CallMethodObjArgs
-  PyObject_CallObject=python34.PyObject_CallObject
-  PyObject_CheckReadBuffer=python34.PyObject_CheckReadBuffer
-  PyObject_ClearWeakRefs=python34.PyObject_ClearWeakRefs
-  PyObject_DelItem=python34.PyObject_DelItem
-  PyObject_DelItemString=python34.PyObject_DelItemString
-  PyObject_Dir=python34.PyObject_Dir
-  PyObject_Format=python34.PyObject_Format
-  PyObject_Free=python34.PyObject_Free
-  PyObject_GC_Del=python34.PyObject_GC_Del
-  PyObject_GC_Track=python34.PyObject_GC_Track
-  PyObject_GC_UnTrack=python34.PyObject_GC_UnTrack
-  PyObject_GenericGetAttr=python34.PyObject_GenericGetAttr
-  PyObject_GenericSetAttr=python34.PyObject_GenericSetAttr
-  PyObject_GetAttr=python34.PyObject_GetAttr
-  PyObject_GetAttrString=python34.PyObject_GetAttrString
-  PyObject_GetItem=python34.PyObject_GetItem
-  PyObject_GetIter=python34.PyObject_GetIter
-  PyObject_HasAttr=python34.PyObject_HasAttr
-  PyObject_HasAttrString=python34.PyObject_HasAttrString
-  PyObject_Hash=python34.PyObject_Hash
-  PyObject_HashNotImplemented=python34.PyObject_HashNotImplemented
-  PyObject_Init=python34.PyObject_Init
-  PyObject_InitVar=python34.PyObject_InitVar
-  PyObject_IsInstance=python34.PyObject_IsInstance
-  PyObject_IsSubclass=python34.PyObject_IsSubclass
-  PyObject_IsTrue=python34.PyObject_IsTrue
-  PyObject_Length=python34.PyObject_Length
-  PyObject_Malloc=python34.PyObject_Malloc
-  PyObject_Not=python34.PyObject_Not
-  PyObject_Realloc=python34.PyObject_Realloc
-  PyObject_Repr=python34.PyObject_Repr
-  PyObject_RichCompare=python34.PyObject_RichCompare
-  PyObject_RichCompareBool=python34.PyObject_RichCompareBool
-  PyObject_SelfIter=python34.PyObject_SelfIter
-  PyObject_SetAttr=python34.PyObject_SetAttr
-  PyObject_SetAttrString=python34.PyObject_SetAttrString
-  PyObject_SetItem=python34.PyObject_SetItem
-  PyObject_Size=python34.PyObject_Size
-  PyObject_Str=python34.PyObject_Str
-  PyObject_Type=python34.PyObject_Type DATA
-  PyParser_SimpleParseFileFlags=python34.PyParser_SimpleParseFileFlags
-  PyParser_SimpleParseStringFlags=python34.PyParser_SimpleParseStringFlags
-  PyProperty_Type=python34.PyProperty_Type DATA
-  PyRangeIter_Type=python34.PyRangeIter_Type DATA
-  PyRange_Type=python34.PyRange_Type DATA
-  PyReversed_Type=python34.PyReversed_Type DATA
-  PySeqIter_New=python34.PySeqIter_New
-  PySeqIter_Type=python34.PySeqIter_Type DATA
-  PySequence_Check=python34.PySequence_Check
-  PySequence_Concat=python34.PySequence_Concat
-  PySequence_Contains=python34.PySequence_Contains
-  PySequence_Count=python34.PySequence_Count
-  PySequence_DelItem=python34.PySequence_DelItem
-  PySequence_DelSlice=python34.PySequence_DelSlice
-  PySequence_Fast=python34.PySequence_Fast
-  PySequence_GetItem=python34.PySequence_GetItem
-  PySequence_GetSlice=python34.PySequence_GetSlice
-  PySequence_In=python34.PySequence_In
-  PySequence_InPlaceConcat=python34.PySequence_InPlaceConcat
-  PySequence_InPlaceRepeat=python34.PySequence_InPlaceRepeat
-  PySequence_Index=python34.PySequence_Index
-  PySequence_Length=python34.PySequence_Length
-  PySequence_List=python34.PySequence_List
-  PySequence_Repeat=python34.PySequence_Repeat
-  PySequence_SetItem=python34.PySequence_SetItem
-  PySequence_SetSlice=python34.PySequence_SetSlice
-  PySequence_Size=python34.PySequence_Size
-  PySequence_Tuple=python34.PySequence_Tuple
-  PySetIter_Type=python34.PySetIter_Type DATA
-  PySet_Add=python34.PySet_Add
-  PySet_Clear=python34.PySet_Clear
-  PySet_Contains=python34.PySet_Contains
-  PySet_Discard=python34.PySet_Discard
-  PySet_New=python34.PySet_New
-  PySet_Pop=python34.PySet_Pop
-  PySet_Size=python34.PySet_Size
-  PySet_Type=python34.PySet_Type DATA
-  PySlice_GetIndices=python34.PySlice_GetIndices
-  PySlice_GetIndicesEx=python34.PySlice_GetIndicesEx
-  PySlice_New=python34.PySlice_New
-  PySlice_Type=python34.PySlice_Type DATA
-  PySortWrapper_Type=python34.PySortWrapper_Type DATA
-  PyState_FindModule=python34.PyState_FindModule
-  PyState_AddModule=python34.PyState_AddModule
-  PyState_RemoveModule=python34.PyState_RemoveModule
-  PyStructSequence_GetItem=python34.PyStructSequence_GetItem
-  PyStructSequence_New=python34.PyStructSequence_New
-  PyStructSequence_NewType=python34.PyStructSequence_NewType
-  PyStructSequence_SetItem=python34.PyStructSequence_SetItem
-  PySuper_Type=python34.PySuper_Type DATA
-  PySys_AddWarnOption=python34.PySys_AddWarnOption
-  PySys_AddWarnOptionUnicode=python34.PySys_AddWarnOptionUnicode
-  PySys_FormatStderr=python34.PySys_FormatStderr
-  PySys_FormatStdout=python34.PySys_FormatStdout
-  PySys_GetObject=python34.PySys_GetObject
-  PySys_HasWarnOptions=python34.PySys_HasWarnOptions
-  PySys_ResetWarnOptions=python34.PySys_ResetWarnOptions
-  PySys_SetArgv=python34.PySys_SetArgv
-  PySys_SetArgvEx=python34.PySys_SetArgvEx
-  PySys_SetObject=python34.PySys_SetObject
-  PySys_SetPath=python34.PySys_SetPath
-  PySys_WriteStderr=python34.PySys_WriteStderr
-  PySys_WriteStdout=python34.PySys_WriteStdout
-  PyThreadState_Clear=python34.PyThreadState_Clear
-  PyThreadState_Delete=python34.PyThreadState_Delete
-  PyThreadState_DeleteCurrent=python34.PyThreadState_DeleteCurrent
-  PyThreadState_Get=python34.PyThreadState_Get
-  PyThreadState_GetDict=python34.PyThreadState_GetDict
-  PyThreadState_New=python34.PyThreadState_New
-  PyThreadState_SetAsyncExc=python34.PyThreadState_SetAsyncExc
-  PyThreadState_Swap=python34.PyThreadState_Swap
-  PyTraceBack_Here=python34.PyTraceBack_Here
-  PyTraceBack_Print=python34.PyTraceBack_Print
-  PyTraceBack_Type=python34.PyTraceBack_Type DATA
-  PyTupleIter_Type=python34.PyTupleIter_Type DATA
-  PyTuple_ClearFreeList=python34.PyTuple_ClearFreeList
-  PyTuple_GetItem=python34.PyTuple_GetItem
-  PyTuple_GetSlice=python34.PyTuple_GetSlice
-  PyTuple_New=python34.PyTuple_New
-  PyTuple_Pack=python34.PyTuple_Pack
-  PyTuple_SetItem=python34.PyTuple_SetItem
-  PyTuple_Size=python34.PyTuple_Size
-  PyTuple_Type=python34.PyTuple_Type DATA
-  PyType_ClearCache=python34.PyType_ClearCache
-  PyType_FromSpec=python34.PyType_FromSpec
-  PyType_FromSpecWithBases=python34.PyType_FromSpecWithBases
-  PyType_GenericAlloc=python34.PyType_GenericAlloc
-  PyType_GenericNew=python34.PyType_GenericNew
-  PyType_GetFlags=python34.PyType_GetFlags
-  PyType_GetSlot=python34.PyType_GetSlot
-  PyType_IsSubtype=python34.PyType_IsSubtype
-  PyType_Modified=python34.PyType_Modified
-  PyType_Ready=python34.PyType_Ready
-  PyType_Type=python34.PyType_Type DATA
-  PyUnicodeDecodeError_Create=python34.PyUnicodeDecodeError_Create
-  PyUnicodeDecodeError_GetEncoding=python34.PyUnicodeDecodeError_GetEncoding
-  PyUnicodeDecodeError_GetEnd=python34.PyUnicodeDecodeError_GetEnd
-  PyUnicodeDecodeError_GetObject=python34.PyUnicodeDecodeError_GetObject
-  PyUnicodeDecodeError_GetReason=python34.PyUnicodeDecodeError_GetReason
-  PyUnicodeDecodeError_GetStart=python34.PyUnicodeDecodeError_GetStart
-  PyUnicodeDecodeError_SetEnd=python34.PyUnicodeDecodeError_SetEnd
-  PyUnicodeDecodeError_SetReason=python34.PyUnicodeDecodeError_SetReason
-  PyUnicodeDecodeError_SetStart=python34.PyUnicodeDecodeError_SetStart
-  PyUnicodeEncodeError_GetEncoding=python34.PyUnicodeEncodeError_GetEncoding
-  PyUnicodeEncodeError_GetEnd=python34.PyUnicodeEncodeError_GetEnd
-  PyUnicodeEncodeError_GetObject=python34.PyUnicodeEncodeError_GetObject
-  PyUnicodeEncodeError_GetReason=python34.PyUnicodeEncodeError_GetReason
-  PyUnicodeEncodeError_GetStart=python34.PyUnicodeEncodeError_GetStart
-  PyUnicodeEncodeError_SetEnd=python34.PyUnicodeEncodeError_SetEnd
-  PyUnicodeEncodeError_SetReason=python34.PyUnicodeEncodeError_SetReason
-  PyUnicodeEncodeError_SetStart=python34.PyUnicodeEncodeError_SetStart
-  PyUnicodeIter_Type=python34.PyUnicodeIter_Type DATA
-  PyUnicodeTranslateError_GetEnd=python34.PyUnicodeTranslateError_GetEnd
-  PyUnicodeTranslateError_GetObject=python34.PyUnicodeTranslateError_GetObject
-  PyUnicodeTranslateError_GetReason=python34.PyUnicodeTranslateError_GetReason
-  PyUnicodeTranslateError_GetStart=python34.PyUnicodeTranslateError_GetStart
-  PyUnicodeTranslateError_SetEnd=python34.PyUnicodeTranslateError_SetEnd
-  PyUnicodeTranslateError_SetReason=python34.PyUnicodeTranslateError_SetReason
-  PyUnicodeTranslateError_SetStart=python34.PyUnicodeTranslateError_SetStart
-  PyUnicode_Append=python34.PyUnicode_Append
-  PyUnicode_AppendAndDel=python34.PyUnicode_AppendAndDel
-  PyUnicode_AsASCIIString=python34.PyUnicode_AsASCIIString
-  PyUnicode_AsCharmapString=python34.PyUnicode_AsCharmapString
-  PyUnicode_AsDecodedObject=python34.PyUnicode_AsDecodedObject
-  PyUnicode_AsDecodedUnicode=python34.PyUnicode_AsDecodedUnicode
-  PyUnicode_AsEncodedObject=python34.PyUnicode_AsEncodedObject
-  PyUnicode_AsEncodedString=python34.PyUnicode_AsEncodedString
-  PyUnicode_AsEncodedUnicode=python34.PyUnicode_AsEncodedUnicode
-  PyUnicode_AsLatin1String=python34.PyUnicode_AsLatin1String
-  PyUnicode_AsRawUnicodeEscapeString=python34.PyUnicode_AsRawUnicodeEscapeString
-  PyUnicode_AsUTF16String=python34.PyUnicode_AsUTF16String
-  PyUnicode_AsUTF32String=python34.PyUnicode_AsUTF32String
-  PyUnicode_AsUTF8String=python34.PyUnicode_AsUTF8String
-  PyUnicode_AsUnicodeEscapeString=python34.PyUnicode_AsUnicodeEscapeString
-  PyUnicode_AsWideChar=python34.PyUnicode_AsWideChar
-  PyUnicode_ClearFreelist=python34.PyUnicode_ClearFreelist
-  PyUnicode_Compare=python34.PyUnicode_Compare
-  PyUnicode_Concat=python34.PyUnicode_Concat
-  PyUnicode_Contains=python34.PyUnicode_Contains
-  PyUnicode_Count=python34.PyUnicode_Count
-  PyUnicode_Decode=python34.PyUnicode_Decode
-  PyUnicode_DecodeASCII=python34.PyUnicode_DecodeASCII
-  PyUnicode_DecodeCharmap=python34.PyUnicode_DecodeCharmap
-  PyUnicode_DecodeFSDefault=python34.PyUnicode_DecodeFSDefault
-  PyUnicode_DecodeFSDefaultAndSize=python34.PyUnicode_DecodeFSDefaultAndSize
-  PyUnicode_DecodeLatin1=python34.PyUnicode_DecodeLatin1
-  PyUnicode_DecodeRawUnicodeEscape=python34.PyUnicode_DecodeRawUnicodeEscape
-  PyUnicode_DecodeUTF16=python34.PyUnicode_DecodeUTF16
-  PyUnicode_DecodeUTF16Stateful=python34.PyUnicode_DecodeUTF16Stateful
-  PyUnicode_DecodeUTF32=python34.PyUnicode_DecodeUTF32
-  PyUnicode_DecodeUTF32Stateful=python34.PyUnicode_DecodeUTF32Stateful
-  PyUnicode_DecodeUTF8=python34.PyUnicode_DecodeUTF8
-  PyUnicode_DecodeUTF8Stateful=python34.PyUnicode_DecodeUTF8Stateful
-  PyUnicode_DecodeUnicodeEscape=python34.PyUnicode_DecodeUnicodeEscape
-  PyUnicode_FSConverter=python34.PyUnicode_FSConverter
-  PyUnicode_FSDecoder=python34.PyUnicode_FSDecoder
-  PyUnicode_Find=python34.PyUnicode_Find
-  PyUnicode_Format=python34.PyUnicode_Format
-  PyUnicode_FromEncodedObject=python34.PyUnicode_FromEncodedObject
-  PyUnicode_FromFormat=python34.PyUnicode_FromFormat
-  PyUnicode_FromFormatV=python34.PyUnicode_FromFormatV
-  PyUnicode_FromObject=python34.PyUnicode_FromObject
-  PyUnicode_FromOrdinal=python34.PyUnicode_FromOrdinal
-  PyUnicode_FromString=python34.PyUnicode_FromString
-  PyUnicode_FromStringAndSize=python34.PyUnicode_FromStringAndSize
-  PyUnicode_FromWideChar=python34.PyUnicode_FromWideChar
-  PyUnicode_GetDefaultEncoding=python34.PyUnicode_GetDefaultEncoding
-  PyUnicode_GetSize=python34.PyUnicode_GetSize
-  PyUnicode_IsIdentifier=python34.PyUnicode_IsIdentifier
-  PyUnicode_Join=python34.PyUnicode_Join
-  PyUnicode_Partition=python34.PyUnicode_Partition
-  PyUnicode_RPartition=python34.PyUnicode_RPartition
-  PyUnicode_RSplit=python34.PyUnicode_RSplit
-  PyUnicode_Replace=python34.PyUnicode_Replace
-  PyUnicode_Resize=python34.PyUnicode_Resize
-  PyUnicode_RichCompare=python34.PyUnicode_RichCompare
-  PyUnicode_SetDefaultEncoding=python34.PyUnicode_SetDefaultEncoding
-  PyUnicode_Split=python34.PyUnicode_Split
-  PyUnicode_Splitlines=python34.PyUnicode_Splitlines
-  PyUnicode_Tailmatch=python34.PyUnicode_Tailmatch
-  PyUnicode_Translate=python34.PyUnicode_Translate
-  PyUnicode_BuildEncodingMap=python34.PyUnicode_BuildEncodingMap
-  PyUnicode_CompareWithASCIIString=python34.PyUnicode_CompareWithASCIIString
-  PyUnicode_DecodeUTF7=python34.PyUnicode_DecodeUTF7
-  PyUnicode_DecodeUTF7Stateful=python34.PyUnicode_DecodeUTF7Stateful
-  PyUnicode_EncodeFSDefault=python34.PyUnicode_EncodeFSDefault
-  PyUnicode_InternFromString=python34.PyUnicode_InternFromString
-  PyUnicode_InternImmortal=python34.PyUnicode_InternImmortal
-  PyUnicode_InternInPlace=python34.PyUnicode_InternInPlace
-  PyUnicode_Type=python34.PyUnicode_Type DATA
-  PyWeakref_GetObject=python34.PyWeakref_GetObject DATA
-  PyWeakref_NewProxy=python34.PyWeakref_NewProxy
-  PyWeakref_NewRef=python34.PyWeakref_NewRef
-  PyWrapperDescr_Type=python34.PyWrapperDescr_Type DATA
-  PyWrapper_New=python34.PyWrapper_New
-  PyZip_Type=python34.PyZip_Type DATA
-  Py_AddPendingCall=python34.Py_AddPendingCall
-  Py_AtExit=python34.Py_AtExit
-  Py_BuildValue=python34.Py_BuildValue
-  Py_CompileString=python34.Py_CompileString
-  Py_DecRef=python34.Py_DecRef
-  Py_EndInterpreter=python34.Py_EndInterpreter
-  Py_Exit=python34.Py_Exit
-  Py_FatalError=python34.Py_FatalError
-  Py_FileSystemDefaultEncoding=python34.Py_FileSystemDefaultEncoding DATA
-  Py_Finalize=python34.Py_Finalize
-  Py_GetBuildInfo=python34.Py_GetBuildInfo
-  Py_GetCompiler=python34.Py_GetCompiler
-  Py_GetCopyright=python34.Py_GetCopyright
-  Py_GetExecPrefix=python34.Py_GetExecPrefix
-  Py_GetPath=python34.Py_GetPath
-  Py_GetPlatform=python34.Py_GetPlatform
-  Py_GetPrefix=python34.Py_GetPrefix
-  Py_GetProgramFullPath=python34.Py_GetProgramFullPath
-  Py_GetProgramName=python34.Py_GetProgramName
-  Py_GetPythonHome=python34.Py_GetPythonHome
-  Py_GetRecursionLimit=python34.Py_GetRecursionLimit
-  Py_GetVersion=python34.Py_GetVersion
-  Py_HasFileSystemDefaultEncoding=python34.Py_HasFileSystemDefaultEncoding DATA
-  Py_IncRef=python34.Py_IncRef
-  Py_Initialize=python34.Py_Initialize
-  Py_InitializeEx=python34.Py_InitializeEx
-  Py_IsInitialized=python34.Py_IsInitialized
-  Py_Main=python34.Py_Main
-  Py_MakePendingCalls=python34.Py_MakePendingCalls
-  Py_NewInterpreter=python34.Py_NewInterpreter
-  Py_ReprEnter=python34.Py_ReprEnter
-  Py_ReprLeave=python34.Py_ReprLeave
-  Py_SetProgramName=python34.Py_SetProgramName
-  Py_SetPythonHome=python34.Py_SetPythonHome
-  Py_SetRecursionLimit=python34.Py_SetRecursionLimit
-  Py_SymtableString=python34.Py_SymtableString
-  Py_VaBuildValue=python34.Py_VaBuildValue
-  _PyErr_BadInternalCall=python34._PyErr_BadInternalCall
-  _PyObject_CallFunction_SizeT=python34._PyObject_CallFunction_SizeT
-  _PyObject_CallMethod_SizeT=python34._PyObject_CallMethod_SizeT
-  _PyObject_GC_Malloc=python34._PyObject_GC_Malloc
-  _PyObject_GC_New=python34._PyObject_GC_New
-  _PyObject_GC_NewVar=python34._PyObject_GC_NewVar
-  _PyObject_GC_Resize=python34._PyObject_GC_Resize
-  _PyObject_New=python34._PyObject_New
-  _PyObject_NewVar=python34._PyObject_NewVar
-  _PyState_AddModule=python34._PyState_AddModule
-  _PyThreadState_Init=python34._PyThreadState_Init
-  _PyThreadState_Prealloc=python34._PyThreadState_Prealloc
-  _PyTrash_delete_later=python34._PyTrash_delete_later DATA
-  _PyTrash_delete_nesting=python34._PyTrash_delete_nesting DATA
-  _PyTrash_deposit_object=python34._PyTrash_deposit_object
-  _PyTrash_destroy_chain=python34._PyTrash_destroy_chain
-  _PyWeakref_CallableProxyType=python34._PyWeakref_CallableProxyType DATA
-  _PyWeakref_ProxyType=python34._PyWeakref_ProxyType DATA
-  _PyWeakref_RefType=python34._PyWeakref_RefType DATA
-  _Py_BuildValue_SizeT=python34._Py_BuildValue_SizeT
-  _Py_CheckRecursionLimit=python34._Py_CheckRecursionLimit DATA
-  _Py_CheckRecursiveCall=python34._Py_CheckRecursiveCall
-  _Py_Dealloc=python34._Py_Dealloc
-  _Py_EllipsisObject=python34._Py_EllipsisObject DATA
-  _Py_FalseStruct=python34._Py_FalseStruct DATA
-  _Py_NoneStruct=python34._Py_NoneStruct DATA
-  _Py_NotImplementedStruct=python34._Py_NotImplementedStruct DATA
-  _Py_SwappedOp=python34._Py_SwappedOp DATA
-  _Py_TrueStruct=python34._Py_TrueStruct DATA
-  _Py_VaBuildValue_SizeT=python34._Py_VaBuildValue_SizeT
-  _PyArg_Parse_SizeT=python34._PyArg_Parse_SizeT
-  _PyArg_ParseTuple_SizeT=python34._PyArg_ParseTuple_SizeT
-  _PyArg_ParseTupleAndKeywords_SizeT=python34._PyArg_ParseTupleAndKeywords_SizeT
-  _PyArg_VaParse_SizeT=python34._PyArg_VaParse_SizeT
-  _PyArg_VaParseTupleAndKeywords_SizeT=python34._PyArg_VaParseTupleAndKeywords_SizeT
-  _Py_BuildValue_SizeT=python34._Py_BuildValue_SizeT
+  PyArg_Parse=python35.PyArg_Parse
+  PyArg_ParseTuple=python35.PyArg_ParseTuple
+  PyArg_ParseTupleAndKeywords=python35.PyArg_ParseTupleAndKeywords
+  PyArg_UnpackTuple=python35.PyArg_UnpackTuple
+  PyArg_VaParse=python35.PyArg_VaParse
+  PyArg_VaParseTupleAndKeywords=python35.PyArg_VaParseTupleAndKeywords
+  PyArg_ValidateKeywordArguments=python35.PyArg_ValidateKeywordArguments
+  PyBaseObject_Type=python35.PyBaseObject_Type DATA
+  PyBool_FromLong=python35.PyBool_FromLong
+  PyBool_Type=python35.PyBool_Type DATA
+  PyByteArrayIter_Type=python35.PyByteArrayIter_Type DATA
+  PyByteArray_AsString=python35.PyByteArray_AsString
+  PyByteArray_Concat=python35.PyByteArray_Concat
+  PyByteArray_FromObject=python35.PyByteArray_FromObject
+  PyByteArray_FromStringAndSize=python35.PyByteArray_FromStringAndSize
+  PyByteArray_Resize=python35.PyByteArray_Resize
+  PyByteArray_Size=python35.PyByteArray_Size
+  PyByteArray_Type=python35.PyByteArray_Type DATA
+  PyBytesIter_Type=python35.PyBytesIter_Type DATA
+  PyBytes_AsString=python35.PyBytes_AsString
+  PyBytes_AsStringAndSize=python35.PyBytes_AsStringAndSize
+  PyBytes_Concat=python35.PyBytes_Concat
+  PyBytes_ConcatAndDel=python35.PyBytes_ConcatAndDel
+  PyBytes_DecodeEscape=python35.PyBytes_DecodeEscape
+  PyBytes_FromFormat=python35.PyBytes_FromFormat
+  PyBytes_FromFormatV=python35.PyBytes_FromFormatV
+  PyBytes_FromObject=python35.PyBytes_FromObject
+  PyBytes_FromString=python35.PyBytes_FromString
+  PyBytes_FromStringAndSize=python35.PyBytes_FromStringAndSize
+  PyBytes_Repr=python35.PyBytes_Repr
+  PyBytes_Size=python35.PyBytes_Size
+  PyBytes_Type=python35.PyBytes_Type DATA
+  PyCFunction_Call=python35.PyCFunction_Call
+  PyCFunction_ClearFreeList=python35.PyCFunction_ClearFreeList
+  PyCFunction_GetFlags=python35.PyCFunction_GetFlags
+  PyCFunction_GetFunction=python35.PyCFunction_GetFunction
+  PyCFunction_GetSelf=python35.PyCFunction_GetSelf
+  PyCFunction_New=python35.PyCFunction_New
+  PyCFunction_NewEx=python35.PyCFunction_NewEx
+  PyCFunction_Type=python35.PyCFunction_Type DATA
+  PyCallIter_New=python35.PyCallIter_New
+  PyCallIter_Type=python35.PyCallIter_Type DATA
+  PyCallable_Check=python35.PyCallable_Check
+  PyCapsule_GetContext=python35.PyCapsule_GetContext
+  PyCapsule_GetDestructor=python35.PyCapsule_GetDestructor
+  PyCapsule_GetName=python35.PyCapsule_GetName
+  PyCapsule_GetPointer=python35.PyCapsule_GetPointer
+  PyCapsule_Import=python35.PyCapsule_Import
+  PyCapsule_IsValid=python35.PyCapsule_IsValid
+  PyCapsule_New=python35.PyCapsule_New
+  PyCapsule_SetContext=python35.PyCapsule_SetContext
+  PyCapsule_SetDestructor=python35.PyCapsule_SetDestructor
+  PyCapsule_SetName=python35.PyCapsule_SetName
+  PyCapsule_SetPointer=python35.PyCapsule_SetPointer
+  PyCapsule_Type=python35.PyCapsule_Type DATA
+  PyClassMethodDescr_Type=python35.PyClassMethodDescr_Type DATA
+  PyCodec_BackslashReplaceErrors=python35.PyCodec_BackslashReplaceErrors
+  PyCodec_Decode=python35.PyCodec_Decode
+  PyCodec_Decoder=python35.PyCodec_Decoder
+  PyCodec_Encode=python35.PyCodec_Encode
+  PyCodec_Encoder=python35.PyCodec_Encoder
+  PyCodec_IgnoreErrors=python35.PyCodec_IgnoreErrors
+  PyCodec_IncrementalDecoder=python35.PyCodec_IncrementalDecoder
+  PyCodec_IncrementalEncoder=python35.PyCodec_IncrementalEncoder
+  PyCodec_KnownEncoding=python35.PyCodec_KnownEncoding
+  PyCodec_LookupError=python35.PyCodec_LookupError
+  PyCodec_Register=python35.PyCodec_Register
+  PyCodec_RegisterError=python35.PyCodec_RegisterError
+  PyCodec_ReplaceErrors=python35.PyCodec_ReplaceErrors
+  PyCodec_StreamReader=python35.PyCodec_StreamReader
+  PyCodec_StreamWriter=python35.PyCodec_StreamWriter
+  PyCodec_StrictErrors=python35.PyCodec_StrictErrors
+  PyCodec_XMLCharRefReplaceErrors=python35.PyCodec_XMLCharRefReplaceErrors
+  PyComplex_FromDoubles=python35.PyComplex_FromDoubles
+  PyComplex_ImagAsDouble=python35.PyComplex_ImagAsDouble
+  PyComplex_RealAsDouble=python35.PyComplex_RealAsDouble
+  PyComplex_Type=python35.PyComplex_Type DATA
+  PyDescr_NewClassMethod=python35.PyDescr_NewClassMethod
+  PyDescr_NewGetSet=python35.PyDescr_NewGetSet
+  PyDescr_NewMember=python35.PyDescr_NewMember
+  PyDescr_NewMethod=python35.PyDescr_NewMethod
+  PyDictItems_Type=python35.PyDictItems_Type DATA
+  PyDictIterItem_Type=python35.PyDictIterItem_Type DATA
+  PyDictIterKey_Type=python35.PyDictIterKey_Type DATA
+  PyDictIterValue_Type=python35.PyDictIterValue_Type DATA
+  PyDictKeys_Type=python35.PyDictKeys_Type DATA
+  PyDictProxy_New=python35.PyDictProxy_New
+  PyDictProxy_Type=python35.PyDictProxy_Type DATA
+  PyDictValues_Type=python35.PyDictValues_Type DATA
+  PyDict_Clear=python35.PyDict_Clear
+  PyDict_Contains=python35.PyDict_Contains
+  PyDict_Copy=python35.PyDict_Copy
+  PyDict_DelItem=python35.PyDict_DelItem
+  PyDict_DelItemString=python35.PyDict_DelItemString
+  PyDict_GetItem=python35.PyDict_GetItem
+  PyDict_GetItemString=python35.PyDict_GetItemString
+  PyDict_GetItemWithError=python35.PyDict_GetItemWithError
+  PyDict_Items=python35.PyDict_Items
+  PyDict_Keys=python35.PyDict_Keys
+  PyDict_Merge=python35.PyDict_Merge
+  PyDict_MergeFromSeq2=python35.PyDict_MergeFromSeq2
+  PyDict_New=python35.PyDict_New
+  PyDict_Next=python35.PyDict_Next
+  PyDict_SetItem=python35.PyDict_SetItem
+  PyDict_SetItemString=python35.PyDict_SetItemString
+  PyDict_Size=python35.PyDict_Size
+  PyDict_Type=python35.PyDict_Type DATA
+  PyDict_Update=python35.PyDict_Update
+  PyDict_Values=python35.PyDict_Values
+  PyEllipsis_Type=python35.PyEllipsis_Type DATA
+  PyEnum_Type=python35.PyEnum_Type DATA
+  PyErr_BadArgument=python35.PyErr_BadArgument
+  PyErr_BadInternalCall=python35.PyErr_BadInternalCall
+  PyErr_CheckSignals=python35.PyErr_CheckSignals
+  PyErr_Clear=python35.PyErr_Clear
+  PyErr_Display=python35.PyErr_Display
+  PyErr_ExceptionMatches=python35.PyErr_ExceptionMatches
+  PyErr_Fetch=python35.PyErr_Fetch
+  PyErr_Format=python35.PyErr_Format
+  PyErr_GivenExceptionMatches=python35.PyErr_GivenExceptionMatches
+  PyErr_NewException=python35.PyErr_NewException
+  PyErr_NewExceptionWithDoc=python35.PyErr_NewExceptionWithDoc
+  PyErr_NoMemory=python35.PyErr_NoMemory
+  PyErr_NormalizeException=python35.PyErr_NormalizeException
+  PyErr_Occurred=python35.PyErr_Occurred
+  PyErr_Print=python35.PyErr_Print
+  PyErr_PrintEx=python35.PyErr_PrintEx
+  PyErr_ProgramText=python35.PyErr_ProgramText
+  PyErr_Restore=python35.PyErr_Restore
+  PyErr_SetFromErrno=python35.PyErr_SetFromErrno
+  PyErr_SetFromErrnoWithFilename=python35.PyErr_SetFromErrnoWithFilename
+  PyErr_SetFromErrnoWithFilenameObject=python35.PyErr_SetFromErrnoWithFilenameObject
+  PyErr_SetInterrupt=python35.PyErr_SetInterrupt
+  PyErr_SetNone=python35.PyErr_SetNone
+  PyErr_SetObject=python35.PyErr_SetObject
+  PyErr_SetString=python35.PyErr_SetString
+  PyErr_SyntaxLocation=python35.PyErr_SyntaxLocation
+  PyErr_WarnEx=python35.PyErr_WarnEx
+  PyErr_WarnExplicit=python35.PyErr_WarnExplicit
+  PyErr_WarnFormat=python35.PyErr_WarnFormat
+  PyErr_WriteUnraisable=python35.PyErr_WriteUnraisable
+  PyEval_AcquireLock=python35.PyEval_AcquireLock
+  PyEval_AcquireThread=python35.PyEval_AcquireThread
+  PyEval_CallFunction=python35.PyEval_CallFunction
+  PyEval_CallMethod=python35.PyEval_CallMethod
+  PyEval_CallObjectWithKeywords=python35.PyEval_CallObjectWithKeywords
+  PyEval_EvalCode=python35.PyEval_EvalCode
+  PyEval_EvalCodeEx=python35.PyEval_EvalCodeEx
+  PyEval_EvalFrame=python35.PyEval_EvalFrame
+  PyEval_EvalFrameEx=python35.PyEval_EvalFrameEx
+  PyEval_GetBuiltins=python35.PyEval_GetBuiltins
+  PyEval_GetCallStats=python35.PyEval_GetCallStats
+  PyEval_GetFrame=python35.PyEval_GetFrame
+  PyEval_GetFuncDesc=python35.PyEval_GetFuncDesc
+  PyEval_GetFuncName=python35.PyEval_GetFuncName
+  PyEval_GetGlobals=python35.PyEval_GetGlobals
+  PyEval_GetLocals=python35.PyEval_GetLocals
+  PyEval_InitThreads=python35.PyEval_InitThreads
+  PyEval_ReInitThreads=python35.PyEval_ReInitThreads
+  PyEval_ReleaseLock=python35.PyEval_ReleaseLock
+  PyEval_ReleaseThread=python35.PyEval_ReleaseThread
+  PyEval_RestoreThread=python35.PyEval_RestoreThread
+  PyEval_SaveThread=python35.PyEval_SaveThread
+  PyEval_ThreadsInitialized=python35.PyEval_ThreadsInitialized
+  PyExc_ArithmeticError=python35.PyExc_ArithmeticError DATA
+  PyExc_AssertionError=python35.PyExc_AssertionError DATA
+  PyExc_AttributeError=python35.PyExc_AttributeError DATA
+  PyExc_BaseException=python35.PyExc_BaseException DATA
+  PyExc_BufferError=python35.PyExc_BufferError DATA
+  PyExc_BytesWarning=python35.PyExc_BytesWarning DATA
+  PyExc_DeprecationWarning=python35.PyExc_DeprecationWarning DATA
+  PyExc_EOFError=python35.PyExc_EOFError DATA
+  PyExc_EnvironmentError=python35.PyExc_EnvironmentError DATA
+  PyExc_Exception=python35.PyExc_Exception DATA
+  PyExc_FloatingPointError=python35.PyExc_FloatingPointError DATA
+  PyExc_FutureWarning=python35.PyExc_FutureWarning DATA
+  PyExc_GeneratorExit=python35.PyExc_GeneratorExit DATA
+  PyExc_IOError=python35.PyExc_IOError DATA
+  PyExc_ImportError=python35.PyExc_ImportError DATA
+  PyExc_ImportWarning=python35.PyExc_ImportWarning DATA
+  PyExc_IndentationError=python35.PyExc_IndentationError DATA
+  PyExc_IndexError=python35.PyExc_IndexError DATA
+  PyExc_KeyError=python35.PyExc_KeyError DATA
+  PyExc_KeyboardInterrupt=python35.PyExc_KeyboardInterrupt DATA
+  PyExc_LookupError=python35.PyExc_LookupError DATA
+  PyExc_MemoryError=python35.PyExc_MemoryError DATA
+  PyExc_MemoryErrorInst=python35.PyExc_MemoryErrorInst DATA
+  PyExc_NameError=python35.PyExc_NameError DATA
+  PyExc_NotImplementedError=python35.PyExc_NotImplementedError DATA
+  PyExc_OSError=python35.PyExc_OSError DATA
+  PyExc_OverflowError=python35.PyExc_OverflowError DATA
+  PyExc_PendingDeprecationWarning=python35.PyExc_PendingDeprecationWarning DATA
+  PyExc_RecursionErrorInst=python35.PyExc_RecursionErrorInst DATA
+  PyExc_ReferenceError=python35.PyExc_ReferenceError DATA
+  PyExc_RuntimeError=python35.PyExc_RuntimeError DATA
+  PyExc_RuntimeWarning=python35.PyExc_RuntimeWarning DATA
+  PyExc_StopIteration=python35.PyExc_StopIteration DATA
+  PyExc_SyntaxError=python35.PyExc_SyntaxError DATA
+  PyExc_SyntaxWarning=python35.PyExc_SyntaxWarning DATA
+  PyExc_SystemError=python35.PyExc_SystemError DATA
+  PyExc_SystemExit=python35.PyExc_SystemExit DATA
+  PyExc_TabError=python35.PyExc_TabError DATA
+  PyExc_TypeError=python35.PyExc_TypeError DATA
+  PyExc_UnboundLocalError=python35.PyExc_UnboundLocalError DATA
+  PyExc_UnicodeDecodeError=python35.PyExc_UnicodeDecodeError DATA
+  PyExc_UnicodeEncodeError=python35.PyExc_UnicodeEncodeError DATA
+  PyExc_UnicodeError=python35.PyExc_UnicodeError DATA
+  PyExc_UnicodeTranslateError=python35.PyExc_UnicodeTranslateError DATA
+  PyExc_UnicodeWarning=python35.PyExc_UnicodeWarning DATA
+  PyExc_UserWarning=python35.PyExc_UserWarning DATA
+  PyExc_ValueError=python35.PyExc_ValueError DATA
+  PyExc_Warning=python35.PyExc_Warning DATA
+  PyExc_ZeroDivisionError=python35.PyExc_ZeroDivisionError DATA
+  PyException_GetCause=python35.PyException_GetCause
+  PyException_GetContext=python35.PyException_GetContext
+  PyException_GetTraceback=python35.PyException_GetTraceback
+  PyException_SetCause=python35.PyException_SetCause
+  PyException_SetContext=python35.PyException_SetContext
+  PyException_SetTraceback=python35.PyException_SetTraceback
+  PyFile_FromFd=python35.PyFile_FromFd
+  PyFile_GetLine=python35.PyFile_GetLine
+  PyFile_WriteObject=python35.PyFile_WriteObject
+  PyFile_WriteString=python35.PyFile_WriteString
+  PyFilter_Type=python35.PyFilter_Type DATA
+  PyFloat_AsDouble=python35.PyFloat_AsDouble
+  PyFloat_FromDouble=python35.PyFloat_FromDouble
+  PyFloat_FromString=python35.PyFloat_FromString
+  PyFloat_GetInfo=python35.PyFloat_GetInfo
+  PyFloat_GetMax=python35.PyFloat_GetMax
+  PyFloat_GetMin=python35.PyFloat_GetMin
+  PyFloat_Type=python35.PyFloat_Type DATA
+  PyFrozenSet_New=python35.PyFrozenSet_New
+  PyFrozenSet_Type=python35.PyFrozenSet_Type DATA
+  PyGC_Collect=python35.PyGC_Collect
+  PyGILState_Ensure=python35.PyGILState_Ensure
+  PyGILState_GetThisThreadState=python35.PyGILState_GetThisThreadState
+  PyGILState_Release=python35.PyGILState_Release
+  PyGetSetDescr_Type=python35.PyGetSetDescr_Type DATA
+  PyImport_AddModule=python35.PyImport_AddModule
+  PyImport_AppendInittab=python35.PyImport_AppendInittab
+  PyImport_Cleanup=python35.PyImport_Cleanup
+  PyImport_ExecCodeModule=python35.PyImport_ExecCodeModule
+  PyImport_ExecCodeModuleEx=python35.PyImport_ExecCodeModuleEx
+  PyImport_ExecCodeModuleWithPathnames=python35.PyImport_ExecCodeModuleWithPathnames
+  PyImport_GetImporter=python35.PyImport_GetImporter
+  PyImport_GetMagicNumber=python35.PyImport_GetMagicNumber
+  PyImport_GetMagicTag=python35.PyImport_GetMagicTag
+  PyImport_GetModuleDict=python35.PyImport_GetModuleDict
+  PyImport_Import=python35.PyImport_Import
+  PyImport_ImportFrozenModule=python35.PyImport_ImportFrozenModule
+  PyImport_ImportModule=python35.PyImport_ImportModule
+  PyImport_ImportModuleLevel=python35.PyImport_ImportModuleLevel
+  PyImport_ImportModuleNoBlock=python35.PyImport_ImportModuleNoBlock
+  PyImport_ReloadModule=python35.PyImport_ReloadModule
+  PyInterpreterState_Clear=python35.PyInterpreterState_Clear
+  PyInterpreterState_Delete=python35.PyInterpreterState_Delete
+  PyInterpreterState_New=python35.PyInterpreterState_New
+  PyIter_Next=python35.PyIter_Next
+  PyListIter_Type=python35.PyListIter_Type DATA
+  PyListRevIter_Type=python35.PyListRevIter_Type DATA
+  PyList_Append=python35.PyList_Append
+  PyList_AsTuple=python35.PyList_AsTuple
+  PyList_GetItem=python35.PyList_GetItem
+  PyList_GetSlice=python35.PyList_GetSlice
+  PyList_Insert=python35.PyList_Insert
+  PyList_New=python35.PyList_New
+  PyList_Reverse=python35.PyList_Reverse
+  PyList_SetItem=python35.PyList_SetItem
+  PyList_SetSlice=python35.PyList_SetSlice
+  PyList_Size=python35.PyList_Size
+  PyList_Sort=python35.PyList_Sort
+  PyList_Type=python35.PyList_Type DATA
+  PyLongRangeIter_Type=python35.PyLongRangeIter_Type DATA
+  PyLong_AsDouble=python35.PyLong_AsDouble
+  PyLong_AsLong=python35.PyLong_AsLong
+  PyLong_AsLongAndOverflow=python35.PyLong_AsLongAndOverflow
+  PyLong_AsLongLong=python35.PyLong_AsLongLong
+  PyLong_AsLongLongAndOverflow=python35.PyLong_AsLongLongAndOverflow
+  PyLong_AsSize_t=python35.PyLong_AsSize_t
+  PyLong_AsSsize_t=python35.PyLong_AsSsize_t
+  PyLong_AsUnsignedLong=python35.PyLong_AsUnsignedLong
+  PyLong_AsUnsignedLongLong=python35.PyLong_AsUnsignedLongLong
+  PyLong_AsUnsignedLongLongMask=python35.PyLong_AsUnsignedLongLongMask
+  PyLong_AsUnsignedLongMask=python35.PyLong_AsUnsignedLongMask
+  PyLong_AsVoidPtr=python35.PyLong_AsVoidPtr
+  PyLong_FromDouble=python35.PyLong_FromDouble
+  PyLong_FromLong=python35.PyLong_FromLong
+  PyLong_FromLongLong=python35.PyLong_FromLongLong
+  PyLong_FromSize_t=python35.PyLong_FromSize_t
+  PyLong_FromSsize_t=python35.PyLong_FromSsize_t
+  PyLong_FromString=python35.PyLong_FromString
+  PyLong_FromUnsignedLong=python35.PyLong_FromUnsignedLong
+  PyLong_FromUnsignedLongLong=python35.PyLong_FromUnsignedLongLong
+  PyLong_FromVoidPtr=python35.PyLong_FromVoidPtr
+  PyLong_GetInfo=python35.PyLong_GetInfo
+  PyLong_Type=python35.PyLong_Type DATA
+  PyMap_Type=python35.PyMap_Type DATA
+  PyMapping_Check=python35.PyMapping_Check
+  PyMapping_GetItemString=python35.PyMapping_GetItemString
+  PyMapping_HasKey=python35.PyMapping_HasKey
+  PyMapping_HasKeyString=python35.PyMapping_HasKeyString
+  PyMapping_Items=python35.PyMapping_Items
+  PyMapping_Keys=python35.PyMapping_Keys
+  PyMapping_Length=python35.PyMapping_Length
+  PyMapping_SetItemString=python35.PyMapping_SetItemString
+  PyMapping_Size=python35.PyMapping_Size
+  PyMapping_Values=python35.PyMapping_Values
+  PyMem_Free=python35.PyMem_Free
+  PyMem_Malloc=python35.PyMem_Malloc
+  PyMem_Realloc=python35.PyMem_Realloc
+  PyMemberDescr_Type=python35.PyMemberDescr_Type DATA
+  PyMemoryView_FromObject=python35.PyMemoryView_FromObject
+  PyMemoryView_GetContiguous=python35.PyMemoryView_GetContiguous
+  PyMemoryView_Type=python35.PyMemoryView_Type DATA
+  PyMethodDescr_Type=python35.PyMethodDescr_Type DATA
+  PyModule_AddIntConstant=python35.PyModule_AddIntConstant
+  PyModule_AddObject=python35.PyModule_AddObject
+  PyModule_AddStringConstant=python35.PyModule_AddStringConstant
+  PyModule_Create2=python35.PyModule_Create2
+  PyModule_GetDef=python35.PyModule_GetDef
+  PyModule_GetDict=python35.PyModule_GetDict
+  PyModule_GetFilename=python35.PyModule_GetFilename
+  PyModule_GetFilenameObject=python35.PyModule_GetFilenameObject
+  PyModule_GetName=python35.PyModule_GetName
+  PyModule_GetState=python35.PyModule_GetState
+  PyModule_New=python35.PyModule_New
+  PyModule_Type=python35.PyModule_Type DATA
+  PyNullImporter_Type=python35.PyNullImporter_Type DATA
+  PyNumber_Absolute=python35.PyNumber_Absolute
+  PyNumber_Add=python35.PyNumber_Add
+  PyNumber_And=python35.PyNumber_And
+  PyNumber_AsSsize_t=python35.PyNumber_AsSsize_t
+  PyNumber_Check=python35.PyNumber_Check
+  PyNumber_Divmod=python35.PyNumber_Divmod
+  PyNumber_Float=python35.PyNumber_Float
+  PyNumber_FloorDivide=python35.PyNumber_FloorDivide
+  PyNumber_InPlaceAdd=python35.PyNumber_InPlaceAdd
+  PyNumber_InPlaceAnd=python35.PyNumber_InPlaceAnd
+  PyNumber_InPlaceFloorDivide=python35.PyNumber_InPlaceFloorDivide
+  PyNumber_InPlaceLshift=python35.PyNumber_InPlaceLshift
+  PyNumber_InPlaceMultiply=python35.PyNumber_InPlaceMultiply
+  PyNumber_InPlaceOr=python35.PyNumber_InPlaceOr
+  PyNumber_InPlacePower=python35.PyNumber_InPlacePower
+  PyNumber_InPlaceRemainder=python35.PyNumber_InPlaceRemainder
+  PyNumber_InPlaceRshift=python35.PyNumber_InPlaceRshift
+  PyNumber_InPlaceSubtract=python35.PyNumber_InPlaceSubtract
+  PyNumber_InPlaceTrueDivide=python35.PyNumber_InPlaceTrueDivide
+  PyNumber_InPlaceXor=python35.PyNumber_InPlaceXor
+  PyNumber_Index=python35.PyNumber_Index
+  PyNumber_Invert=python35.PyNumber_Invert
+  PyNumber_Long=python35.PyNumber_Long
+  PyNumber_Lshift=python35.PyNumber_Lshift
+  PyNumber_Multiply=python35.PyNumber_Multiply
+  PyNumber_Negative=python35.PyNumber_Negative
+  PyNumber_Or=python35.PyNumber_Or
+  PyNumber_Positive=python35.PyNumber_Positive
+  PyNumber_Power=python35.PyNumber_Power
+  PyNumber_Remainder=python35.PyNumber_Remainder
+  PyNumber_Rshift=python35.PyNumber_Rshift
+  PyNumber_Subtract=python35.PyNumber_Subtract
+  PyNumber_ToBase=python35.PyNumber_ToBase
+  PyNumber_TrueDivide=python35.PyNumber_TrueDivide
+  PyNumber_Xor=python35.PyNumber_Xor
+  PyOS_AfterFork=python35.PyOS_AfterFork
+  PyOS_InitInterrupts=python35.PyOS_InitInterrupts
+  PyOS_InputHook=python35.PyOS_InputHook DATA
+  PyOS_InterruptOccurred=python35.PyOS_InterruptOccurred
+  PyOS_ReadlineFunctionPointer=python35.PyOS_ReadlineFunctionPointer DATA
+  PyOS_double_to_string=python35.PyOS_double_to_string
+  PyOS_getsig=python35.PyOS_getsig
+  PyOS_mystricmp=python35.PyOS_mystricmp
+  PyOS_mystrnicmp=python35.PyOS_mystrnicmp
+  PyOS_setsig=python35.PyOS_setsig
+  PyOS_snprintf=python35.PyOS_snprintf
+  PyOS_string_to_double=python35.PyOS_string_to_double
+  PyOS_strtol=python35.PyOS_strtol
+  PyOS_strtoul=python35.PyOS_strtoul
+  PyOS_vsnprintf=python35.PyOS_vsnprintf
+  PyObject_ASCII=python35.PyObject_ASCII
+  PyObject_AsCharBuffer=python35.PyObject_AsCharBuffer
+  PyObject_AsFileDescriptor=python35.PyObject_AsFileDescriptor
+  PyObject_AsReadBuffer=python35.PyObject_AsReadBuffer
+  PyObject_AsWriteBuffer=python35.PyObject_AsWriteBuffer
+  PyObject_Bytes=python35.PyObject_Bytes
+  PyObject_Call=python35.PyObject_Call
+  PyObject_CallFunction=python35.PyObject_CallFunction
+  PyObject_CallFunctionObjArgs=python35.PyObject_CallFunctionObjArgs
+  PyObject_CallMethod=python35.PyObject_CallMethod
+  PyObject_CallMethodObjArgs=python35.PyObject_CallMethodObjArgs
+  PyObject_CallObject=python35.PyObject_CallObject
+  PyObject_CheckReadBuffer=python35.PyObject_CheckReadBuffer
+  PyObject_ClearWeakRefs=python35.PyObject_ClearWeakRefs
+  PyObject_DelItem=python35.PyObject_DelItem
+  PyObject_DelItemString=python35.PyObject_DelItemString
+  PyObject_Dir=python35.PyObject_Dir
+  PyObject_Format=python35.PyObject_Format
+  PyObject_Free=python35.PyObject_Free
+  PyObject_GC_Del=python35.PyObject_GC_Del
+  PyObject_GC_Track=python35.PyObject_GC_Track
+  PyObject_GC_UnTrack=python35.PyObject_GC_UnTrack
+  PyObject_GenericGetAttr=python35.PyObject_GenericGetAttr
+  PyObject_GenericSetAttr=python35.PyObject_GenericSetAttr
+  PyObject_GetAttr=python35.PyObject_GetAttr
+  PyObject_GetAttrString=python35.PyObject_GetAttrString
+  PyObject_GetItem=python35.PyObject_GetItem
+  PyObject_GetIter=python35.PyObject_GetIter
+  PyObject_HasAttr=python35.PyObject_HasAttr
+  PyObject_HasAttrString=python35.PyObject_HasAttrString
+  PyObject_Hash=python35.PyObject_Hash
+  PyObject_HashNotImplemented=python35.PyObject_HashNotImplemented
+  PyObject_Init=python35.PyObject_Init
+  PyObject_InitVar=python35.PyObject_InitVar
+  PyObject_IsInstance=python35.PyObject_IsInstance
+  PyObject_IsSubclass=python35.PyObject_IsSubclass
+  PyObject_IsTrue=python35.PyObject_IsTrue
+  PyObject_Length=python35.PyObject_Length
+  PyObject_Malloc=python35.PyObject_Malloc
+  PyObject_Not=python35.PyObject_Not
+  PyObject_Realloc=python35.PyObject_Realloc
+  PyObject_Repr=python35.PyObject_Repr
+  PyObject_RichCompare=python35.PyObject_RichCompare
+  PyObject_RichCompareBool=python35.PyObject_RichCompareBool
+  PyObject_SelfIter=python35.PyObject_SelfIter
+  PyObject_SetAttr=python35.PyObject_SetAttr
+  PyObject_SetAttrString=python35.PyObject_SetAttrString
+  PyObject_SetItem=python35.PyObject_SetItem
+  PyObject_Size=python35.PyObject_Size
+  PyObject_Str=python35.PyObject_Str
+  PyObject_Type=python35.PyObject_Type DATA
+  PyParser_SimpleParseFileFlags=python35.PyParser_SimpleParseFileFlags
+  PyParser_SimpleParseStringFlags=python35.PyParser_SimpleParseStringFlags
+  PyProperty_Type=python35.PyProperty_Type DATA
+  PyRangeIter_Type=python35.PyRangeIter_Type DATA
+  PyRange_Type=python35.PyRange_Type DATA
+  PyReversed_Type=python35.PyReversed_Type DATA
+  PySeqIter_New=python35.PySeqIter_New
+  PySeqIter_Type=python35.PySeqIter_Type DATA
+  PySequence_Check=python35.PySequence_Check
+  PySequence_Concat=python35.PySequence_Concat
+  PySequence_Contains=python35.PySequence_Contains
+  PySequence_Count=python35.PySequence_Count
+  PySequence_DelItem=python35.PySequence_DelItem
+  PySequence_DelSlice=python35.PySequence_DelSlice
+  PySequence_Fast=python35.PySequence_Fast
+  PySequence_GetItem=python35.PySequence_GetItem
+  PySequence_GetSlice=python35.PySequence_GetSlice
+  PySequence_In=python35.PySequence_In
+  PySequence_InPlaceConcat=python35.PySequence_InPlaceConcat
+  PySequence_InPlaceRepeat=python35.PySequence_InPlaceRepeat
+  PySequence_Index=python35.PySequence_Index
+  PySequence_Length=python35.PySequence_Length
+  PySequence_List=python35.PySequence_List
+  PySequence_Repeat=python35.PySequence_Repeat
+  PySequence_SetItem=python35.PySequence_SetItem
+  PySequence_SetSlice=python35.PySequence_SetSlice
+  PySequence_Size=python35.PySequence_Size
+  PySequence_Tuple=python35.PySequence_Tuple
+  PySetIter_Type=python35.PySetIter_Type DATA
+  PySet_Add=python35.PySet_Add
+  PySet_Clear=python35.PySet_Clear
+  PySet_Contains=python35.PySet_Contains
+  PySet_Discard=python35.PySet_Discard
+  PySet_New=python35.PySet_New
+  PySet_Pop=python35.PySet_Pop
+  PySet_Size=python35.PySet_Size
+  PySet_Type=python35.PySet_Type DATA
+  PySlice_GetIndices=python35.PySlice_GetIndices
+  PySlice_GetIndicesEx=python35.PySlice_GetIndicesEx
+  PySlice_New=python35.PySlice_New
+  PySlice_Type=python35.PySlice_Type DATA
+  PySortWrapper_Type=python35.PySortWrapper_Type DATA
+  PyState_FindModule=python35.PyState_FindModule
+  PyState_AddModule=python35.PyState_AddModule
+  PyState_RemoveModule=python35.PyState_RemoveModule
+  PyStructSequence_GetItem=python35.PyStructSequence_GetItem
+  PyStructSequence_New=python35.PyStructSequence_New
+  PyStructSequence_NewType=python35.PyStructSequence_NewType
+  PyStructSequence_SetItem=python35.PyStructSequence_SetItem
+  PySuper_Type=python35.PySuper_Type DATA
+  PySys_AddWarnOption=python35.PySys_AddWarnOption
+  PySys_AddWarnOptionUnicode=python35.PySys_AddWarnOptionUnicode
+  PySys_FormatStderr=python35.PySys_FormatStderr
+  PySys_FormatStdout=python35.PySys_FormatStdout
+  PySys_GetObject=python35.PySys_GetObject
+  PySys_HasWarnOptions=python35.PySys_HasWarnOptions
+  PySys_ResetWarnOptions=python35.PySys_ResetWarnOptions
+  PySys_SetArgv=python35.PySys_SetArgv
+  PySys_SetArgvEx=python35.PySys_SetArgvEx
+  PySys_SetObject=python35.PySys_SetObject
+  PySys_SetPath=python35.PySys_SetPath
+  PySys_WriteStderr=python35.PySys_WriteStderr
+  PySys_WriteStdout=python35.PySys_WriteStdout
+  PyThreadState_Clear=python35.PyThreadState_Clear
+  PyThreadState_Delete=python35.PyThreadState_Delete
+  PyThreadState_DeleteCurrent=python35.PyThreadState_DeleteCurrent
+  PyThreadState_Get=python35.PyThreadState_Get
+  PyThreadState_GetDict=python35.PyThreadState_GetDict
+  PyThreadState_New=python35.PyThreadState_New
+  PyThreadState_SetAsyncExc=python35.PyThreadState_SetAsyncExc
+  PyThreadState_Swap=python35.PyThreadState_Swap
+  PyTraceBack_Here=python35.PyTraceBack_Here
+  PyTraceBack_Print=python35.PyTraceBack_Print
+  PyTraceBack_Type=python35.PyTraceBack_Type DATA
+  PyTupleIter_Type=python35.PyTupleIter_Type DATA
+  PyTuple_ClearFreeList=python35.PyTuple_ClearFreeList
+  PyTuple_GetItem=python35.PyTuple_GetItem
+  PyTuple_GetSlice=python35.PyTuple_GetSlice
+  PyTuple_New=python35.PyTuple_New
+  PyTuple_Pack=python35.PyTuple_Pack
+  PyTuple_SetItem=python35.PyTuple_SetItem
+  PyTuple_Size=python35.PyTuple_Size
+  PyTuple_Type=python35.PyTuple_Type DATA
+  PyType_ClearCache=python35.PyType_ClearCache
+  PyType_FromSpec=python35.PyType_FromSpec
+  PyType_FromSpecWithBases=python35.PyType_FromSpecWithBases
+  PyType_GenericAlloc=python35.PyType_GenericAlloc
+  PyType_GenericNew=python35.PyType_GenericNew
+  PyType_GetFlags=python35.PyType_GetFlags
+  PyType_GetSlot=python35.PyType_GetSlot
+  PyType_IsSubtype=python35.PyType_IsSubtype
+  PyType_Modified=python35.PyType_Modified
+  PyType_Ready=python35.PyType_Ready
+  PyType_Type=python35.PyType_Type DATA
+  PyUnicodeDecodeError_Create=python35.PyUnicodeDecodeError_Create
+  PyUnicodeDecodeError_GetEncoding=python35.PyUnicodeDecodeError_GetEncoding
+  PyUnicodeDecodeError_GetEnd=python35.PyUnicodeDecodeError_GetEnd
+  PyUnicodeDecodeError_GetObject=python35.PyUnicodeDecodeError_GetObject
+  PyUnicodeDecodeError_GetReason=python35.PyUnicodeDecodeError_GetReason
+  PyUnicodeDecodeError_GetStart=python35.PyUnicodeDecodeError_GetStart
+  PyUnicodeDecodeError_SetEnd=python35.PyUnicodeDecodeError_SetEnd
+  PyUnicodeDecodeError_SetReason=python35.PyUnicodeDecodeError_SetReason
+  PyUnicodeDecodeError_SetStart=python35.PyUnicodeDecodeError_SetStart
+  PyUnicodeEncodeError_GetEncoding=python35.PyUnicodeEncodeError_GetEncoding
+  PyUnicodeEncodeError_GetEnd=python35.PyUnicodeEncodeError_GetEnd
+  PyUnicodeEncodeError_GetObject=python35.PyUnicodeEncodeError_GetObject
+  PyUnicodeEncodeError_GetReason=python35.PyUnicodeEncodeError_GetReason
+  PyUnicodeEncodeError_GetStart=python35.PyUnicodeEncodeError_GetStart
+  PyUnicodeEncodeError_SetEnd=python35.PyUnicodeEncodeError_SetEnd
+  PyUnicodeEncodeError_SetReason=python35.PyUnicodeEncodeError_SetReason
+  PyUnicodeEncodeError_SetStart=python35.PyUnicodeEncodeError_SetStart
+  PyUnicodeIter_Type=python35.PyUnicodeIter_Type DATA
+  PyUnicodeTranslateError_GetEnd=python35.PyUnicodeTranslateError_GetEnd
+  PyUnicodeTranslateError_GetObject=python35.PyUnicodeTranslateError_GetObject
+  PyUnicodeTranslateError_GetReason=python35.PyUnicodeTranslateError_GetReason
+  PyUnicodeTranslateError_GetStart=python35.PyUnicodeTranslateError_GetStart
+  PyUnicodeTranslateError_SetEnd=python35.PyUnicodeTranslateError_SetEnd
+  PyUnicodeTranslateError_SetReason=python35.PyUnicodeTranslateError_SetReason
+  PyUnicodeTranslateError_SetStart=python35.PyUnicodeTranslateError_SetStart
+  PyUnicode_Append=python35.PyUnicode_Append
+  PyUnicode_AppendAndDel=python35.PyUnicode_AppendAndDel
+  PyUnicode_AsASCIIString=python35.PyUnicode_AsASCIIString
+  PyUnicode_AsCharmapString=python35.PyUnicode_AsCharmapString
+  PyUnicode_AsDecodedObject=python35.PyUnicode_AsDecodedObject
+  PyUnicode_AsDecodedUnicode=python35.PyUnicode_AsDecodedUnicode
+  PyUnicode_AsEncodedObject=python35.PyUnicode_AsEncodedObject
+  PyUnicode_AsEncodedString=python35.PyUnicode_AsEncodedString
+  PyUnicode_AsEncodedUnicode=python35.PyUnicode_AsEncodedUnicode
+  PyUnicode_AsLatin1String=python35.PyUnicode_AsLatin1String
+  PyUnicode_AsRawUnicodeEscapeString=python35.PyUnicode_AsRawUnicodeEscapeString
+  PyUnicode_AsUTF16String=python35.PyUnicode_AsUTF16String
+  PyUnicode_AsUTF32String=python35.PyUnicode_AsUTF32String
+  PyUnicode_AsUTF8String=python35.PyUnicode_AsUTF8String
+  PyUnicode_AsUnicodeEscapeString=python35.PyUnicode_AsUnicodeEscapeString
+  PyUnicode_AsWideChar=python35.PyUnicode_AsWideChar
+  PyUnicode_ClearFreelist=python35.PyUnicode_ClearFreelist
+  PyUnicode_Compare=python35.PyUnicode_Compare
+  PyUnicode_Concat=python35.PyUnicode_Concat
+  PyUnicode_Contains=python35.PyUnicode_Contains
+  PyUnicode_Count=python35.PyUnicode_Count
+  PyUnicode_Decode=python35.PyUnicode_Decode
+  PyUnicode_DecodeASCII=python35.PyUnicode_DecodeASCII
+  PyUnicode_DecodeCharmap=python35.PyUnicode_DecodeCharmap
+  PyUnicode_DecodeFSDefault=python35.PyUnicode_DecodeFSDefault
+  PyUnicode_DecodeFSDefaultAndSize=python35.PyUnicode_DecodeFSDefaultAndSize
+  PyUnicode_DecodeLatin1=python35.PyUnicode_DecodeLatin1
+  PyUnicode_DecodeRawUnicodeEscape=python35.PyUnicode_DecodeRawUnicodeEscape
+  PyUnicode_DecodeUTF16=python35.PyUnicode_DecodeUTF16
+  PyUnicode_DecodeUTF16Stateful=python35.PyUnicode_DecodeUTF16Stateful
+  PyUnicode_DecodeUTF32=python35.PyUnicode_DecodeUTF32
+  PyUnicode_DecodeUTF32Stateful=python35.PyUnicode_DecodeUTF32Stateful
+  PyUnicode_DecodeUTF8=python35.PyUnicode_DecodeUTF8
+  PyUnicode_DecodeUTF8Stateful=python35.PyUnicode_DecodeUTF8Stateful
+  PyUnicode_DecodeUnicodeEscape=python35.PyUnicode_DecodeUnicodeEscape
+  PyUnicode_FSConverter=python35.PyUnicode_FSConverter
+  PyUnicode_FSDecoder=python35.PyUnicode_FSDecoder
+  PyUnicode_Find=python35.PyUnicode_Find
+  PyUnicode_Format=python35.PyUnicode_Format
+  PyUnicode_FromEncodedObject=python35.PyUnicode_FromEncodedObject
+  PyUnicode_FromFormat=python35.PyUnicode_FromFormat
+  PyUnicode_FromFormatV=python35.PyUnicode_FromFormatV
+  PyUnicode_FromObject=python35.PyUnicode_FromObject
+  PyUnicode_FromOrdinal=python35.PyUnicode_FromOrdinal
+  PyUnicode_FromString=python35.PyUnicode_FromString
+  PyUnicode_FromStringAndSize=python35.PyUnicode_FromStringAndSize
+  PyUnicode_FromWideChar=python35.PyUnicode_FromWideChar
+  PyUnicode_GetDefaultEncoding=python35.PyUnicode_GetDefaultEncoding
+  PyUnicode_GetSize=python35.PyUnicode_GetSize
+  PyUnicode_IsIdentifier=python35.PyUnicode_IsIdentifier
+  PyUnicode_Join=python35.PyUnicode_Join
+  PyUnicode_Partition=python35.PyUnicode_Partition
+  PyUnicode_RPartition=python35.PyUnicode_RPartition
+  PyUnicode_RSplit=python35.PyUnicode_RSplit
+  PyUnicode_Replace=python35.PyUnicode_Replace
+  PyUnicode_Resize=python35.PyUnicode_Resize
+  PyUnicode_RichCompare=python35.PyUnicode_RichCompare
+  PyUnicode_SetDefaultEncoding=python35.PyUnicode_SetDefaultEncoding
+  PyUnicode_Split=python35.PyUnicode_Split
+  PyUnicode_Splitlines=python35.PyUnicode_Splitlines
+  PyUnicode_Tailmatch=python35.PyUnicode_Tailmatch
+  PyUnicode_Translate=python35.PyUnicode_Translate
+  PyUnicode_BuildEncodingMap=python35.PyUnicode_BuildEncodingMap
+  PyUnicode_CompareWithASCIIString=python35.PyUnicode_CompareWithASCIIString
+  PyUnicode_DecodeUTF7=python35.PyUnicode_DecodeUTF7
+  PyUnicode_DecodeUTF7Stateful=python35.PyUnicode_DecodeUTF7Stateful
+  PyUnicode_EncodeFSDefault=python35.PyUnicode_EncodeFSDefault
+  PyUnicode_InternFromString=python35.PyUnicode_InternFromString
+  PyUnicode_InternImmortal=python35.PyUnicode_InternImmortal
+  PyUnicode_InternInPlace=python35.PyUnicode_InternInPlace
+  PyUnicode_Type=python35.PyUnicode_Type DATA
+  PyWeakref_GetObject=python35.PyWeakref_GetObject DATA
+  PyWeakref_NewProxy=python35.PyWeakref_NewProxy
+  PyWeakref_NewRef=python35.PyWeakref_NewRef
+  PyWrapperDescr_Type=python35.PyWrapperDescr_Type DATA
+  PyWrapper_New=python35.PyWrapper_New
+  PyZip_Type=python35.PyZip_Type DATA
+  Py_AddPendingCall=python35.Py_AddPendingCall
+  Py_AtExit=python35.Py_AtExit
+  Py_BuildValue=python35.Py_BuildValue
+  Py_CompileString=python35.Py_CompileString
+  Py_DecRef=python35.Py_DecRef
+  Py_EndInterpreter=python35.Py_EndInterpreter
+  Py_Exit=python35.Py_Exit
+  Py_FatalError=python35.Py_FatalError
+  Py_FileSystemDefaultEncoding=python35.Py_FileSystemDefaultEncoding DATA
+  Py_Finalize=python35.Py_Finalize
+  Py_GetBuildInfo=python35.Py_GetBuildInfo
+  Py_GetCompiler=python35.Py_GetCompiler
+  Py_GetCopyright=python35.Py_GetCopyright
+  Py_GetExecPrefix=python35.Py_GetExecPrefix
+  Py_GetPath=python35.Py_GetPath
+  Py_GetPlatform=python35.Py_GetPlatform
+  Py_GetPrefix=python35.Py_GetPrefix
+  Py_GetProgramFullPath=python35.Py_GetProgramFullPath
+  Py_GetProgramName=python35.Py_GetProgramName
+  Py_GetPythonHome=python35.Py_GetPythonHome
+  Py_GetRecursionLimit=python35.Py_GetRecursionLimit
+  Py_GetVersion=python35.Py_GetVersion
+  Py_HasFileSystemDefaultEncoding=python35.Py_HasFileSystemDefaultEncoding DATA
+  Py_IncRef=python35.Py_IncRef
+  Py_Initialize=python35.Py_Initialize
+  Py_InitializeEx=python35.Py_InitializeEx
+  Py_IsInitialized=python35.Py_IsInitialized
+  Py_Main=python35.Py_Main
+  Py_MakePendingCalls=python35.Py_MakePendingCalls
+  Py_NewInterpreter=python35.Py_NewInterpreter
+  Py_ReprEnter=python35.Py_ReprEnter
+  Py_ReprLeave=python35.Py_ReprLeave
+  Py_SetProgramName=python35.Py_SetProgramName
+  Py_SetPythonHome=python35.Py_SetPythonHome
+  Py_SetRecursionLimit=python35.Py_SetRecursionLimit
+  Py_SymtableString=python35.Py_SymtableString
+  Py_VaBuildValue=python35.Py_VaBuildValue
+  _PyErr_BadInternalCall=python35._PyErr_BadInternalCall
+  _PyObject_CallFunction_SizeT=python35._PyObject_CallFunction_SizeT
+  _PyObject_CallMethod_SizeT=python35._PyObject_CallMethod_SizeT
+  _PyObject_GC_Malloc=python35._PyObject_GC_Malloc
+  _PyObject_GC_New=python35._PyObject_GC_New
+  _PyObject_GC_NewVar=python35._PyObject_GC_NewVar
+  _PyObject_GC_Resize=python35._PyObject_GC_Resize
+  _PyObject_New=python35._PyObject_New
+  _PyObject_NewVar=python35._PyObject_NewVar
+  _PyState_AddModule=python35._PyState_AddModule
+  _PyThreadState_Init=python35._PyThreadState_Init
+  _PyThreadState_Prealloc=python35._PyThreadState_Prealloc
+  _PyTrash_delete_later=python35._PyTrash_delete_later DATA
+  _PyTrash_delete_nesting=python35._PyTrash_delete_nesting DATA
+  _PyTrash_deposit_object=python35._PyTrash_deposit_object
+  _PyTrash_destroy_chain=python35._PyTrash_destroy_chain
+  _PyWeakref_CallableProxyType=python35._PyWeakref_CallableProxyType DATA
+  _PyWeakref_ProxyType=python35._PyWeakref_ProxyType DATA
+  _PyWeakref_RefType=python35._PyWeakref_RefType DATA
+  _Py_BuildValue_SizeT=python35._Py_BuildValue_SizeT
+  _Py_CheckRecursionLimit=python35._Py_CheckRecursionLimit DATA
+  _Py_CheckRecursiveCall=python35._Py_CheckRecursiveCall
+  _Py_Dealloc=python35._Py_Dealloc
+  _Py_EllipsisObject=python35._Py_EllipsisObject DATA
+  _Py_FalseStruct=python35._Py_FalseStruct DATA
+  _Py_NoneStruct=python35._Py_NoneStruct DATA
+  _Py_NotImplementedStruct=python35._Py_NotImplementedStruct DATA
+  _Py_SwappedOp=python35._Py_SwappedOp DATA
+  _Py_TrueStruct=python35._Py_TrueStruct DATA
+  _Py_VaBuildValue_SizeT=python35._Py_VaBuildValue_SizeT
+  _PyArg_Parse_SizeT=python35._PyArg_Parse_SizeT
+  _PyArg_ParseTuple_SizeT=python35._PyArg_ParseTuple_SizeT
+  _PyArg_ParseTupleAndKeywords_SizeT=python35._PyArg_ParseTupleAndKeywords_SizeT
+  _PyArg_VaParse_SizeT=python35._PyArg_VaParse_SizeT
+  _PyArg_VaParseTupleAndKeywords_SizeT=python35._PyArg_VaParseTupleAndKeywords_SizeT
+  _Py_BuildValue_SizeT=python35._Py_BuildValue_SizeT
diff --git a/PC/python3.mak b/PC/python3.mak
index fb8e7aa..abe1241 100644
--- a/PC/python3.mak
+++ b/PC/python3.mak
@@ -1,14 +1,14 @@
-$(OutDir)python3.dll:	python3.def $(OutDir)python34stub.lib
-	cl /LD /Fe$(OutDir)python3.dll python3dll.c python3.def $(OutDir)python34stub.lib
+$(OutDir)python3.dll:	python3.def $(OutDir)python35stub.lib
+	cl /LD /Fe$(OutDir)python3.dll python3dll.c python3.def $(OutDir)python35stub.lib
 
-$(OutDir)python34stub.lib:	python34stub.def
-	lib /def:python34stub.def /out:$(OutDir)python34stub.lib /MACHINE:$(MACHINE)
+$(OutDir)python35stub.lib:	python35stub.def
+	lib /def:python35stub.def /out:$(OutDir)python35stub.lib /MACHINE:$(MACHINE)
 
 clean:
 	IF EXIST $(OutDir)python3.dll del $(OutDir)python3.dll
 	IF EXIST $(OutDir)python3.lib del $(OutDir)python3.lib
-	IF EXIST $(OutDir)python34stub.lib del $(OutDir)python34stub.lib
+	IF EXIST $(OutDir)python35stub.lib del $(OutDir)python35stub.lib
 	IF EXIST $(OutDir)python3.exp del $(OutDir)python3.exp
-	IF EXIST $(OutDir)python34stub.exp del $(OutDir)python34stub.exp
+	IF EXIST $(OutDir)python35stub.exp del $(OutDir)python35stub.exp
 
 rebuild: clean $(OutDir)python3.dll
diff --git a/PC/python34gen.py b/PC/python35gen.py
similarity index 77%
rename from PC/python34gen.py
rename to PC/python35gen.py
index 180ce11..609cb9d 100644
--- a/PC/python34gen.py
+++ b/PC/python35gen.py
@@ -1,9 +1,9 @@
-# Generate python34stub.def out of python3.def
+# Generate python35stub.def out of python3.def
 # The regular import library cannot be used,
 # since it doesn't provide the right symbols for
 # data forwarding
-out = open("python34stub.def", "w")
-out.write('LIBRARY "python34"\n')
+out = open("python35stub.def", "w")
+out.write('LIBRARY "python35"\n')
 out.write('EXPORTS\n')
 
 inp = open("python3.def")
@@ -14,7 +14,7 @@
 assert line.strip()=='EXPORTS'
 
 for line in inp:
-    # SYM1=python34.SYM2[ DATA]
+    # SYM1=python35.SYM2[ DATA]
     head, tail = line.split('.')
     if 'DATA' in tail:
         symbol, tail = tail.split(' ')
diff --git a/PC/python34stub.def b/PC/python35stub.def
similarity index 99%
rename from PC/python34stub.def
rename to PC/python35stub.def
index 3074cf3..8736ffb 100644
--- a/PC/python34stub.def
+++ b/PC/python35stub.def
@@ -1,4 +1,4 @@
-LIBRARY "python34"
+LIBRARY "python35"
 EXPORTS
 PyArg_Parse
 PyArg_ParseTuple
diff --git a/PCbuild/_tkinter.vcxproj b/PCbuild/_tkinter.vcxproj
index 67f72fe..9218d63 100644
--- a/PCbuild/_tkinter.vcxproj
+++ b/PCbuild/_tkinter.vcxproj
@@ -85,41 +85,49 @@
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd.props" />
     <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd.props" />
     <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd_d.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd.props" />
     <Import Project="x64.props" />
     <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd.props" />
     <Import Project="x64.props" />
     <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd.props" />
     <Import Project="x64.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="pyd_d.props" />
     <Import Project="x64.props" />
+    <Import Project="tcltk.props" />
   </ImportGroup>
   <PropertyGroup Label="UserMacros" />
   <PropertyGroup>
@@ -155,7 +163,7 @@
       <PreprocessorDefinitions>WITH_APPINIT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     </ClCompile>
     <Link>
-      <AdditionalDependencies>$(tcltkLibDebug);%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>$(tcltkLib);%(AdditionalDependencies)</AdditionalDependencies>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
@@ -163,11 +171,11 @@
       <TargetEnvironment>X64</TargetEnvironment>
     </Midl>
     <ClCompile>
-      <AdditionalIncludeDirectories>$(tcltk64Dir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <AdditionalIncludeDirectories>$(tcltkDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
       <PreprocessorDefinitions>WITH_APPINIT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     </ClCompile>
     <Link>
-      <AdditionalDependencies>$(tcltk64LibDebug);%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>$(tcltkLib);%(AdditionalDependencies)</AdditionalDependencies>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
@@ -184,11 +192,11 @@
       <TargetEnvironment>X64</TargetEnvironment>
     </Midl>
     <ClCompile>
-      <AdditionalIncludeDirectories>$(tcltk64Dir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <AdditionalIncludeDirectories>$(tcltkDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
       <PreprocessorDefinitions>WITH_APPINIT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     </ClCompile>
     <Link>
-      <AdditionalDependencies>$(tcltk64Lib);%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>$(tcltkLib);%(AdditionalDependencies)</AdditionalDependencies>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">
@@ -205,11 +213,11 @@
       <TargetEnvironment>X64</TargetEnvironment>
     </Midl>
     <ClCompile>
-      <AdditionalIncludeDirectories>$(tcltk64Dir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <AdditionalIncludeDirectories>$(tcltkDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
       <PreprocessorDefinitions>WITH_APPINIT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     </ClCompile>
     <Link>
-      <AdditionalDependencies>$(tcltk64Lib);%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>$(tcltkLib);%(AdditionalDependencies)</AdditionalDependencies>
       <TargetMachine>MachineX64</TargetMachine>
     </Link>
   </ItemDefinitionGroup>
@@ -227,11 +235,11 @@
       <TargetEnvironment>X64</TargetEnvironment>
     </Midl>
     <ClCompile>
-      <AdditionalIncludeDirectories>$(tcltk64Dir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <AdditionalIncludeDirectories>$(tcltkDir)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
       <PreprocessorDefinitions>WITH_APPINIT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     </ClCompile>
     <Link>
-      <AdditionalDependencies>$(tcltk64Lib);%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>$(tcltkLib);%(AdditionalDependencies)</AdditionalDependencies>
       <TargetMachine>MachineX64</TargetMachine>
     </Link>
   </ItemDefinitionGroup>
@@ -244,6 +252,12 @@
       <Project>{cf7ac3d1-e2df-41d2-bea6-1e2556cdea26}</Project>
       <ReferenceOutputAssembly>false</ReferenceOutputAssembly>
     </ProjectReference>
+    <ProjectReference Include="tcl.vcxproj">
+      <Project>{b5fd6f1d-129e-4bff-9340-03606fac7283}</Project>
+    </ProjectReference>
+    <ProjectReference Include="tk.vcxproj">
+      <Project>{7e85eccf-a72c-4da4-9e52-884508e80ba1}</Project>
+    </ProjectReference>
   </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
diff --git a/PCbuild/build_ssl.bat b/PCbuild/build_ssl.bat
deleted file mode 100644
index 805d77a..0000000
--- a/PCbuild/build_ssl.bat
+++ /dev/null
@@ -1,12 +0,0 @@
-@echo off
-if not defined HOST_PYTHON (
-  if %1 EQU Debug (
-    set HOST_PYTHON=python_d.exe
-    if not exist python34_d.dll exit 1
-  ) ELSE (
-    set HOST_PYTHON=python.exe
-    if not exist python34.dll exit 1
-  )
-)
-%HOST_PYTHON% build_ssl.py %1 %2 %3
-
diff --git a/PCbuild/build_ssl.py b/PCbuild/build_ssl.py
deleted file mode 100644
index dc51781..0000000
--- a/PCbuild/build_ssl.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Script for building the _ssl and _hashlib modules for Windows.
-# Uses Perl to setup the OpenSSL environment correctly
-# and build OpenSSL, then invokes a simple nmake session
-# for the actual _ssl.pyd and _hashlib.pyd DLLs.
-
-# THEORETICALLY, you can:
-# * Unpack the latest SSL release one level above your main Python source
-#   directory.  It is likely you will already find the zlib library and
-#   any other external packages there.
-# * Install ActivePerl and ensure it is somewhere on your path.
-# * Run this script from the PCBuild directory.
-#
-# it should configure and build SSL, then build the _ssl and _hashlib
-# Python extensions without intervention.
-
-# Modified by Christian Heimes
-# Now this script supports pre-generated makefiles and assembly files.
-# Developers don't need an installation of Perl anymore to build Python. A svn
-# checkout from our svn repository is enough.
-#
-# In Order to create the files in the case of an update you still need Perl.
-# Run build_ssl in this order:
-# python.exe build_ssl.py Release x64
-# python.exe build_ssl.py Release Win32
-
-import os, sys, re, shutil
-import subprocess
-
-# Find all "foo.exe" files on the PATH.
-def find_all_on_path(filename, extras = None):
-    entries = os.environ["PATH"].split(os.pathsep)
-    ret = []
-    for p in entries:
-        fname = os.path.abspath(os.path.join(p, filename))
-        if os.path.isfile(fname) and fname not in ret:
-            ret.append(fname)
-    if extras:
-        for p in extras:
-            fname = os.path.abspath(os.path.join(p, filename))
-            if os.path.isfile(fname) and fname not in ret:
-                ret.append(fname)
-    return ret
-
-# Find a suitable Perl installation for OpenSSL.
-# cygwin perl does *not* work.  ActivePerl does.
-# Being a Perl dummy, the simplest way I can check is if the "Win32" package
-# is available.
-def find_working_perl(perls):
-    for perl in perls:
-        try:
-            subprocess.check_output([perl, "-e", "use Win32;"])
-        except subprocess.CalledProcessError:
-            continue
-        else:
-            return perl
-
-    if perls:
-        print("The following perl interpreters were found:")
-        for p in perls:
-            print(" ", p)
-        print(" None of these versions appear suitable for building OpenSSL")
-    else:
-        print("NO perl interpreters were found on this machine at all!")
-    print(" Please install ActivePerl and ensure it appears on your path")
-
-# Fetch SSL directory from VC properties
-def get_ssl_dir():
-    propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.props'))
-    with open(propfile) as f:
-        m = re.search('openssl-([^<]+)<', f.read())
-        return "..\..\openssl-"+m.group(1)
-
-
-def create_makefile64(makefile, m32):
-    """Create and fix makefile for 64bit
-
-    Replace 32 with 64bit directories
-    """
-    if not os.path.isfile(m32):
-        return
-    with open(m32) as fin:
-        with open(makefile, 'w') as fout:
-            for line in fin:
-                line = line.replace("=tmp32", "=tmp64")
-                line = line.replace("=out32", "=out64")
-                line = line.replace("=inc32", "=inc64")
-                # force 64 bit machine
-                line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
-                line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
-                # don't link against the lib on 64bit systems
-                line = line.replace("bufferoverflowu.lib", "")
-                fout.write(line)
-    os.unlink(m32)
-
-def fix_makefile(makefile):
-    """Fix some stuff in all makefiles
-    """
-    if not os.path.isfile(makefile):
-        return
-    with open(makefile) as fin:
-        lines = fin.readlines()
-    with open(makefile, 'w') as fout:
-        for line in lines:
-            if line.startswith("PERL="):
-                continue
-            if line.startswith("CP="):
-                line = "CP=copy\n"
-            if line.startswith("MKDIR="):
-                line = "MKDIR=mkdir\n"
-            if line.startswith("CFLAG="):
-                line = line.strip()
-                for algo in ("RC5", "MDC2", "IDEA"):
-                    noalgo = " -DOPENSSL_NO_%s" % algo
-                    if noalgo not in line:
-                        line = line + noalgo
-                line = line + '\n'
-            fout.write(line)
-
-def run_configure(configure, do_script):
-    print("perl Configure "+configure+" no-idea no-mdc2")
-    os.system("perl Configure "+configure+" no-idea no-mdc2")
-    print(do_script)
-    os.system(do_script)
-
-def cmp(f1, f2):
-    bufsize = 1024 * 8
-    with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
-        while True:
-            b1 = fp1.read(bufsize)
-            b2 = fp2.read(bufsize)
-            if b1 != b2:
-                return False
-            if not b1:
-                return True
-
-def copy(src, dst):
-    if os.path.isfile(dst) and cmp(src, dst):
-        return
-    shutil.copy(src, dst)
-
-def main():
-    build_all = "-a" in sys.argv
-    if sys.argv[1] == "Release":
-        debug = False
-    elif sys.argv[1] == "Debug":
-        debug = True
-    else:
-        raise ValueError(str(sys.argv))
-
-    if sys.argv[2] == "Win32":
-        arch = "x86"
-        configure = "VC-WIN32"
-        do_script = "ms\\do_nasm"
-        makefile="ms\\nt.mak"
-        m32 = makefile
-        dirsuffix = "32"
-    elif sys.argv[2] == "x64":
-        arch="amd64"
-        configure = "VC-WIN64A"
-        do_script = "ms\\do_win64a"
-        makefile = "ms\\nt64.mak"
-        m32 = makefile.replace('64', '')
-        dirsuffix = "64"
-        #os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
-    else:
-        raise ValueError(str(sys.argv))
-
-    make_flags = ""
-    if build_all:
-        make_flags = "-a"
-    # perl should be on the path, but we also look in "\perl" and "c:\\perl"
-    # as "well known" locations
-    perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
-    perl = find_working_perl(perls)
-    if perl:
-        print("Found a working perl at '%s'" % (perl,))
-    else:
-        print("No Perl installation was found. Existing Makefiles are used.")
-    sys.stdout.flush()
-    # Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
-    ssl_dir = get_ssl_dir()
-    if ssl_dir is None:
-        sys.exit(1)
-
-    old_cd = os.getcwd()
-    try:
-        os.chdir(ssl_dir)
-        # rebuild makefile when we do the role over from 32 to 64 build
-        if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
-            os.unlink(m32)
-
-        # If the ssl makefiles do not exist, we invoke Perl to generate them.
-        # Due to a bug in this script, the makefile sometimes ended up empty
-        # Force a regeneration if it is.
-        if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
-            if perl is None:
-                print("Perl is required to build the makefiles!")
-                sys.exit(1)
-
-            print("Creating the makefiles...")
-            sys.stdout.flush()
-            # Put our working Perl at the front of our path
-            os.environ["PATH"] = os.path.dirname(perl) + \
-                                          os.pathsep + \
-                                          os.environ["PATH"]
-            run_configure(configure, do_script)
-            if debug:
-                print("OpenSSL debug builds aren't supported.")
-            #if arch=="x86" and debug:
-            #    # the do_masm script in openssl doesn't generate a debug
-            #    # build makefile so we generate it here:
-            #    os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
-
-            if arch == "amd64":
-                create_makefile64(makefile, m32)
-            fix_makefile(makefile)
-            copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
-            copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
-
-        # If the assembler files don't exist in tmpXX, copy them there
-        if perl is None and os.path.exists("asm"+dirsuffix):
-            if not os.path.exists("tmp"+dirsuffix):
-                os.mkdir("tmp"+dirsuffix)
-            for f in os.listdir("asm"+dirsuffix):
-                if not f.endswith(".asm"): continue
-                if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
-                shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
-
-        # Now run make.
-        if arch == "amd64":
-            rc = os.system("nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm")
-            if rc:
-                print("nasm assembler has failed.")
-                sys.exit(rc)
-
-        copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
-        copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
-
-        #makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
-        makeCommand = "nmake /nologo -f \"%s\"" % makefile
-        print("Executing ssl makefiles:", makeCommand)
-        sys.stdout.flush()
-        rc = os.system(makeCommand)
-        if rc:
-            print("Executing "+makefile+" failed")
-            print(rc)
-            sys.exit(rc)
-    finally:
-        os.chdir(old_cd)
-    sys.exit(rc)
-
-if __name__=='__main__':
-    main()
diff --git a/PCbuild/build_tkinter.py b/PCbuild/build_tkinter.py
deleted file mode 100644
index c807e7b..0000000
--- a/PCbuild/build_tkinter.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""Script to compile the dependencies of _tkinter
-
-Copyright (c) 2007 by Christian Heimes <christian@cheimes.de>
-
-Licensed to PSF under a Contributor Agreement.
-"""
-
-import os
-import sys
-
-here = os.path.abspath(os.path.dirname(__file__))
-par = os.path.pardir
-
-TCL = "tcl8.6.1"
-TK = "tk8.6.1"
-TIX = "tix-8.4.3.3"
-
-ROOT = os.path.abspath(os.path.join(here, par, par))
-NMAKE = ('nmake /nologo /f %s %s %s')
-
-def nmake(makefile, command="", **kw):
-    defines = ' '.join(k+'='+str(v) for k, v in kw.items())
-    cmd = NMAKE % (makefile, defines, command)
-    print("\n\n"+cmd+"\n")
-    if os.system(cmd) != 0:
-        raise RuntimeError(cmd)
-
-def build(platform, clean):
-    if platform == "Win32":
-        dest = os.path.join(ROOT, "tcltk")
-        machine = "IX86"
-    elif platform == "AMD64":
-        dest = os.path.join(ROOT, "tcltk64")
-        machine = "AMD64"
-    else:
-        raise ValueError(platform)
-
-    # TCL
-    tcldir = os.path.join(ROOT, TCL)
-    if 1:
-        os.chdir(os.path.join(tcldir, "win"))
-        if clean:
-            nmake("makefile.vc", "clean")
-        nmake("makefile.vc", MACHINE=machine)
-        nmake("makefile.vc", "install", INSTALLDIR=dest, MACHINE=machine)
-
-    # TK
-    if 1:
-        os.chdir(os.path.join(ROOT, TK, "win"))
-        if clean:
-            nmake("makefile.vc", "clean", DEBUG=0, TCLDIR=tcldir)
-        nmake("makefile.vc", DEBUG=0, MACHINE=machine, TCLDIR=tcldir)
-        nmake("makefile.vc", "install", DEBUG=0, INSTALLDIR=dest, MACHINE=machine, TCLDIR=tcldir)
-
-    # TIX
-    if 1:
-        # python9.mak is available at http://svn.python.org
-        os.chdir(os.path.join(ROOT, TIX, "win"))
-        if clean:
-            nmake("python.mak", "clean")
-        nmake("python.mak", MACHINE=machine, INSTALL_DIR=dest)
-        nmake("python.mak", "install", MACHINE=machine, INSTALL_DIR=dest)
-
-def main():
-    if len(sys.argv) < 2 or sys.argv[1] not in ("Win32", "AMD64"):
-        print("%s Win32|AMD64" % sys.argv[0])
-        sys.exit(1)
-
-    if "-c" in sys.argv:
-        clean = True
-    else:
-        clean = False
-
-    build(sys.argv[1], clean)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/PCbuild/debug.props b/PCbuild/debug.props
index 9b7a65a..54f3c32 100644
--- a/PCbuild/debug.props
+++ b/PCbuild/debug.props
@@ -3,6 +3,7 @@
   <PropertyGroup Label="UserMacros">
     <PyDebugExt>_d</PyDebugExt>
     <KillPythonExe>$(OutDir)kill_python_d.exe</KillPythonExe>
+    <TclDebugExt>g</TclDebugExt>
   </PropertyGroup>
   <PropertyGroup>
     <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
@@ -23,5 +24,8 @@
     <BuildMacro Include="KillPythonExe">
       <Value>$(KillPythonExe)</Value>
     </BuildMacro>
+    <BuildMacro Include="TclDebugExt">
+      <Value>$(TclDebugExt)</Value>
+    </BuildMacro>
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/PCbuild/kill_python.c b/PCbuild/kill_python.c
index 604731f..dbc9425 100644
--- a/PCbuild/kill_python.c
+++ b/PCbuild/kill_python.c
@@ -62,7 +62,7 @@
             continue;
 
         len = wcsnlen_s(me.szExePath, MAX_PATH) - KILL_PYTHON_EXE_LEN;
-        wcsncpy_s(path, MAX_PATH+1, me.szExePath, len); 
+        wcsncpy_s(path, MAX_PATH+1, me.szExePath, len);
 
         break;
 
@@ -80,8 +80,8 @@
      * looking for python processes.  When we find one, verify it lives
      * in the same directory we live in.  If it does, kill it.  If we're
      * unable to kill it, treat this as a fatal error and return 1.
-     * 
-     * The rationale behind this is that we're called at the start of the 
+     *
+     * The rationale behind this is that we're called at the start of the
      * build process on the basis that we'll take care of killing any
      * running instances, such that the build won't encounter permission
      * denied errors during linking. If we can't kill one of the processes,
@@ -104,11 +104,11 @@
     do {
 
         /*
-         * XXX TODO: if we really wanted to be fancy, we could check the 
+         * XXX TODO: if we really wanted to be fancy, we could check the
          * modules for all processes (not just the python[_d].exe ones)
-         * and see if any of our DLLs are loaded (i.e. python34[_d].dll),
+         * and see if any of our DLLs are loaded (i.e. python35[_d].dll),
          * as that would also inhibit our ability to rebuild the solution.
-         * Not worth loosing sleep over though; for now, a simple check 
+         * Not worth losing sleep over though; for now, a simple check
          * for just the python executable should be sufficient.
          */
 
@@ -119,7 +119,7 @@
         /* It's a python process, so figure out which directory it's in... */
         hsm = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pe.th32ProcessID);
         if (hsm == INVALID_HANDLE_VALUE)
-            /* 
+            /*
              * If our module snapshot fails (which will happen if we don't own
              * the process), just ignore it and continue.  (It seems different
              * versions of Windows return different values for GetLastError()
diff --git a/PCbuild/pcbuild.sln b/PCbuild/pcbuild.sln
index a346e10..2c3127b 100644
--- a/PCbuild/pcbuild.sln
+++ b/PCbuild/pcbuild.sln
@@ -78,6 +78,12 @@
 EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "_testembed", "_testembed.vcxproj", "{6DAC66D9-E703-4624-BE03-49112AB5AA62}"
 EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tcl", "tcl.vcxproj", "{B5FD6F1D-129E-4BFF-9340-03606FAC7283}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tk", "tk.vcxproj", "{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tix", "tix.vcxproj", "{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}"
+EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|Win32 = Debug|Win32
@@ -645,6 +651,54 @@
 		{6DAC66D9-E703-4624-BE03-49112AB5AA62}.Release|Win32.Build.0 = Release|Win32
 		{6DAC66D9-E703-4624-BE03-49112AB5AA62}.Release|x64.ActiveCfg = Release|x64
 		{6DAC66D9-E703-4624-BE03-49112AB5AA62}.Release|x64.Build.0 = Release|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Debug|Win32.ActiveCfg = Debug|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Debug|Win32.Build.0 = Debug|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Debug|x64.ActiveCfg = Debug|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Debug|x64.Build.0 = Debug|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGInstrument|Win32.ActiveCfg = Release|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGInstrument|Win32.Build.0 = Release|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGInstrument|x64.ActiveCfg = PGInstrument|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGInstrument|x64.Build.0 = PGInstrument|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGUpdate|Win32.ActiveCfg = Release|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGUpdate|Win32.Build.0 = Release|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGUpdate|x64.ActiveCfg = PGUpdate|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.PGUpdate|x64.Build.0 = PGUpdate|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Release|Win32.ActiveCfg = Release|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Release|Win32.Build.0 = Release|Win32
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Release|x64.ActiveCfg = Release|x64
+		{B5FD6F1D-129E-4BFF-9340-03606FAC7283}.Release|x64.Build.0 = Release|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Debug|Win32.ActiveCfg = Debug|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Debug|Win32.Build.0 = Debug|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Debug|x64.ActiveCfg = Debug|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Debug|x64.Build.0 = Debug|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGInstrument|Win32.ActiveCfg = Release|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGInstrument|Win32.Build.0 = Release|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGInstrument|x64.ActiveCfg = PGInstrument|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGInstrument|x64.Build.0 = PGInstrument|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGUpdate|Win32.ActiveCfg = Release|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGUpdate|Win32.Build.0 = Release|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGUpdate|x64.ActiveCfg = PGUpdate|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.PGUpdate|x64.Build.0 = PGUpdate|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Release|Win32.ActiveCfg = Release|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Release|Win32.Build.0 = Release|Win32
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Release|x64.ActiveCfg = Release|x64
+		{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}.Release|x64.Build.0 = Release|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Debug|Win32.ActiveCfg = Debug|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Debug|Win32.Build.0 = Debug|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Debug|x64.ActiveCfg = Debug|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Debug|x64.Build.0 = Debug|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGInstrument|Win32.ActiveCfg = PGInstrument|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGInstrument|Win32.Build.0 = PGInstrument|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGInstrument|x64.ActiveCfg = PGInstrument|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGInstrument|x64.Build.0 = PGInstrument|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGUpdate|Win32.ActiveCfg = PGUpdate|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGUpdate|Win32.Build.0 = PGUpdate|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGUpdate|x64.ActiveCfg = PGUpdate|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.PGUpdate|x64.Build.0 = PGUpdate|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Release|Win32.ActiveCfg = Release|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Release|Win32.Build.0 = Release|Win32
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Release|x64.ActiveCfg = Release|x64
+		{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}.Release|x64.Build.0 = Release|x64
 	EndGlobalSection
 	GlobalSection(SolutionProperties) = preSolution
 		HideSolutionNode = FALSE
diff --git a/PCbuild/prepare_ssl.bat b/PCbuild/prepare_ssl.bat
new file mode 100644
index 0000000..cb06500
--- /dev/null
+++ b/PCbuild/prepare_ssl.bat
@@ -0,0 +1,12 @@
+@echo off
+if not defined HOST_PYTHON (
+  if %1 EQU Debug (
+    shift
+    set HOST_PYTHON=python_d.exe
+    if not exist python35_d.dll exit 1
+  ) ELSE (
+    set HOST_PYTHON=python.exe
+    if not exist python35.dll exit 1
+  )
+)
+%HOST_PYTHON% prepare_ssl.py %1
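A hypothetical invocation of this wrapper, assuming it is run from the PCbuild directory (where python35.dll or python35_d.dll is checked for) and that OpenSSL sits in the default $(opensslDir) location from pyproject.props, might look like:

    prepare_ssl.bat ..\..\openssl-1.0.1g
    prepare_ssl.bat Debug ..\..\openssl-1.0.1g

When the first argument is Debug, the script shifts it away and uses python_d.exe, so the remaining argument (the OpenSSL source directory) is what gets passed to prepare_ssl.py.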
diff --git a/PCbuild/prepare_ssl.py b/PCbuild/prepare_ssl.py
new file mode 100644
index 0000000..f9f8c12
--- /dev/null
+++ b/PCbuild/prepare_ssl.py
@@ -0,0 +1,217 @@
+# Script for preparing OpenSSL for building on Windows.
+# Uses Perl to create nmake makefiles and otherwise prepare the way
+# for building on 32 or 64 bit platforms.
+
+# Script originally authored by Mark Hammond.
+# Major revisions by:
+#   Martin v. Löwis
+#   Christian Heimes
+#   Zachary Ware
+
+# THEORETICALLY, you can:
+# * Unpack the latest OpenSSL release where $(opensslDir) in
+#   PCbuild\pyproject.props expects it to be.
+# * Install ActivePerl and ensure it is somewhere on your path.
+# * Run this script with the OpenSSL source dir as the only argument.
+#
+# it should configure OpenSSL such that it is ready to be built by
+# ssl.vcxproj on 32 or 64 bit platforms.
+
+import os
+import re
+import sys
+import shutil
+import subprocess
+
+# Find all "foo.exe" files on the PATH.
+def find_all_on_path(filename, extras = None):
+    entries = os.environ["PATH"].split(os.pathsep)
+    ret = []
+    for p in entries:
+        fname = os.path.abspath(os.path.join(p, filename))
+        if os.path.isfile(fname) and fname not in ret:
+            ret.append(fname)
+    if extras:
+        for p in extras:
+            fname = os.path.abspath(os.path.join(p, filename))
+            if os.path.isfile(fname) and fname not in ret:
+                ret.append(fname)
+    return ret
+
+# Find a suitable Perl installation for OpenSSL.
+# cygwin perl does *not* work.  ActivePerl does.
+# Being a Perl dummy, the simplest way I can check is if the "Win32" package
+# is available.
+def find_working_perl(perls):
+    for perl in perls:
+        try:
+            subprocess.check_output([perl, "-e", "use Win32;"])
+        except subprocess.CalledProcessError:
+            continue
+        else:
+            return perl
+
+    if perls:
+        print("The following perl interpreters were found:")
+        for p in perls:
+            print(" ", p)
+        print(" None of these versions appear suitable for building OpenSSL")
+    else:
+        print("NO perl interpreters were found on this machine at all!")
+    print(" Please install ActivePerl and ensure it appears on your path")
+
+def create_makefile64(makefile, m32):
+    """Create and fix makefile for 64bit
+
+    Replace 32 with 64bit directories
+    """
+    if not os.path.isfile(m32):
+        return
+    with open(m32) as fin:
+        with open(makefile, 'w') as fout:
+            for line in fin:
+                line = line.replace("=tmp32", "=tmp64")
+                line = line.replace("=out32", "=out64")
+                line = line.replace("=inc32", "=inc64")
+                # force 64 bit machine
+                line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
+                line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
+                # don't link against the lib on 64bit systems
+                line = line.replace("bufferoverflowu.lib", "")
+                fout.write(line)
+    os.unlink(m32)
+
+def fix_makefile(makefile):
+    """Fix some stuff in all makefiles
+    """
+    if not os.path.isfile(makefile):
+        return
+    with open(makefile) as fin:
+        lines = fin.readlines()
+    with open(makefile, 'w') as fout:
+        for line in lines:
+            if line.startswith("PERL="):
+                continue
+            if line.startswith("CP="):
+                line = "CP=copy\n"
+            if line.startswith("MKDIR="):
+                line = "MKDIR=mkdir\n"
+            if line.startswith("CFLAG="):
+                line = line.strip()
+                for algo in ("RC5", "MDC2", "IDEA"):
+                    noalgo = " -DOPENSSL_NO_%s" % algo
+                    if noalgo not in line:
+                        line = line + noalgo
+                line = line + '\n'
+            fout.write(line)
+
+def run_configure(configure, do_script):
+    print("perl Configure "+configure+" no-idea no-mdc2")
+    os.system("perl Configure "+configure+" no-idea no-mdc2")
+    print(do_script)
+    os.system(do_script)
+
+def cmp(f1, f2):
+    bufsize = 1024 * 8
+    with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
+        while True:
+            b1 = fp1.read(bufsize)
+            b2 = fp2.read(bufsize)
+            if b1 != b2:
+                return False
+            if not b1:
+                return True
+
+def copy(src, dst):
+    if os.path.isfile(dst) and cmp(src, dst):
+        return
+    shutil.copy(src, dst)
+
+def prep(arch):
+    if arch == "x86":
+        configure = "VC-WIN32"
+        do_script = "ms\\do_nasm"
+        makefile="ms\\nt.mak"
+        m32 = makefile
+        dirsuffix = "32"
+    elif arch == "amd64":
+        configure = "VC-WIN64A"
+        do_script = "ms\\do_win64a"
+        makefile = "ms\\nt64.mak"
+        m32 = makefile.replace('64', '')
+        dirsuffix = "64"
+        #os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
+    else:
+        raise ValueError('Unrecognized platform: %s' % arch)
+
+    # rebuild makefile when we roll over from the 32 to the 64 bit build
+    if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
+        os.unlink(m32)
+
+    # If the ssl makefiles do not exist, we invoke Perl to generate them.
+    # Due to a bug in this script, the makefile sometimes ended up empty.
+    # Force a regeneration if it is.
+    if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
+        print("Creating the makefiles...")
+        sys.stdout.flush()
+        run_configure(configure, do_script)
+
+        if arch == "amd64":
+            create_makefile64(makefile, m32)
+        fix_makefile(makefile)
+        copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
+        copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
+    else:
+        print(makefile, 'already exists!')
+
+    # If the assembler files don't exist in tmpXX, copy them there
+    if os.path.exists("asm"+dirsuffix):
+        if not os.path.exists("tmp"+dirsuffix):
+            os.mkdir("tmp"+dirsuffix)
+        for f in os.listdir("asm"+dirsuffix):
+            if not f.endswith(".asm"): continue
+            if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
+            shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
+
+def main():
+    if len(sys.argv) == 1:
+        print("Not enough arguments: directory containing OpenSSL",
+              "sources must be supplied")
+        sys.exit(1)
+
+    if len(sys.argv) > 2:
+        print("Too many arguments supplied, all we need is the directory",
+              "containing OpenSSL sources")
+        sys.exit(1)
+
+    ssl_dir = sys.argv[1]
+
+    if not os.path.isdir(ssl_dir):
+        print(ssl_dir, "is not an existing directory!")
+        sys.exit(1)
+
+    # perl should be on the path, but we also look in "\perl" and "c:\\perl"
+    # as "well known" locations
+    perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
+    perl = find_working_perl(perls)
+    if perl:
+        print("Found a working perl at '%s'" % (perl,))
+    else:
+        sys.exit(1)
+    sys.stdout.flush()
+
+    # Put our working Perl at the front of our path
+    os.environ["PATH"] = os.path.dirname(perl) + \
+                                os.pathsep + \
+                                os.environ["PATH"]
+
+    old_cwd = os.getcwd()
+    try:
+        os.chdir(ssl_dir)
+        for arch in ['amd64', 'x86']:
+            prep(arch)
+    finally:
+        os.chdir(old_cwd)
+
+if __name__=='__main__':
+    main()
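To make the CFLAG rewrite in fix_makefile() concrete, here is a minimal, self-contained sketch (an illustration only, using a made-up sample CFLAG value) of the transformation applied to a single makefile line:

    def fix_cflag_line(line):
        # Append -DOPENSSL_NO_<algo> for each excluded algorithm not already disabled.
        line = line.strip()
        for algo in ("RC5", "MDC2", "IDEA"):
            noalgo = " -DOPENSSL_NO_%s" % algo
            if noalgo not in line:
                line = line + noalgo
        return line + '\n'

    print(fix_cflag_line("CFLAG= /MD /W3 /Ox -DOPENSSL_NO_IDEA\n"))
    # CFLAG= /MD /W3 /Ox -DOPENSSL_NO_IDEA -DOPENSSL_NO_RC5 -DOPENSSL_NO_MDC2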
diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props
index 2219c95..d9811cc 100644
--- a/PCbuild/pyproject.props
+++ b/PCbuild/pyproject.props
@@ -5,7 +5,7 @@
     <OutDir>$(SolutionDir)</OutDir>
     <IntDir>$(SolutionDir)$(PlatformName)-temp-$(Configuration)\$(ProjectName)\</IntDir>
     <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>  
+  </PropertyGroup>
   <PropertyGroup Condition="'$(Platform)'=='x64'">
     <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
     <_PropertySheetDisplayName>amd64</_PropertySheetDisplayName>
@@ -13,7 +13,7 @@
     <IntDir>$(SolutionDir)$(PlatformName)-temp-$(Configuration)\$(ProjectName)\</IntDir>
   </PropertyGroup>
   <PropertyGroup Label="UserMacros">
-    <PyDllName>python34$(PyDebugExt)</PyDllName>
+    <PyDllName>python35$(PyDebugExt)</PyDllName>
     <PythonExe>$(OutDir)python$(PyDebugExt).exe</PythonExe>
     <KillPythonExe>$(OutDir)kill_python$(PyDebugExt).exe</KillPythonExe>
     <externalsDir>..\..</externalsDir>
@@ -21,12 +21,10 @@
     <bz2Dir>$(externalsDir)\bzip2-1.0.6</bz2Dir>
     <lzmaDir>$(externalsDir)\xz-5.0.5</lzmaDir>
     <opensslDir>$(externalsDir)\openssl-1.0.1g</opensslDir>
+    <tclDir>$(externalsDir)\tcl-8.6.1.0</tclDir>
+    <tkDir>$(externalsDir)\tk-8.6.1.0</tkDir>
+    <tixDir>$(externalsDir)\tix-8.4.3.4</tixDir>
     <tcltkDir>$(externalsDir)\tcltk</tcltkDir>
-    <tcltk64Dir>$(externalsDir)\tcltk64</tcltk64Dir>
-    <tcltkLib>$(tcltkDir)\lib\tcl86t.lib;$(tcltkDir)\lib\tk86t.lib</tcltkLib>
-    <tcltkLibDebug>$(tcltkDir)\lib\tcl86tg.lib;$(tcltkDir)\lib\tk86tg.lib</tcltkLibDebug>
-    <tcltk64Lib>$(tcltk64Dir)\lib\tcl86t.lib;$(tcltk64Dir)\lib\tk86t.lib</tcltk64Lib>
-    <tcltk64LibDebug>$(tcltk64Dir)\lib\tcl86tg.lib;$(tcltk64Dir)\lib\tk86tg.lib</tcltk64LibDebug>
   </PropertyGroup>
   <ItemDefinitionGroup>
     <ClCompile>
@@ -82,23 +80,17 @@
     <BuildMacro Include="opensslDir">
       <Value>$(opensslDir)</Value>
     </BuildMacro>
+    <BuildMacro Include="tclDir">
+      <Value>$(tclDir)</Value>
+    </BuildMacro>
+    <BuildMacro Include="tkDir">
+      <Value>$(tkDir)</Value>
+    </BuildMacro>
+    <BuildMacro Include="tixDir">
+      <Value>$(tixDir)</Value>
+    </BuildMacro>
     <BuildMacro Include="tcltkDir">
       <Value>$(tcltkDir)</Value>
     </BuildMacro>
-    <BuildMacro Include="tcltk64Dir">
-      <Value>$(tcltk64Dir)</Value>
-    </BuildMacro>
-    <BuildMacro Include="tcltkLib">
-      <Value>$(tcltkLib)</Value>
-    </BuildMacro>
-    <BuildMacro Include="tcltkLibDebug">
-      <Value>$(tcltkLibDebug)</Value>
-    </BuildMacro>
-    <BuildMacro Include="tcltk64Lib">
-      <Value>$(tcltk64Lib)</Value>
-    </BuildMacro>
-    <BuildMacro Include="tcltk64LibDebug">
-      <Value>$(tcltk64LibDebug)</Value>
-    </BuildMacro>
   </ItemGroup>
-</Project>
+</Project>
\ No newline at end of file
diff --git a/PCbuild/python.vcxproj b/PCbuild/python.vcxproj
index bd12624..5387343 100644
--- a/PCbuild/python.vcxproj
+++ b/PCbuild/python.vcxproj
@@ -156,6 +156,14 @@
     <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
     <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
     <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Link</CustomBuildAfterTargets>
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Link</CustomBuildAfterTargets>
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">Link</CustomBuildAfterTargets>
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">Link</CustomBuildAfterTargets>
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">Link</CustomBuildAfterTargets>
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">Link</CustomBuildAfterTargets>
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Link</CustomBuildAfterTargets>
+    <CustomBuildAfterTargets Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Link</CustomBuildAfterTargets>
   </PropertyGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
     <ClCompile>
@@ -176,6 +184,25 @@
       <StackReserveSize>2000000</StackReserveSize>
       <BaseAddress>0x1d000000</BaseAddress>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
     <Midl>
@@ -199,6 +226,25 @@
       <StackReserveSize>2000000</StackReserveSize>
       <BaseAddress>0x1d000000</BaseAddress>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
     <ClCompile>
@@ -221,6 +267,25 @@
       <StackReserveSize>2000000</StackReserveSize>
       <BaseAddress>0x1d000000</BaseAddress>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
     <Midl>
@@ -246,6 +311,25 @@
       <StackReserveSize>4194304</StackReserveSize>
       <BaseAddress>0x1d000000</BaseAddress>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">
     <ClCompile>
@@ -268,6 +352,25 @@
       <ImportLibrary>
       </ImportLibrary>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">
     <Midl>
@@ -294,6 +397,25 @@
       </ImportLibrary>
       <TargetMachine>MachineX64</TargetMachine>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">
     <ClCompile>
@@ -316,6 +438,25 @@
       <ImportLibrary>
       </ImportLibrary>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">
     <Midl>
@@ -342,6 +483,25 @@
       </ImportLibrary>
       <TargetMachine>MachineX64</TargetMachine>
     </Link>
+    <CustomBuildStep>
+      <Command>echo @rem This script invokes the most recently built Python with all arguments&gt; "$(SolutionDir)..\python.bat"
+echo @rem passed through to the interpreter.  This file is generated by the&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem build process and any changes *will* be thrown away by the next&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem rebuild.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem This is only meant as a convenience for developing CPython&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @rem and using it outside of that context is ill-advised.&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @echo Running $(Configuration)^^^|$(Platform) interpreter...&gt;&gt; "$(SolutionDir)..\python.bat"
+echo @"$(OutDir)python$(PyDebugExt).exe" %%*&gt;&gt; "$(SolutionDir)..\python.bat"</Command>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Message>Creating convenience batch file for easily invoking the newly built interpreter.</Message>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Outputs>$(SolutionDir)..\python.bat</Outputs>
+    </CustomBuildStep>
+    <CustomBuildStep>
+      <Inputs>$(OutDir)python$(PyDebugExt).exe;%(Inputs)</Inputs>
+    </CustomBuildStep>
   </ItemDefinitionGroup>
   <ItemGroup>
     <None Include="..\PC\pycon.ico" />
@@ -361,4 +521,4 @@
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
-</Project>
+</Project>
\ No newline at end of file
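The CustomBuildStep blocks added above regenerate python.bat in the source root (one level above PCbuild) after every successful link, in every configuration. A hypothetical usage example, with arguments passed straight through to the most recently built interpreter:

    python.bat -m test

This echoes which Configuration|Platform interpreter is being used and then runs $(OutDir)python$(PyDebugExt).exe with the given arguments.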
diff --git a/PCbuild/readme.txt b/PCbuild/readme.txt
index b8184bf..5d6c0eb 100644
--- a/PCbuild/readme.txt
+++ b/PCbuild/readme.txt
@@ -40,7 +40,7 @@
     Used to build Python with extra debugging capabilities, equivalent
     to using ./configure --with-pydebug on UNIX.  All binaries built
     using this configuration have "_d" added to their name:
-    python34_d.dll, python_d.exe, parser_d.pyd, and so on.  Both the
+    python35_d.dll, python_d.exe, parser_d.pyd, and so on.  Both the
     build and rt (run test) batch files in this directory accept a -d
     option for debug builds.  If you are building Python to help with
     development of CPython, you will most likely use this configuration.
@@ -181,30 +181,19 @@
     you should first try to update NASM and do a full rebuild of
     OpenSSL.
 
-    If you like to use the official sources instead of the files from
-    python.org's subversion repository, Perl is required to build the
-    necessary makefiles and assembly files.  ActivePerl is available
-    from
+    The ssl sub-project expects your OpenSSL sources to have already
+    been configured and be ready to build.  If you get your sources
+    from svn.python.org as suggested in the "Getting External Sources"
+    section below, the OpenSSL source will already be ready to go.  If
+    you want to build a different version, you will need to run
+
+       PCbuild\prepare_ssl.py path\to\openssl-source-dir
+
+    That script will prepare your OpenSSL sources in the same way that
+    those available on svn.python.org have been prepared.  Note that
+    Perl must be installed and available on your PATH to configure
+    OpenSSL.  ActivePerl is recommended and is available from
         http://www.activestate.com/activeperl/
-    The svn.python.org version contains pre-built makefiles and assembly
-    files.
-
-    The build process makes sure that no patented algorithms are
-    included.  For now RC5, MDC2 and IDEA are excluded from the build.
-    You may have to manually remove $(OBJ_D)\i_*.obj from ms\nt.mak if
-    using official sources; the svn.python.org-hosted version is already
-    fixed.
-
-    The ssl.vcxproj sub-project simply invokes PCbuild/build_ssl.py,
-    which locates and builds OpenSSL.
-
-    build_ssl.py attempts to catch the most common errors (such as not
-    being able to find OpenSSL sources, or not being able to find a Perl
-    that works with OpenSSL) and give a reasonable error message.  If
-    you have a problem that doesn't seem to be handled correctly (e.g.,
-    you know you have ActivePerl but we can't find it), please take a
-    peek at build_ssl.py and suggest patches.  Note that build_ssl.py
-    should be able to be run directly from the command-line.
 
     The ssl sub-project does not have the ability to clean the OpenSSL
     build; if you need to rebuild, you'll have to clean it by hand.
@@ -217,11 +206,19 @@
     Homepage:
         http://www.tcl.tk/
 
-    Unlike the other external libraries listed above, Tk must be built
-    separately before the _tkinter module can be built. This means that
-    a pre-built Tcl/Tk installation is expected in ..\..\tcltk (tcltk64
-    for 64-bit) relative to this directory.  See "Getting External
-    Sources" below for the easiest method to ensure Tcl/Tk is built.
+    Tkinter's dependencies are built by the tcl.vcxproj and tk.vcxproj
+    projects.  The tix.vcxproj project also builds the Tix extended
+    widget set for use with Tkinter.
+
+    Those three projects install their respective components in a
+    directory alongside the source directories called "tcltk" on
+    Win32 and "tcltk64" on x64.  They also copy the Tcl and Tk DLLs
+    into the current output directory, which should ensure that Tkinter
+    is able to load Tcl/Tk without having to change your PATH.
+
+    The tcl, tk, and tix sub-projects do not have the ability to clean
+    their builds; if you need to rebuild, you'll have to clean them by
+    hand.
 
 
 Getting External Sources
@@ -250,26 +247,6 @@
 anyway, since that is where the solution is set to look for xz.  The
 same is true for all other external projects.
 
-The external(-amd64).bat scripts will also build a debug build of
-Tcl/Tk, but there aren't any equivalent batch files for building release
-versions of Tcl/Tk currently available.  If you need to build a release
-version of Tcl/Tk, just take a look at the relevant external(-amd64).bat
-file and find the two nmake lines, then call each one without the
-'DEBUG=1' parameter, i.e.:
-
-The external-amd64.bat file contains this for tcl:
-    nmake -f makefile.vc DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 clean all install
-
-So for a release build, you'd call it as:
-    nmake -f makefile.vc MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 clean all install
-
-Note that the above command is called from within ..\..\tcl-8.6.1.0\win
-(relative to this directory); don't forget to build Tk as well as Tcl!
-
-This will be cleaned up in the future; http://bugs.python.org/issue15968
-tracks adding a new tcltk.vcxproj file that will build Tcl/Tk and Tix
-the same way the other external projects listed above are built.
-
 
 Building for AMD64
 ------------------
@@ -332,6 +309,7 @@
  * pyproject (base settings for all projects, user macros like PyDllName)
  * release (release macro: NDEBUG)
  * sqlite3 (used only by sqlite3.vcxproj)
+ * tcltk (used by _tkinter, tcl, tk and tix projects)
  * x64 (AMD64 / x64 platform specific settings)
 
 The pyproject property file defines _WIN32 and x64 defines _WIN64 and
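To make the directory layout described in the Tcl/Tk section above concrete, a sketch of the expected arrangement, assuming the default $(externalsDir) of ..\.. relative to PCbuild (directory names taken from pyproject.props):

    <externals root>\
        <this source checkout>\PCbuild\
        tcl-8.6.1.0\
        tk-8.6.1.0\
        tix-8.4.3.4\
        openssl-1.0.1g\
        tcltk\       (Win32 install target created by the tcl, tk and tix projects)
        tcltk64\     (x64 install target)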
diff --git a/PCbuild/release.props b/PCbuild/release.props
index acfe3e4..b460f01 100644
--- a/PCbuild/release.props
+++ b/PCbuild/release.props
@@ -2,6 +2,7 @@
 <Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <PropertyGroup Label="UserMacros">
     <PyDebugExt />
+    <TclDebugExt />
   </PropertyGroup>
   <PropertyGroup>
     <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
@@ -15,5 +16,8 @@
     <BuildMacro Include="PyDebugExt">
       <Value>$(PyDebugExt)</Value>
     </BuildMacro>
+    <BuildMacro Include="TclDebugExt">
+      <Value>$(TclDebugExt)</Value>
+    </BuildMacro>
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/PCbuild/ssl.vcxproj b/PCbuild/ssl.vcxproj
index d5eac9a..5f318d0 100644
--- a/PCbuild/ssl.vcxproj
+++ b/PCbuild/ssl.vcxproj
@@ -118,9 +118,12 @@
   <PropertyGroup Label="UserMacros" />
   <PropertyGroup>
     <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+cd "$(SolutionDir)$(opensslDir)"
+copy /Y crypto\buildinf_x86.h crypto\buildinf.h
+copy /Y crypto\opensslconf_x86.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
@@ -129,9 +132,13 @@
     <NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
     <NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
     <NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+cd "$(SolutionDir)$(opensslDir)"
+nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm || echo nasm failed! &amp;&amp; exit
+copy /Y crypto\buildinf_amd64.h crypto\buildinf.h
+copy /Y crypto\opensslconf_amd64.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt64.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
@@ -140,9 +147,12 @@
     <NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
     <NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
     <NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+cd "$(SolutionDir)$(opensslDir)"
+copy /Y crypto\buildinf_x86.h crypto\buildinf.h
+copy /Y crypto\opensslconf_x86.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
@@ -151,9 +161,13 @@
     <NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
     <NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
     <NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|x64'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+cd "$(SolutionDir)$(opensslDir)"
+nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm || echo nasm failed! &amp;&amp; exit
+copy /Y crypto\buildinf_amd64.h crypto\buildinf.h
+copy /Y crypto\opensslconf_amd64.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt64.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|x64'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
@@ -162,9 +176,12 @@
     <NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
     <NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
     <NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">
+cd "$(SolutionDir)$(opensslDir)"
+copy /Y crypto\buildinf_x86.h crypto\buildinf.h
+copy /Y crypto\opensslconf_x86.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
@@ -173,9 +190,13 @@
     <NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
     <NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
     <NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">
+cd "$(SolutionDir)$(opensslDir)"
+nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm || echo nasm failed! &amp;&amp; exit
+copy /Y crypto\buildinf_amd64.h crypto\buildinf.h
+copy /Y crypto\opensslconf_amd64.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt64.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
@@ -184,9 +205,12 @@
     <NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
     <NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
     <NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">
+cd "$(SolutionDir)$(opensslDir)"
+copy /Y crypto\buildinf_x86.h crypto\buildinf.h
+copy /Y crypto\opensslconf_x86.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
@@ -195,9 +219,13 @@
     <NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
     <NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
     <NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
-    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">cd "$(SolutionDir)"
-"$(PythonExe)" build_ssl.py Release $(Platform) -a
-</NMakeBuildCommandLine>
+    <NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">
+cd "$(SolutionDir)$(opensslDir)"
+nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm || echo nasm failed! &amp;&amp; exit
+copy /Y crypto\buildinf_amd64.h crypto\buildinf.h
+copy /Y crypto\opensslconf_amd64.h crypto\opensslconf.h
+nmake /nologo -f "ms\nt64.mak"
+	</NMakeBuildCommandLine>
     <NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
     <NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">echo OpenSSL must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
     <NMakeOutput Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
@@ -209,12 +237,6 @@
   </PropertyGroup>
   <ItemDefinitionGroup>
   </ItemDefinitionGroup>
-  <ItemGroup>
-    <ProjectReference Include="python.vcxproj">
-      <Project>{b11d750f-cd1f-4a96-85ce-e69a5c5259f9}</Project>
-      <ReferenceOutputAssembly>false</ReferenceOutputAssembly>
-    </ProjectReference>
-  </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
diff --git a/PCbuild/tcl.vcxproj b/PCbuild/tcl.vcxproj
new file mode 100644
index 0000000..dc426b3
--- /dev/null
+++ b/PCbuild/tcl.vcxproj
@@ -0,0 +1,175 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGInstrument|Win32">
+      <Configuration>PGInstrument</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGInstrument|x64">
+      <Configuration>PGInstrument</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGUpdate|Win32">
+      <Configuration>PGUpdate</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGUpdate|x64">
+      <Configuration>PGUpdate</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{B5FD6F1D-129E-4BFF-9340-03606FAC7283}</ProjectGuid>
+    <RootNamespace>tcl</RootNamespace>
+    <Keyword>MakeFileProj</Keyword>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="release.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="debug.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="release.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="debug.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup>
+    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+    <NMakeBuildCommandLine>
+IF EXIST $(tcltkDir)\include\tcl.h (
+    IF EXIST $(tcltkDir)\bin\$(tclDLLName) (
+        IF EXIST $(OutDir)$(tclDLLName) (
+            echo Tcl is already built and available.
+            exit /b 0
+        ) ELSE (
+            echo Copying tcl dll to $(OutDir)
+            xcopy $(tcltkDir)\bin\$(tclDLLName) $(OutDir)
+            exit /b 0
+        )
+    )
+)
+
+IF NOT EXIST $(tclDir) (
+    echo error: $(tclDir) doesn't exist.
+    exit 1
+)
+
+IF "$(Platform)" EQU "Win32" set TclMachine=IX86
+IF "$(Platform)" EQU "x64" set TclMachine=AMD64
+
+IF "$(Configuration)" EQU "Debug" (
+    set TclOpts=symbols
+) ELSE (
+    set TclOpts=
+)
+
+cd $(tclDir)\win
+
+nmake -f makefile.vc MACHINE=%TclMachine% OPTS=%TclOpts% core shell dlls &amp;&amp; nmake -f makefile.vc MACHINE=%TclMachine% OPTS=%TclOpts% INSTALLDIR=$(SolutionDir)$(tcltkDir) install-binaries install-libraries &amp;&amp; xcopy /y $(SolutionDir)$(tcltkDir)\bin\$(tclDLLName) $(OutDir)
+    </NMakeBuildCommandLine>
+    <NMakeCleanCommandLine>del $(OutDir)$(tclDLLName)
+echo Tcl must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
+  </PropertyGroup>
+  <ItemDefinitionGroup>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+  </ItemGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/PCbuild/tcltk.props b/PCbuild/tcltk.props
new file mode 100644
index 0000000..c169427
--- /dev/null
+++ b/PCbuild/tcltk.props
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ImportGroup Label="PropertySheets" />
+  <PropertyGroup Label="UserMacros">
+    <MSDEVDIR>Dummy value to avoid patching Tcl/Tk's makefile.vc</MSDEVDIR>
+    <tclDLLName>tcl86t$(TclDebugExt).dll</tclDLLName>
+    <tkDLLName>tk86t$(TclDebugExt).dll</tkDLLName>
+    <tixDLLName>tix84$(TclDebugExt).dll</tixDLLName>
+    <tcltkLib>$(tcltkDir)\lib\tcl86t$(TclDebugExt).lib;$(tcltkDir)\lib\tk86t$(TclDebugExt).lib</tcltkLib>
+  </PropertyGroup>
+  <ItemDefinitionGroup />
+  <ItemGroup>
+    <BuildMacro Include="MSDEVDIR">
+      <Value>$(MSDEVDIR)</Value>
+      <EnvironmentVariable>true</EnvironmentVariable>
+    </BuildMacro>
+    <BuildMacro Include="tclDLLName">
+      <Value>$(tclDLLName)</Value>
+    </BuildMacro>
+    <BuildMacro Include="tkDLLName">
+      <Value>$(tkDLLName)</Value>
+    </BuildMacro>
+    <BuildMacro Include="tixDLLName">
+      <Value>$(tixDLLName)</Value>
+    </BuildMacro>
+    <BuildMacro Include="tcltkLib">
+      <Value>$(tcltkLib)</Value>
+    </BuildMacro>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/PCbuild/tix.vcxproj b/PCbuild/tix.vcxproj
new file mode 100644
index 0000000..a177f5c
--- /dev/null
+++ b/PCbuild/tix.vcxproj
@@ -0,0 +1,171 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGInstrument|Win32">
+      <Configuration>PGInstrument</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGInstrument|x64">
+      <Configuration>PGInstrument</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGUpdate|Win32">
+      <Configuration>PGUpdate</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGUpdate|x64">
+      <Configuration>PGUpdate</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{C5A3E7FB-9695-4B2E-960B-1D9F43F1E555}</ProjectGuid>
+    <RootNamespace>tix</RootNamespace>
+    <Keyword>MakeFileProj</Keyword>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="release.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="debug.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="release.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="debug.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup>
+    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+    <NMakeBuildCommandLine>
+IF EXIST $(tcltkDir)\lib\tix8.4.3\$(tixDLLName) (
+    echo Tix is already built and available.
+    exit /b 0
+)
+
+IF NOT EXIST $(tixDir) (
+    echo error: $(tixDir) doesn't exist.
+    exit 1
+)
+
+IF "$(Platform)" EQU "Win32" set TclMachine=IX86
+IF "$(Platform)" EQU "x64" set TclMachine=AMD64
+
+IF "$(Configuration)" EQU "Debug" (
+    set TixDebug=1
+) ELSE (
+    set TixDebug=0
+)
+
+cd $(tixDir)\win
+nmake -f python.mak MACHINE=%TclMachine% DEBUG=%TixDebug% TCL_DIR=$(SolutionDir)$(tclDir) TK_DIR=$(SolutionDir)$(tkDir) all &amp;&amp; nmake -f python.mak MACHINE=%TclMachine% DEBUG=%TixDebug% TCL_DIR=$(SolutionDir)$(tclDir) TK_DIR=$(SolutionDir)$(tkDir) INSTALL_DIR=$(SolutionDir)$(tcltkDir) install
+    </NMakeBuildCommandLine>
+    <NMakeCleanCommandLine>echo Tix must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
+  </PropertyGroup>
+  <ItemDefinitionGroup>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+    <ProjectReference Include="tcl.vcxproj">
+      <Project>{b5fd6f1d-129e-4bff-9340-03606fac7283}</Project>
+    </ProjectReference>
+    <ProjectReference Include="tk.vcxproj">
+      <Project>{7e85eccf-a72c-4da4-9e52-884508e80ba1}</Project>
+    </ProjectReference>
+  </ItemGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/PCbuild/tk.vcxproj b/PCbuild/tk.vcxproj
new file mode 100644
index 0000000..3944ef7
--- /dev/null
+++ b/PCbuild/tk.vcxproj
@@ -0,0 +1,178 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGInstrument|Win32">
+      <Configuration>PGInstrument</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGInstrument|x64">
+      <Configuration>PGInstrument</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGUpdate|Win32">
+      <Configuration>PGUpdate</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="PGUpdate|x64">
+      <Configuration>PGUpdate</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{7E85ECCF-A72C-4DA4-9E52-884508E80BA1}</ProjectGuid>
+    <RootNamespace>tk</RootNamespace>
+    <Keyword>MakeFileProj</Keyword>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Makefile</ConfigurationType>
+    <CharacterSet>NotSet</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="release.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="debug.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="pgupdate.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="pginstrument.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="release.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="pyproject.props" />
+    <Import Project="x64.props" />
+    <Import Project="debug.props" />
+    <Import Project="tcltk.props" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup>
+    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+    <NMakeBuildCommandLine>
+IF EXIST $(tcltkDir)\include\tk.h (
+    IF EXIST $(tcltkDir)\bin\$(tkDLLName) (
+        IF EXIST $(OutDir)$(tkDLLName) (
+            echo Tk is already built and available.
+            exit /b 0
+        ) ELSE (
+            echo Copying tk dll to $(OutDir)
+            xcopy $(tcltkDir)\bin\$(tkDLLName) $(OutDir)
+            exit /b 0
+        )
+    )
+)
+
+IF NOT EXIST $(tkDir) (
+    echo error: $(tkDir) doesn't exist.
+    exit 1
+)
+
+IF "$(Platform)" EQU "Win32" set TclMachine=IX86
+IF "$(Platform)" EQU "x64" set TclMachine=AMD64
+
+IF "$(Configuration)" EQU "Debug" (
+    set TclOpts=symbols,noxp
+) ELSE (
+    set TclOpts=noxp
+)
+
+cd $(tkDir)\win
+
+nmake -f makefile.vc MACHINE=%TclMachine% OPTS=%TclOpts% TCLDIR=$(SolutionDir)$(tclDir) all &amp;&amp; nmake -f makefile.vc MACHINE=%TclMachine% OPTS=%TclOpts% TCLDIR=$(SolutionDir)$(tclDir) INSTALLDIR=$(SolutionDir)$(tcltkDir) install-binaries install-libraries &amp;&amp; xcopy /y $(SolutionDir)$(tcltkDir)\bin\$(tkDLLName) $(OutDir)
+    </NMakeBuildCommandLine>
+    <NMakeCleanCommandLine>del $(OutDir)$(tkDLLName)
+echo Tk must be cleaned manually if you want to rebuild it.</NMakeCleanCommandLine>
+  </PropertyGroup>
+  <ItemDefinitionGroup>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+    <ProjectReference Include="tcl.vcxproj">
+      <Project>{b5fd6f1d-129e-4bff-9340-03606fac7283}</Project>
+    </ProjectReference>
+  </ItemGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/PCbuild/x64.props b/PCbuild/x64.props
index 985c0ef..9e6f5bb 100644
--- a/PCbuild/x64.props
+++ b/PCbuild/x64.props
@@ -3,6 +3,9 @@
   <PropertyGroup Label="UserMacros" Condition="'$(HOST_PYTHON)'!=''">
     <PythonExe>$(HOST_PYTHON)</PythonExe>
   </PropertyGroup>
+  <PropertyGroup Label="UserMacros">
+    <tcltkDir>$(externalsDir)\tcltk64</tcltkDir>
+  </PropertyGroup>
   <ItemDefinitionGroup>
     <ClCompile>
       <BufferSecurityCheck>false</BufferSecurityCheck>
@@ -16,5 +19,8 @@
     <BuildMacro Include="PythonExe">
       <Value>$(PythonExe)</Value>
     </BuildMacro>
+    <BuildMacro Include="tcltkDir">
+      <Value>$(tcltkDir)</Value>
+    </BuildMacro>
   </ItemGroup>
-</Project>
+</Project>
\ No newline at end of file
diff --git a/Parser/Python.asdl b/Parser/Python.asdl
index debd89e..792cab7 100644
--- a/Parser/Python.asdl
+++ b/Parser/Python.asdl
@@ -91,7 +91,7 @@
 
     boolop = And | Or 
 
-    operator = Add | Sub | Mult | Div | Mod | Pow | LShift 
+    operator = Add | Sub | Mult | MatMult | Div | Mod | Pow | LShift 
                  | RShift | BitOr | BitXor | BitAnd | FloorDiv
 
     unaryop = Invert | Not | UAdd | USub
diff --git a/Parser/asdl.py b/Parser/asdl.py
index fc1b16c..0c4e61f 100644
--- a/Parser/asdl.py
+++ b/Parser/asdl.py
@@ -1,255 +1,53 @@
-"""An implementation of the Zephyr Abstract Syntax Definition Language.
+#-------------------------------------------------------------------------------
+# Parser for ASDL [1] definition files. Reads in an ASDL description and parses
+# it into an AST that describes it.
+#
+# The EBNF we're parsing here: Figure 1 of the paper [1]. Extended to support
+# modules and attributes after a product. Words starting with Capital letters
+# are terminals. Literal tokens are in "double quotes". Others are
+# non-terminals. Id is either TokenId or ConstructorId.
+#
+# module        ::= "module" Id "{" [definitions] "}"
+# definitions   ::= { TypeId "=" type }
+# type          ::= product | sum
+# product       ::= fields ["attributes" fields]
+# fields        ::= "(" { field, "," } field ")"
+# field         ::= TypeId ["?" | "*"] [Id]
+# sum           ::= constructor { "|" constructor } ["attributes" fields]
+# constructor   ::= ConstructorId [fields]
+#
+# [1] "The Zephyr Abstract Syntax Description Language" by Wang, et. al. See
+#     http://asdl.sourceforge.net/
+#-------------------------------------------------------------------------------
+from collections import namedtuple
+import re
 
-See http://asdl.sourceforge.net/ and
-http://www.cs.princeton.edu/research/techreps/TR-554-97
+__all__ = [
+    'builtin_types', 'parse', 'AST', 'Module', 'Type', 'Constructor',
+    'Field', 'Sum', 'Product', 'VisitorBase', 'Check', 'check']
 
-Only supports top level module decl, not view.  I'm guessing that view
-is intended to support the browser and I'm not interested in the
-browser.
+# The following classes define nodes into which the ASDL description is parsed.
+# Note: this is a "meta-AST". ASDL files (such as Python.asdl) describe the AST
+# structure used by a programming language. But ASDL files themselves need to be
+# parsed. This module parses ASDL files and uses a simple AST to represent them.
+# See the EBNF at the top of the file to understand the logical connection
+# between the various node types.
 
-Changes for Python: Add support for module versions
-"""
+builtin_types = set(
+    ['identifier', 'string', 'bytes', 'int', 'object', 'singleton'])
 
-import os
-import sys
-import traceback
-
-import spark
-
-def output(*strings):
-    for s in strings:
-        sys.stdout.write(str(s) + "\n")
-
-
-class Token(object):
-    # spark seems to dispatch in the parser based on a token's
-    # type attribute
-    def __init__(self, type, lineno):
-        self.type = type
-        self.lineno = lineno
-
-    def __str__(self):
-        return self.type
-
+class AST:
     def __repr__(self):
-        return str(self)
-
-class Id(Token):
-    def __init__(self, value, lineno):
-        self.type = 'Id'
-        self.value = value
-        self.lineno = lineno
-
-    def __str__(self):
-        return self.value
-
-class String(Token):
-    def __init__(self, value, lineno):
-        self.type = 'String'
-        self.value = value
-        self.lineno = lineno
-
-class ASDLSyntaxError(Exception):
-
-    def __init__(self, lineno, token=None, msg=None):
-        self.lineno = lineno
-        self.token = token
-        self.msg = msg
-
-    def __str__(self):
-        if self.msg is None:
-            return "Error at '%s', line %d" % (self.token, self.lineno)
-        else:
-            return "%s, line %d" % (self.msg, self.lineno)
-
-class ASDLScanner(spark.GenericScanner, object):
-
-    def tokenize(self, input):
-        self.rv = []
-        self.lineno = 1
-        super(ASDLScanner, self).tokenize(input)
-        return self.rv
-
-    def t_id(self, s):
-        r"[\w\.]+"
-        # XXX doesn't distinguish upper vs. lower, which is
-        # significant for ASDL.
-        self.rv.append(Id(s, self.lineno))
-
-    def t_string(self, s):
-        r'"[^"]*"'
-        self.rv.append(String(s, self.lineno))
-
-    def t_xxx(self, s): # not sure what this production means
-        r"<="
-        self.rv.append(Token(s, self.lineno))
-
-    def t_punctuation(self, s):
-        r"[\{\}\*\=\|\(\)\,\?\:]"
-        self.rv.append(Token(s, self.lineno))
-
-    def t_comment(self, s):
-        r"\-\-[^\n]*"
-        pass
-
-    def t_newline(self, s):
-        r"\n"
-        self.lineno += 1
-
-    def t_whitespace(self, s):
-        r"[ \t]+"
-        pass
-
-    def t_default(self, s):
-        r" . +"
-        raise ValueError("unmatched input: %r" % s)
-
-class ASDLParser(spark.GenericParser, object):
-    def __init__(self):
-        super(ASDLParser, self).__init__("module")
-
-    def typestring(self, tok):
-        return tok.type
-
-    def error(self, tok):
-        raise ASDLSyntaxError(tok.lineno, tok)
-
-    def p_module_0(self, info):
-        " module ::= Id Id { } "
-        module, name, _0, _1 = info
-        if module.value != "module":
-            raise ASDLSyntaxError(module.lineno,
-                                  msg="expected 'module', found %s" % module)
-        return Module(name, None)
-
-    def p_module(self, info):
-        " module ::= Id Id { definitions } "
-        module, name, _0, definitions, _1 = info
-        if module.value != "module":
-            raise ASDLSyntaxError(module.lineno,
-                                  msg="expected 'module', found %s" % module)
-        return Module(name, definitions)
-
-    def p_definition_0(self, definition):
-        " definitions ::= definition "
-        return definition[0]
-
-    def p_definition_1(self, definitions):
-        " definitions ::= definition definitions "
-        return definitions[0] + definitions[1]
-
-    def p_definition(self, info):
-        " definition ::= Id = type "
-        id, _, type = info
-        return [Type(id, type)]
-
-    def p_type_0(self, product):
-        " type ::= product "
-        return product[0]
-
-    def p_type_1(self, sum):
-        " type ::= sum "
-        return Sum(sum[0])
-
-    def p_type_2(self, info):
-        " type ::= sum Id ( fields ) "
-        sum, id, _0, attributes, _1 = info
-        if id.value != "attributes":
-            raise ASDLSyntaxError(id.lineno,
-                                  msg="expected attributes, found %s" % id)
-        return Sum(sum, attributes)
-
-    def p_product_0(self, info):
-        " product ::= ( fields ) "
-        _0, fields, _1 = info
-        return Product(fields)
-
-    def p_product_1(self, info):
-        " product ::= ( fields ) Id ( fields ) "
-        _0, fields, _1, id, _2, attributes, _3 = info
-        if id.value != "attributes":
-            raise ASDLSyntaxError(id.lineno,
-                                  msg="expected attributes, found %s" % id)
-        return Product(fields, attributes)
-
-    def p_sum_0(self, constructor):
-        " sum ::= constructor "
-        return [constructor[0]]
-
-    def p_sum_1(self, info):
-        " sum ::= constructor | sum "
-        constructor, _, sum = info
-        return [constructor] + sum
-
-    def p_sum_2(self, info):
-        " sum ::= constructor | sum "
-        constructor, _, sum = info
-        return [constructor] + sum
-
-    def p_constructor_0(self, id):
-        " constructor ::= Id "
-        return Constructor(id[0])
-
-    def p_constructor_1(self, info):
-        " constructor ::= Id ( fields ) "
-        id, _0, fields, _1 = info
-        return Constructor(id, fields)
-
-    def p_fields_0(self, field):
-        " fields ::= field "
-        return [field[0]]
-
-    def p_fields_1(self, info):
-        " fields ::= fields , field "
-        fields, _, field = info
-        return fields + [field]
-
-    def p_field_0(self, type_):
-        " field ::= Id "
-        return Field(type_[0])
-
-    def p_field_1(self, info):
-        " field ::= Id Id "
-        type, name = info
-        return Field(type, name)
-
-    def p_field_2(self, info):
-        " field ::= Id * Id "
-        type, _, name = info
-        return Field(type, name, seq=True)
-
-    def p_field_3(self, info):
-        " field ::= Id ? Id "
-        type, _, name = info
-        return Field(type, name, opt=True)
-
-    def p_field_4(self, type_):
-        " field ::= Id * "
-        return Field(type_[0], seq=True)
-
-    def p_field_5(self, type_):
-        " field ::= Id ? "
-        return Field(type[0], opt=True)
-
-builtin_types = ("identifier", "string", "bytes", "int", "object", "singleton")
-
-# below is a collection of classes to capture the AST of an AST :-)
-# not sure if any of the methods are useful yet, but I'm adding them
-# piecemeal as they seem helpful
-
-class AST(object):
-    pass # a marker class
+        raise NotImplementedError
 
 class Module(AST):
     def __init__(self, name, dfns):
         self.name = name
         self.dfns = dfns
-        self.types = {} # maps type name to value (from dfns)
-        for type in dfns:
-            self.types[type.name.value] = type.value
+        self.types = {type.name: type.value for type in dfns}
 
     def __repr__(self):
-        return "Module(%s, %s)" % (self.name, self.dfns)
+        return 'Module({0.name}, {0.dfns})'.format(self)
 
 class Type(AST):
     def __init__(self, name, value):
@@ -257,7 +55,7 @@
         self.value = value
 
     def __repr__(self):
-        return "Type(%s, %s)" % (self.name, self.value)
+        return 'Type({0.name}, {0.value})'.format(self)
 
 class Constructor(AST):
     def __init__(self, name, fields=None):
@@ -265,7 +63,7 @@
         self.fields = fields or []
 
     def __repr__(self):
-        return "Constructor(%s, %s)" % (self.name, self.fields)
+        return 'Constructor({0.name}, {0.fields})'.format(self)
 
 class Field(AST):
     def __init__(self, type, name=None, seq=False, opt=False):
@@ -282,9 +80,9 @@
         else:
             extra = ""
         if self.name is None:
-            return "Field(%s%s)" % (self.type, extra)
+            return 'Field({0.type}{1})'.format(self, extra)
         else:
-            return "Field(%s, %s%s)" % (self.type, self.name, extra)
+            return 'Field({0.type}, {0.name}{1})'.format(self, extra)
 
 class Sum(AST):
     def __init__(self, types, attributes=None):
@@ -292,10 +90,10 @@
         self.attributes = attributes or []
 
     def __repr__(self):
-        if self.attributes is None:
-            return "Sum(%s)" % self.types
+        if self.attributes:
+            return 'Sum({0.types}, {0.attributes})'.format(self)
         else:
-            return "Sum(%s, %s)" % (self.types, self.attributes)
+            return 'Sum({0.types})'.format(self)
 
 class Product(AST):
     def __init__(self, fields, attributes=None):
@@ -303,49 +101,43 @@
         self.attributes = attributes or []
 
     def __repr__(self):
-        if self.attributes is None:
-            return "Product(%s)" % self.fields
+        if self.attributes:
+            return 'Product({0.fields}, {0.attributes})'.format(self)
         else:
-            return "Product(%s, %s)" % (self.fields, self.attributes)
+            return 'Product({0.fields})'.format(self)
 
-class VisitorBase(object):
+# A generic visitor for the meta-AST that describes ASDL. This can be used by
+# emitters. Note that this visitor does not provide a generic visit method, so a
+# subclass needs to define visit methods from visitModule to as deep as the
+# interesting node.
+# We also define a Check visitor that makes sure the parsed ASDL is well-formed.
 
-    def __init__(self, skip=False):
+class VisitorBase:
+    """Generic tree visitor for ASTs."""
+    def __init__(self):
         self.cache = {}
-        self.skip = skip
 
-    def visit(self, object, *args):
-        meth = self._dispatch(object)
-        if meth is None:
-            return
-        try:
-            meth(object, *args)
-        except Exception:
-            output("Error visiting" + repr(object))
-            output(str(sys.exc_info()[1]))
-            traceback.print_exc()
-            # XXX hack
-            if hasattr(self, 'file'):
-                self.file.flush()
-            os._exit(1)
-
-    def _dispatch(self, object):
-        assert isinstance(object, AST), repr(object)
-        klass = object.__class__
+    def visit(self, obj, *args):
+        klass = obj.__class__
         meth = self.cache.get(klass)
         if meth is None:
             methname = "visit" + klass.__name__
-            if self.skip:
-                meth = getattr(self, methname, None)
-            else:
-                meth = getattr(self, methname)
+            meth = getattr(self, methname, None)
             self.cache[klass] = meth
-        return meth
+        if meth:
+            try:
+                meth(obj, *args)
+            except Exception as e:
+                print("Error visiting %r: %s" % (obj, e))
+                raise
 
 class Check(VisitorBase):
+    """A visitor that checks a parsed ASDL tree for correctness.
 
+    Errors are printed and accumulated.
+    """
     def __init__(self):
-        super(Check, self).__init__(skip=True)
+        super().__init__()
         self.cons = {}
         self.errors = 0
         self.types = {}
@@ -367,8 +159,8 @@
         if conflict is None:
             self.cons[key] = name
         else:
-            output("Redefinition of constructor %s" % key)
-            output("Defined in %s and %s" % (conflict, name))
+            print('Redefinition of constructor {}'.format(key))
+            print('Defined in {} and {}'.format(conflict, name))
             self.errors += 1
         for f in cons.fields:
             self.visit(f, key)
@@ -383,6 +175,11 @@
             self.visit(f, name)
 
 def check(mod):
+    """Check the parsed ASDL tree for correctness.
+
+    Return True if success. For failure, the errors are printed out and False
+    is returned.
+    """
     v = Check()
     v.visit(mod)
 
@@ -390,47 +187,190 @@
         if t not in mod.types and not t in builtin_types:
             v.errors += 1
             uses = ", ".join(v.types[t])
-            output("Undefined type %s, used in %s" % (t, uses))
-
+            print('Undefined type {}, used in {}'.format(t, uses))
     return not v.errors
 
-def parse(file):
-    scanner = ASDLScanner()
-    parser = ASDLParser()
+# The ASDL parser itself comes next. The only interesting external interface
+# here is the top-level parse function.
 
-    f = open(file)
-    try:
-        buf = f.read()
-    finally:
-        f.close()
-    tokens = scanner.tokenize(buf)
-    try:
-        return parser.parse(tokens)
-    except ASDLSyntaxError:
-        err = sys.exc_info()[1]
-        output(str(err))
-        lines = buf.split("\n")
-        output(lines[err.lineno - 1]) # lines starts at 0, files at 1
+def parse(filename):
+    """Parse ASDL from the given file and return a Module node describing it."""
+    with open(filename) as f:
+        parser = ASDLParser()
+        return parser.parse(f.read())
 
-if __name__ == "__main__":
-    import glob
-    import sys
+# Types for describing tokens in an ASDL specification.
+class TokenKind:
+    """TokenKind is provides a scope for enumerated token kinds."""
+    (ConstructorId, TypeId, Equals, Comma, Question, Pipe, Asterisk,
+     LParen, RParen, LBrace, RBrace) = range(11)
 
-    if len(sys.argv) > 1:
-        files = sys.argv[1:]
-    else:
-        testdir = "tests"
-        files = glob.glob(testdir + "/*.asdl")
+    operator_table = {
+        '=': Equals, ',': Comma,    '?': Question, '|': Pipe,    '(': LParen,
+        ')': RParen, '*': Asterisk, '{': LBrace,   '}': RBrace}
 
-    for file in files:
-        output(file)
-        mod = parse(file)
-        if not mod:
-            break
-        output("module", mod.name)
-        output(len(mod.dfns), "definitions")
-        if not check(mod):
-            output("Check failed")
+Token = namedtuple('Token', 'kind value lineno')
+
+class ASDLSyntaxError(Exception):
+    def __init__(self, msg, lineno=None):
+        self.msg = msg
+        self.lineno = lineno or '<unknown>'
+
+    def __str__(self):
+        return 'Syntax error on line {0.lineno}: {0.msg}'.format(self)
+
+def tokenize_asdl(buf):
+    """Tokenize the given buffer. Yield Token objects."""
+    for lineno, line in enumerate(buf.splitlines(), 1):
+        for m in re.finditer(r'\s*(\w+|--.*|.)', line.strip()):
+            c = m.group(1)
+            if c[0].isalpha():
+                # Some kind of identifier
+                if c[0].isupper():
+                    yield Token(TokenKind.ConstructorId, c, lineno)
+                else:
+                    yield Token(TokenKind.TypeId, c, lineno)
+            elif c[:2] == '--':
+                # Comment
+                break
+            else:
+                # Operators
+                try:
+                    op_kind = TokenKind.operator_table[c]
+                except KeyError:
+                    raise ASDLSyntaxError('Invalid operator %s' % c, lineno)
+                yield Token(op_kind, c, lineno)
+
+class ASDLParser:
+    """Parser for ASDL files.
+
+    Create, then call the parse method on a buffer containing ASDL.
+    This is a simple recursive descent parser that uses tokenize_asdl for the
+    lexing.
+    """
+    def __init__(self):
+        self._tokenizer = None
+        self.cur_token = None
+
+    def parse(self, buf):
+        """Parse the ASDL in the buffer and return an AST with a Module root.
+        """
+        self._tokenizer = tokenize_asdl(buf)
+        self._advance()
+        return self._parse_module()
+
+    def _parse_module(self):
+        if self._at_keyword('module'):
+            self._advance()
         else:
-            for dfn in mod.dfns:
-                output(dfn.name, dfn.value)
+            raise ASDLSyntaxError(
+                'Expected "module" (found {})'.format(self.cur_token.value),
+                self.cur_token.lineno)
+        name = self._match(self._id_kinds)
+        self._match(TokenKind.LBrace)
+        defs = self._parse_definitions()
+        self._match(TokenKind.RBrace)
+        return Module(name, defs)
+
+    def _parse_definitions(self):
+        defs = []
+        while self.cur_token.kind == TokenKind.TypeId:
+            typename = self._advance()
+            self._match(TokenKind.Equals)
+            type = self._parse_type()
+            defs.append(Type(typename, type))
+        return defs
+
+    def _parse_type(self):
+        if self.cur_token.kind == TokenKind.LParen:
+            # If we see a (, it's a product
+            return self._parse_product()
+        else:
+            # Otherwise it's a sum. Look for ConstructorId
+            sumlist = [Constructor(self._match(TokenKind.ConstructorId),
+                                   self._parse_optional_fields())]
+            while self.cur_token.kind  == TokenKind.Pipe:
+                # More constructors
+                self._advance()
+                sumlist.append(Constructor(
+                                self._match(TokenKind.ConstructorId),
+                                self._parse_optional_fields()))
+            return Sum(sumlist, self._parse_optional_attributes())
+
+    def _parse_product(self):
+        return Product(self._parse_fields(), self._parse_optional_attributes())
+
+    def _parse_fields(self):
+        fields = []
+        self._match(TokenKind.LParen)
+        while self.cur_token.kind == TokenKind.TypeId:
+            typename = self._advance()
+            is_seq, is_opt = self._parse_optional_field_quantifier()
+            id = (self._advance() if self.cur_token.kind in self._id_kinds
+                                  else None)
+            fields.append(Field(typename, id, seq=is_seq, opt=is_opt))
+            if self.cur_token.kind == TokenKind.RParen:
+                break
+            elif self.cur_token.kind == TokenKind.Comma:
+                self._advance()
+        self._match(TokenKind.RParen)
+        return fields
+
+    def _parse_optional_fields(self):
+        if self.cur_token.kind == TokenKind.LParen:
+            return self._parse_fields()
+        else:
+            return None
+
+    def _parse_optional_attributes(self):
+        if self._at_keyword('attributes'):
+            self._advance()
+            return self._parse_fields()
+        else:
+            return None
+
+    def _parse_optional_field_quantifier(self):
+        is_seq, is_opt = False, False
+        if self.cur_token.kind == TokenKind.Asterisk:
+            is_seq = True
+            self._advance()
+        elif self.cur_token.kind == TokenKind.Question:
+            is_opt = True
+            self._advance()
+        return is_seq, is_opt
+
+    def _advance(self):
+        """ Return the value of the current token and read the next one into
+            self.cur_token.
+        """
+        cur_val = None if self.cur_token is None else self.cur_token.value
+        try:
+            self.cur_token = next(self._tokenizer)
+        except StopIteration:
+            self.cur_token = None
+        return cur_val
+
+    _id_kinds = (TokenKind.ConstructorId, TokenKind.TypeId)
+
+    def _match(self, kind):
+        """The 'match' primitive of RD parsers.
+
+        * Verifies that the current token is of the given kind (kind can
+          be a tuple, in which case the kind must match one of its members).
+        * Returns the value of the current token
+        * Reads in the next token
+        """
+        if (isinstance(kind, tuple) and self.cur_token.kind in kind or
+            self.cur_token.kind == kind
+            ):
+            value = self.cur_token.value
+            self._advance()
+            return value
+        else:
+            raise ASDLSyntaxError(
+                'Unmatched {} (found {})'.format(kind, self.cur_token.kind),
+                self.cur_token.lineno)
+
+    def _at_keyword(self, keyword):
+        return (self.cur_token.kind == TokenKind.TypeId and
+                self.cur_token.value == keyword)
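
For reference, a minimal sketch of driving the new recursive-descent parser on
an in-memory buffer (assuming Parser/ is on sys.path so that asdl imports; the
toy grammar is purely illustrative):

import asdl

source = """\
module Tiny
{
    stmt = Pass | Expr(expr value)
    expr = Num(int n) attributes (int lineno)
}
"""

parser = asdl.ASDLParser()
mod = parser.parse(source)               # returns a Module node
print(mod.name)                          # Tiny
print([dfn.name for dfn in mod.dfns])    # ['stmt', 'expr']
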
diff --git a/Parser/asdl_c.py b/Parser/asdl_c.py
index d6086e6..a5e35d9 100755
--- a/Parser/asdl_c.py
+++ b/Parser/asdl_c.py
@@ -1,9 +1,6 @@
 #! /usr/bin/env python
 """Generate C code from an ASDL description."""
 
-# TO DO
-# handle fields that have a type but no name
-
 import os, sys
 
 import asdl
@@ -14,12 +11,8 @@
 def get_c_type(name):
     """Return a string for the C name of the type.
 
-    This function special cases the default types provided by asdl:
-    identifier, string, int.
+    This function special cases the default types provided by asdl.
     """
-    # XXX ack!  need to figure out where Id is useful and where string
-    if isinstance(name, asdl.Id):
-        name = name.value
     if name in asdl.builtin_types:
         return name
     else:
@@ -144,7 +137,7 @@
 
 
 class StructVisitor(EmitVisitor):
-    """Visitor to generate typdefs for AST."""
+    """Visitor to generate typedefs for AST."""
 
     def visitModule(self, mod):
         for dfn in mod.dfns:
@@ -188,9 +181,6 @@
                 self.visit(f, depth + 1)
             self.emit("} %s;" % cons.name, depth)
             self.emit("", depth)
-        else:
-            # XXX not sure what I want here, nothing is probably fine
-            pass
 
     def visitField(self, field, depth):
         # XXX need to lookup field.type, because it might be something
@@ -198,7 +188,7 @@
         ctype = get_c_type(field.type)
         name = field.name
         if field.seq:
-            if field.type.value in ('cmpop',):
+            if field.type == 'cmpop':
                 self.emit("asdl_int_seq *%(name)s;" % locals(), depth)
             else:
                 self.emit("asdl_seq *%(name)s;" % locals(), depth)
@@ -253,7 +243,7 @@
                 name = f.name
             # XXX should extend get_c_type() to handle this
             if f.seq:
-                if f.type.value in ('cmpop',):
+                if f.type == 'cmpop':
                     ctype = "asdl_int_seq *"
                 else:
                     ctype = "asdl_seq *"
@@ -437,7 +427,7 @@
             self.emit("", 0)
             for f in t.fields:
                 self.visitField(f, t.name, sum=sum, depth=2)
-            args = [f.name.value for f in t.fields] + [a.name.value for a in sum.attributes]
+            args = [f.name for f in t.fields] + [a.name for a in sum.attributes]
             self.emit("*out = %s(%s);" % (t.name, self.buildArgs(args)), 2)
             self.emit("if (*out == NULL) goto failed;", 2)
             self.emit("return 0;", 2)
@@ -465,7 +455,7 @@
         self.emit("", 0)
         for f in prod.fields:
             self.visitField(f, name, prod=prod, depth=1)
-        args = [f.name.value for f in prod.fields]
+        args = [f.name for f in prod.fields]
         self.emit("*out = %s(%s);" % (name, self.buildArgs(args)), 1)
         self.emit("return 0;", 1)
         self.emit("failed:", 0)
@@ -487,8 +477,8 @@
 
     def isSimpleSum(self, field):
         # XXX can the members of this list be determined automatically?
-        return field.type.value in ('expr_context', 'boolop', 'operator',
-                                    'unaryop', 'cmpop')
+        return field.type in ('expr_context', 'boolop', 'operator',
+                              'unaryop', 'cmpop')
 
     def isNumeric(self, field):
         return get_c_type(field.type) in ("int", "bool")
@@ -960,7 +950,7 @@
 
     def visitProduct(self, prod, name):
         if prod.fields:
-            fields = name.value+"_fields"
+            fields = name+"_fields"
         else:
             fields = "NULL"
         self.emit('%s_type = make_type("%s", &AST_type, %s, %d);' %
@@ -987,7 +977,7 @@
 
     def visitConstructor(self, cons, name, simple):
         if cons.fields:
-            fields = cons.name.value+"_fields"
+            fields = cons.name+"_fields"
         else:
             fields = "NULL"
         self.emit('%s_type = make_type("%s", %s_type, %s, %d);' %
@@ -1170,7 +1160,7 @@
     def set(self, field, value, depth):
         if field.seq:
             # XXX should really check for is_simple, but that requires a symbol table
-            if field.type.value == "cmpop":
+            if field.type == "cmpop":
                 # While the sequence elements are stored as void*,
                 # ast2obj_cmpop expects an enum
                 self.emit("{", depth)
@@ -1249,12 +1239,15 @@
 
 common_msg = "/* File automatically generated by %s. */\n\n"
 
-def main(srcfile):
+def main(srcfile, dump_module=False):
     argv0 = sys.argv[0]
     components = argv0.split(os.sep)
     argv0 = os.sep.join(components[-2:])
     auto_gen_msg = common_msg % argv0
     mod = asdl.parse(srcfile)
+    if dump_module:
+        print('Parsed Module:')
+        print(mod)
     if not asdl.check(mod):
         sys.exit(1)
     if INC_DIR:
@@ -1301,16 +1294,19 @@
 
     INC_DIR = ''
     SRC_DIR = ''
-    opts, args = getopt.getopt(sys.argv[1:], "h:c:")
-    if len(opts) != 1:
-        sys.stdout.write("Must specify exactly one output file\n")
-        sys.exit(1)
+    dump_module = False
+    opts, args = getopt.getopt(sys.argv[1:], "dh:c:")
     for o, v in opts:
         if o == '-h':
             INC_DIR = v
         if o == '-c':
             SRC_DIR = v
-    if len(args) != 1:
-        sys.stdout.write("Must specify single input file\n")
+        if o == '-d':
+            dump_module = True
+    if INC_DIR and SRC_DIR:
+        print('Must specify exactly one output file')
         sys.exit(1)
-    main(args[0])
+    elif len(args) != 1:
+        print('Must specify single input file')
+        sys.exit(1)
+    main(args[0], dump_module)
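
A quick sanity check of the reworked option handling (file names below are just
placeholders): with the new "dh:c:" spec, -d is a bare flag while -h and -c
still take an output directory.

import getopt

opts, args = getopt.getopt(['-d', '-h', 'Include', 'Python.asdl'], "dh:c:")
print(opts)   # [('-d', ''), ('-h', 'Include')]
print(args)   # ['Python.asdl']
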
diff --git a/Parser/spark.py b/Parser/spark.py
deleted file mode 100644
index 88c1a89..0000000
--- a/Parser/spark.py
+++ /dev/null
@@ -1,849 +0,0 @@
-#  Copyright (c) 1998-2002 John Aycock
-#
-#  Permission is hereby granted, free of charge, to any person obtaining
-#  a copy of this software and associated documentation files (the
-#  "Software"), to deal in the Software without restriction, including
-#  without limitation the rights to use, copy, modify, merge, publish,
-#  distribute, sublicense, and/or sell copies of the Software, and to
-#  permit persons to whom the Software is furnished to do so, subject to
-#  the following conditions:
-#
-#  The above copyright notice and this permission notice shall be
-#  included in all copies or substantial portions of the Software.
-#
-#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-#  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-#  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-#  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-#  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-#  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-#  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-__version__ = 'SPARK-0.7 (pre-alpha-5)'
-
-import re
-
-# Compatibility with older pythons.
-def output(string='', end='\n'):
-    sys.stdout.write(string + end)
-
-try:
-    sorted
-except NameError:
-    def sorted(seq):
-        seq2 = seq[:]
-        seq2.sort()
-        return seq2
-
-def _namelist(instance):
-    namelist, namedict, classlist = [], {}, [instance.__class__]
-    for c in classlist:
-        for b in c.__bases__:
-            classlist.append(b)
-        for name in c.__dict__.keys():
-            if name not in namedict:
-                namelist.append(name)
-                namedict[name] = 1
-    return namelist
-
-class GenericScanner:
-    def __init__(self, flags=0):
-        pattern = self.reflect()
-        self.re = re.compile(pattern, re.VERBOSE|flags)
-
-        self.index2func = {}
-        for name, number in self.re.groupindex.items():
-            self.index2func[number-1] = getattr(self, 't_' + name)
-
-    def makeRE(self, name):
-        doc = getattr(self, name).__doc__
-        rv = '(?P<%s>%s)' % (name[2:], doc)
-        return rv
-
-    def reflect(self):
-        rv = []
-        for name in _namelist(self):
-            if name[:2] == 't_' and name != 't_default':
-                rv.append(self.makeRE(name))
-
-        rv.append(self.makeRE('t_default'))
-        return '|'.join(rv)
-
-    def error(self, s, pos):
-        output("Lexical error at position %s" % pos)
-        raise SystemExit
-
-    def tokenize(self, s):
-        pos = 0
-        n = len(s)
-        while pos < n:
-            m = self.re.match(s, pos)
-            if m is None:
-                self.error(s, pos)
-
-            groups = m.groups()
-            for i in range(len(groups)):
-                if groups[i] and i in self.index2func:
-                    self.index2func[i](groups[i])
-            pos = m.end()
-
-    def t_default(self, s):
-        r'( . | \n )+'
-        output("Specification error: unmatched input")
-        raise SystemExit
-
-#
-#  Extracted from GenericParser and made global so that [un]picking works.
-#
-class _State:
-    def __init__(self, stateno, items):
-        self.T, self.complete, self.items = [], [], items
-        self.stateno = stateno
-
-class GenericParser:
-    #
-    #  An Earley parser, as per J. Earley, "An Efficient Context-Free
-    #  Parsing Algorithm", CACM 13(2), pp. 94-102.  Also J. C. Earley,
-    #  "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
-    #  Carnegie-Mellon University, August 1968.  New formulation of
-    #  the parser according to J. Aycock, "Practical Earley Parsing
-    #  and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
-    #  2001, and J. Aycock and R. N. Horspool, "Practical Earley
-    #  Parsing", unpublished paper, 2001.
-    #
-
-    def __init__(self, start):
-        self.rules = {}
-        self.rule2func = {}
-        self.rule2name = {}
-        self.collectRules()
-        self.augment(start)
-        self.ruleschanged = 1
-
-    _NULLABLE = '\e_'
-    _START = 'START'
-    _BOF = '|-'
-
-    #
-    #  When pickling, take the time to generate the full state machine;
-    #  some information is then extraneous, too.  Unfortunately we
-    #  can't save the rule2func map.
-    #
-    def __getstate__(self):
-        if self.ruleschanged:
-            #
-            #  XXX - duplicated from parse()
-            #
-            self.computeNull()
-            self.newrules = {}
-            self.new2old = {}
-            self.makeNewRules()
-            self.ruleschanged = 0
-            self.edges, self.cores = {}, {}
-            self.states = { 0: self.makeState0() }
-            self.makeState(0, self._BOF)
-        #
-        #  XXX - should find a better way to do this..
-        #
-        changes = 1
-        while changes:
-            changes = 0
-            for k, v in self.edges.items():
-                if v is None:
-                    state, sym = k
-                    if state in self.states:
-                        self.goto(state, sym)
-                        changes = 1
-        rv = self.__dict__.copy()
-        for s in self.states.values():
-            del s.items
-        del rv['rule2func']
-        del rv['nullable']
-        del rv['cores']
-        return rv
-
-    def __setstate__(self, D):
-        self.rules = {}
-        self.rule2func = {}
-        self.rule2name = {}
-        self.collectRules()
-        start = D['rules'][self._START][0][1][1]        # Blech.
-        self.augment(start)
-        D['rule2func'] = self.rule2func
-        D['makeSet'] = self.makeSet_fast
-        self.__dict__ = D
-
-    #
-    #  A hook for GenericASTBuilder and GenericASTMatcher.  Mess
-    #  thee not with this; nor shall thee toucheth the _preprocess
-    #  argument to addRule.
-    #
-    def preprocess(self, rule, func):       return rule, func
-
-    def addRule(self, doc, func, _preprocess=1):
-        fn = func
-        rules = doc.split()
-
-        index = []
-        for i in range(len(rules)):
-            if rules[i] == '::=':
-                index.append(i-1)
-        index.append(len(rules))
-
-        for i in range(len(index)-1):
-            lhs = rules[index[i]]
-            rhs = rules[index[i]+2:index[i+1]]
-            rule = (lhs, tuple(rhs))
-
-            if _preprocess:
-                rule, fn = self.preprocess(rule, func)
-
-            if lhs in self.rules:
-                self.rules[lhs].append(rule)
-            else:
-                self.rules[lhs] = [ rule ]
-            self.rule2func[rule] = fn
-            self.rule2name[rule] = func.__name__[2:]
-        self.ruleschanged = 1
-
-    def collectRules(self):
-        for name in _namelist(self):
-            if name[:2] == 'p_':
-                func = getattr(self, name)
-                doc = func.__doc__
-                self.addRule(doc, func)
-
-    def augment(self, start):
-        rule = '%s ::= %s %s' % (self._START, self._BOF, start)
-        self.addRule(rule, lambda args: args[1], 0)
-
-    def computeNull(self):
-        self.nullable = {}
-        tbd = []
-
-        for rulelist in self.rules.values():
-            lhs = rulelist[0][0]
-            self.nullable[lhs] = 0
-            for rule in rulelist:
-                rhs = rule[1]
-                if len(rhs) == 0:
-                    self.nullable[lhs] = 1
-                    continue
-                #
-                #  We only need to consider rules which
-                #  consist entirely of nonterminal symbols.
-                #  This should be a savings on typical
-                #  grammars.
-                #
-                for sym in rhs:
-                    if sym not in self.rules:
-                        break
-                else:
-                    tbd.append(rule)
-        changes = 1
-        while changes:
-            changes = 0
-            for lhs, rhs in tbd:
-                if self.nullable[lhs]:
-                    continue
-                for sym in rhs:
-                    if not self.nullable[sym]:
-                        break
-                else:
-                    self.nullable[lhs] = 1
-                    changes = 1
-
-    def makeState0(self):
-        s0 = _State(0, [])
-        for rule in self.newrules[self._START]:
-            s0.items.append((rule, 0))
-        return s0
-
-    def finalState(self, tokens):
-        #
-        #  Yuck.
-        #
-        if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
-            return 1
-        start = self.rules[self._START][0][1][1]
-        return self.goto(1, start)
-
-    def makeNewRules(self):
-        worklist = []
-        for rulelist in self.rules.values():
-            for rule in rulelist:
-                worklist.append((rule, 0, 1, rule))
-
-        for rule, i, candidate, oldrule in worklist:
-            lhs, rhs = rule
-            n = len(rhs)
-            while i < n:
-                sym = rhs[i]
-                if sym not in self.rules or \
-                   not self.nullable[sym]:
-                    candidate = 0
-                    i = i + 1
-                    continue
-
-                newrhs = list(rhs)
-                newrhs[i] = self._NULLABLE+sym
-                newrule = (lhs, tuple(newrhs))
-                worklist.append((newrule, i+1,
-                                 candidate, oldrule))
-                candidate = 0
-                i = i + 1
-            else:
-                if candidate:
-                    lhs = self._NULLABLE+lhs
-                    rule = (lhs, rhs)
-                if lhs in self.newrules:
-                    self.newrules[lhs].append(rule)
-                else:
-                    self.newrules[lhs] = [ rule ]
-                self.new2old[rule] = oldrule
-
-    def typestring(self, token):
-        return None
-
-    def error(self, token):
-        output("Syntax error at or near `%s' token" % token)
-        raise SystemExit
-
-    def parse(self, tokens):
-        sets = [ [(1,0), (2,0)] ]
-        self.links = {}
-
-        if self.ruleschanged:
-            self.computeNull()
-            self.newrules = {}
-            self.new2old = {}
-            self.makeNewRules()
-            self.ruleschanged = 0
-            self.edges, self.cores = {}, {}
-            self.states = { 0: self.makeState0() }
-            self.makeState(0, self._BOF)
-
-        for i in range(len(tokens)):
-            sets.append([])
-
-            if sets[i] == []:
-                break
-            self.makeSet(tokens[i], sets, i)
-        else:
-            sets.append([])
-            self.makeSet(None, sets, len(tokens))
-
-        #_dump(tokens, sets, self.states)
-
-        finalitem = (self.finalState(tokens), 0)
-        if finalitem not in sets[-2]:
-            if len(tokens) > 0:
-                self.error(tokens[i-1])
-            else:
-                self.error(None)
-
-        return self.buildTree(self._START, finalitem,
-                              tokens, len(sets)-2)
-
-    def isnullable(self, sym):
-        #
-        #  For symbols in G_e only.  If we weren't supporting 1.5,
-        #  could just use sym.startswith().
-        #
-        return self._NULLABLE == sym[0:len(self._NULLABLE)]
-
-    def skip(self, hs, pos=0):
-        n = len(hs[1])
-        while pos < n:
-            if not self.isnullable(hs[1][pos]):
-                break
-            pos = pos + 1
-        return pos
-
-    def makeState(self, state, sym):
-        assert sym is not None
-        #
-        #  Compute \epsilon-kernel state's core and see if
-        #  it exists already.
-        #
-        kitems = []
-        for rule, pos in self.states[state].items:
-            lhs, rhs = rule
-            if rhs[pos:pos+1] == (sym,):
-                kitems.append((rule, self.skip(rule, pos+1)))
-        core = kitems
-
-        core.sort()
-        tcore = tuple(core)
-        if tcore in self.cores:
-            return self.cores[tcore]
-        #
-        #  Nope, doesn't exist.  Compute it and the associated
-        #  \epsilon-nonkernel state together; we'll need it right away.
-        #
-        k = self.cores[tcore] = len(self.states)
-        K, NK = _State(k, kitems), _State(k+1, [])
-        self.states[k] = K
-        predicted = {}
-
-        edges = self.edges
-        rules = self.newrules
-        for X in K, NK:
-            worklist = X.items
-            for item in worklist:
-                rule, pos = item
-                lhs, rhs = rule
-                if pos == len(rhs):
-                    X.complete.append(rule)
-                    continue
-
-                nextSym = rhs[pos]
-                key = (X.stateno, nextSym)
-                if nextSym not in rules:
-                    if key not in edges:
-                        edges[key] = None
-                        X.T.append(nextSym)
-                else:
-                    edges[key] = None
-                    if nextSym not in predicted:
-                        predicted[nextSym] = 1
-                        for prule in rules[nextSym]:
-                            ppos = self.skip(prule)
-                            new = (prule, ppos)
-                            NK.items.append(new)
-            #
-            #  Problem: we know K needs generating, but we
-            #  don't yet know about NK.  Can't commit anything
-            #  regarding NK to self.edges until we're sure.  Should
-            #  we delay committing on both K and NK to avoid this
-            #  hacky code?  This creates other problems..
-            #
-            if X is K:
-                edges = {}
-
-        if NK.items == []:
-            return k
-
-        #
-        #  Check for \epsilon-nonkernel's core.  Unfortunately we
-        #  need to know the entire set of predicted nonterminals
-        #  to do this without accidentally duplicating states.
-        #
-        core = sorted(predicted.keys())
-        tcore = tuple(core)
-        if tcore in self.cores:
-            self.edges[(k, None)] = self.cores[tcore]
-            return k
-
-        nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
-        self.edges.update(edges)
-        self.states[nk] = NK
-        return k
-
-    def goto(self, state, sym):
-        key = (state, sym)
-        if key not in self.edges:
-            #
-            #  No transitions from state on sym.
-            #
-            return None
-
-        rv = self.edges[key]
-        if rv is None:
-            #
-            #  Target state isn't generated yet.  Remedy this.
-            #
-            rv = self.makeState(state, sym)
-            self.edges[key] = rv
-        return rv
-
-    def gotoT(self, state, t):
-        return [self.goto(state, t)]
-
-    def gotoST(self, state, st):
-        rv = []
-        for t in self.states[state].T:
-            if st == t:
-                rv.append(self.goto(state, t))
-        return rv
-
-    def add(self, set, item, i=None, predecessor=None, causal=None):
-        if predecessor is None:
-            if item not in set:
-                set.append(item)
-        else:
-            key = (item, i)
-            if item not in set:
-                self.links[key] = []
-                set.append(item)
-            self.links[key].append((predecessor, causal))
-
-    def makeSet(self, token, sets, i):
-        cur, next = sets[i], sets[i+1]
-
-        ttype = token is not None and self.typestring(token) or None
-        if ttype is not None:
-            fn, arg = self.gotoT, ttype
-        else:
-            fn, arg = self.gotoST, token
-
-        for item in cur:
-            ptr = (item, i)
-            state, parent = item
-            add = fn(state, arg)
-            for k in add:
-                if k is not None:
-                    self.add(next, (k, parent), i+1, ptr)
-                    nk = self.goto(k, None)
-                    if nk is not None:
-                        self.add(next, (nk, i+1))
-
-            if parent == i:
-                continue
-
-            for rule in self.states[state].complete:
-                lhs, rhs = rule
-                for pitem in sets[parent]:
-                    pstate, pparent = pitem
-                    k = self.goto(pstate, lhs)
-                    if k is not None:
-                        why = (item, i, rule)
-                        pptr = (pitem, parent)
-                        self.add(cur, (k, pparent),
-                                 i, pptr, why)
-                        nk = self.goto(k, None)
-                        if nk is not None:
-                            self.add(cur, (nk, i))
-
-    def makeSet_fast(self, token, sets, i):
-        #
-        #  Call *only* when the entire state machine has been built!
-        #  It relies on self.edges being filled in completely, and
-        #  then duplicates and inlines code to boost speed at the
-        #  cost of extreme ugliness.
-        #
-        cur, next = sets[i], sets[i+1]
-        ttype = token is not None and self.typestring(token) or None
-
-        for item in cur:
-            ptr = (item, i)
-            state, parent = item
-            if ttype is not None:
-                k = self.edges.get((state, ttype), None)
-                if k is not None:
-                    #self.add(next, (k, parent), i+1, ptr)
-                    #INLINED --v
-                    new = (k, parent)
-                    key = (new, i+1)
-                    if new not in next:
-                        self.links[key] = []
-                        next.append(new)
-                    self.links[key].append((ptr, None))
-                    #INLINED --^
-                    #nk = self.goto(k, None)
-                    nk = self.edges.get((k, None), None)
-                    if nk is not None:
-                        #self.add(next, (nk, i+1))
-                        #INLINED --v
-                        new = (nk, i+1)
-                        if new not in next:
-                            next.append(new)
-                        #INLINED --^
-            else:
-                add = self.gotoST(state, token)
-                for k in add:
-                    if k is not None:
-                        self.add(next, (k, parent), i+1, ptr)
-                        #nk = self.goto(k, None)
-                        nk = self.edges.get((k, None), None)
-                        if nk is not None:
-                            self.add(next, (nk, i+1))
-
-            if parent == i:
-                continue
-
-            for rule in self.states[state].complete:
-                lhs, rhs = rule
-                for pitem in sets[parent]:
-                    pstate, pparent = pitem
-                    #k = self.goto(pstate, lhs)
-                    k = self.edges.get((pstate, lhs), None)
-                    if k is not None:
-                        why = (item, i, rule)
-                        pptr = (pitem, parent)
-                        #self.add(cur, (k, pparent),
-                        #        i, pptr, why)
-                        #INLINED --v
-                        new = (k, pparent)
-                        key = (new, i)
-                        if new not in cur:
-                            self.links[key] = []
-                            cur.append(new)
-                        self.links[key].append((pptr, why))
-                        #INLINED --^
-                        #nk = self.goto(k, None)
-                        nk = self.edges.get((k, None), None)
-                        if nk is not None:
-                            #self.add(cur, (nk, i))
-                            #INLINED --v
-                            new = (nk, i)
-                            if new not in cur:
-                                cur.append(new)
-                            #INLINED --^
-
-    def predecessor(self, key, causal):
-        for p, c in self.links[key]:
-            if c == causal:
-                return p
-        assert 0
-
-    def causal(self, key):
-        links = self.links[key]
-        if len(links) == 1:
-            return links[0][1]
-        choices = []
-        rule2cause = {}
-        for p, c in links:
-            rule = c[2]
-            choices.append(rule)
-            rule2cause[rule] = c
-        return rule2cause[self.ambiguity(choices)]
-
-    def deriveEpsilon(self, nt):
-        if len(self.newrules[nt]) > 1:
-            rule = self.ambiguity(self.newrules[nt])
-        else:
-            rule = self.newrules[nt][0]
-        #output(rule)
-
-        rhs = rule[1]
-        attr = [None] * len(rhs)
-
-        for i in range(len(rhs)-1, -1, -1):
-            attr[i] = self.deriveEpsilon(rhs[i])
-        return self.rule2func[self.new2old[rule]](attr)
-
-    def buildTree(self, nt, item, tokens, k):
-        state, parent = item
-
-        choices = []
-        for rule in self.states[state].complete:
-            if rule[0] == nt:
-                choices.append(rule)
-        rule = choices[0]
-        if len(choices) > 1:
-            rule = self.ambiguity(choices)
-        #output(rule)
-
-        rhs = rule[1]
-        attr = [None] * len(rhs)
-
-        for i in range(len(rhs)-1, -1, -1):
-            sym = rhs[i]
-            if sym not in self.newrules:
-                if sym != self._BOF:
-                    attr[i] = tokens[k-1]
-                    key = (item, k)
-                    item, k = self.predecessor(key, None)
-            #elif self.isnullable(sym):
-            elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
-                attr[i] = self.deriveEpsilon(sym)
-            else:
-                key = (item, k)
-                why = self.causal(key)
-                attr[i] = self.buildTree(sym, why[0],
-                                         tokens, why[1])
-                item, k = self.predecessor(key, why)
-        return self.rule2func[self.new2old[rule]](attr)
-
-    def ambiguity(self, rules):
-        #
-        #  XXX - problem here and in collectRules() if the same rule
-        #        appears in >1 method.  Also undefined results if rules
-        #        causing the ambiguity appear in the same method.
-        #
-        sortlist = []
-        name2index = {}
-        for i in range(len(rules)):
-            lhs, rhs = rule = rules[i]
-            name = self.rule2name[self.new2old[rule]]
-            sortlist.append((len(rhs), name))
-            name2index[name] = i
-        sortlist.sort()
-        list = [b for a, b in sortlist]
-        return rules[name2index[self.resolve(list)]]
-
-    def resolve(self, list):
-        #
-        #  Resolve ambiguity in favor of the shortest RHS.
-        #  Since we walk the tree from the top down, this
-        #  should effectively resolve in favor of a "shift".
-        #
-        return list[0]
-
-#
-#  GenericASTBuilder automagically constructs a concrete/abstract syntax tree
-#  for a given input.  The extra argument is a class (not an instance!)
-#  which supports the "__setslice__" and "__len__" methods.
-#
-#  XXX - silently overrides any user code in methods.
-#
-
-class GenericASTBuilder(GenericParser):
-    def __init__(self, AST, start):
-        GenericParser.__init__(self, start)
-        self.AST = AST
-
-    def preprocess(self, rule, func):
-        rebind = lambda lhs, self=self: \
-                        lambda args, lhs=lhs, self=self: \
-                                self.buildASTNode(args, lhs)
-        lhs, rhs = rule
-        return rule, rebind(lhs)
-
-    def buildASTNode(self, args, lhs):
-        children = []
-        for arg in args:
-            if isinstance(arg, self.AST):
-                children.append(arg)
-            else:
-                children.append(self.terminal(arg))
-        return self.nonterminal(lhs, children)
-
-    def terminal(self, token):      return token
-
-    def nonterminal(self, type, args):
-        rv = self.AST(type)
-        rv[:len(args)] = args
-        return rv
-
-#
-#  GenericASTTraversal is a Visitor pattern according to Design Patterns.  For
-#  each node it attempts to invoke the method n_<node type>, falling
-#  back onto the default() method if the n_* can't be found.  The preorder
-#  traversal also looks for an exit hook named n_<node type>_exit (no default
-#  routine is called if it's not found).  To prematurely halt traversal
-#  of a subtree, call the prune() method -- this only makes sense for a
-#  preorder traversal.  Node type is determined via the typestring() method.
-#
-
-class GenericASTTraversalPruningException:
-    pass
-
-class GenericASTTraversal:
-    def __init__(self, ast):
-        self.ast = ast
-
-    def typestring(self, node):
-        return node.type
-
-    def prune(self):
-        raise GenericASTTraversalPruningException
-
-    def preorder(self, node=None):
-        if node is None:
-            node = self.ast
-
-        try:
-            name = 'n_' + self.typestring(node)
-            if hasattr(self, name):
-                func = getattr(self, name)
-                func(node)
-            else:
-                self.default(node)
-        except GenericASTTraversalPruningException:
-            return
-
-        for kid in node:
-            self.preorder(kid)
-
-        name = name + '_exit'
-        if hasattr(self, name):
-            func = getattr(self, name)
-            func(node)
-
-    def postorder(self, node=None):
-        if node is None:
-            node = self.ast
-
-        for kid in node:
-            self.postorder(kid)
-
-        name = 'n_' + self.typestring(node)
-        if hasattr(self, name):
-            func = getattr(self, name)
-            func(node)
-        else:
-            self.default(node)
-
-
-    def default(self, node):
-        pass
-
-#
-#  GenericASTMatcher.  AST nodes must have "__getitem__" and "__cmp__"
-#  implemented.
-#
-#  XXX - makes assumptions about how GenericParser walks the parse tree.
-#
-
-class GenericASTMatcher(GenericParser):
-    def __init__(self, start, ast):
-        GenericParser.__init__(self, start)
-        self.ast = ast
-
-    def preprocess(self, rule, func):
-        rebind = lambda func, self=self: \
-                        lambda args, func=func, self=self: \
-                                self.foundMatch(args, func)
-        lhs, rhs = rule
-        rhslist = list(rhs)
-        rhslist.reverse()
-
-        return (lhs, tuple(rhslist)), rebind(func)
-
-    def foundMatch(self, args, func):
-        func(args[-1])
-        return args[-1]
-
-    def match_r(self, node):
-        self.input.insert(0, node)
-        children = 0
-
-        for child in node:
-            if children == 0:
-                self.input.insert(0, '(')
-            children = children + 1
-            self.match_r(child)
-
-        if children > 0:
-            self.input.insert(0, ')')
-
-    def match(self, ast=None):
-        if ast is None:
-            ast = self.ast
-        self.input = []
-
-        self.match_r(ast)
-        self.parse(self.input)
-
-    def resolve(self, list):
-        #
-        #  Resolve ambiguity in favor of the longest RHS.
-        #
-        return list[-1]
-
-def _dump(tokens, sets, states):
-    for i in range(len(sets)):
-        output('set %d' % i)
-        for item in sets[i]:
-            output('\t', item)
-            for (lhs, rhs), pos in states[item[0]].items:
-                output('\t\t', lhs, '::=', end='')
-                output(' '.join(rhs[:pos]), end='')
-                output('.', end='')
-                output(' '.join(rhs[pos:]))
-        if i < len(tokens):
-            output()
-            output('token %s' % str(tokens[i]))
-            output()
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 7283058..6aaa4a9 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -98,6 +98,7 @@
     "DOUBLESLASH",
     "DOUBLESLASHEQUAL",
     "AT",
+    "ATEQUAL",
     "RARROW",
     "ELLIPSIS",
     /* This table must match the #defines in token.h! */
@@ -1131,7 +1132,7 @@
     case '}':           return RBRACE;
     case '^':           return CIRCUMFLEX;
     case '~':           return TILDE;
-    case '@':       return AT;
+    case '@':           return AT;
     default:            return OP;
     }
 }
@@ -1207,6 +1208,11 @@
         case '=':               return CIRCUMFLEXEQUAL;
         }
         break;
+    case '@':
+        switch (c2) {
+        case '=':               return ATEQUAL;
+        }
+        break;
     }
     return OP;
 }
diff --git a/Python/Python-ast.c b/Python/Python-ast.c
index 44fdafc..994e721 100644
--- a/Python/Python-ast.c
+++ b/Python/Python-ast.c
@@ -349,13 +349,14 @@
 static PyTypeObject *Or_type;
 static PyTypeObject *operator_type;
 static PyObject *Add_singleton, *Sub_singleton, *Mult_singleton,
-*Div_singleton, *Mod_singleton, *Pow_singleton, *LShift_singleton,
-*RShift_singleton, *BitOr_singleton, *BitXor_singleton, *BitAnd_singleton,
-*FloorDiv_singleton;
+*MatMult_singleton, *Div_singleton, *Mod_singleton, *Pow_singleton,
+*LShift_singleton, *RShift_singleton, *BitOr_singleton, *BitXor_singleton,
+*BitAnd_singleton, *FloorDiv_singleton;
 static PyObject* ast2obj_operator(operator_ty);
 static PyTypeObject *Add_type;
 static PyTypeObject *Sub_type;
 static PyTypeObject *Mult_type;
+static PyTypeObject *MatMult_type;
 static PyTypeObject *Div_type;
 static PyTypeObject *Mod_type;
 static PyTypeObject *Pow_type;
@@ -970,6 +971,10 @@
     if (!Mult_type) return 0;
     Mult_singleton = PyType_GenericNew(Mult_type, NULL, NULL);
     if (!Mult_singleton) return 0;
+    MatMult_type = make_type("MatMult", operator_type, NULL, 0);
+    if (!MatMult_type) return 0;
+    MatMult_singleton = PyType_GenericNew(MatMult_type, NULL, NULL);
+    if (!MatMult_singleton) return 0;
     Div_type = make_type("Div", operator_type, NULL, 0);
     if (!Div_type) return 0;
     Div_singleton = PyType_GenericNew(Div_type, NULL, NULL);
@@ -3232,6 +3237,9 @@
         case Mult:
             Py_INCREF(Mult_singleton);
             return Mult_singleton;
+        case MatMult:
+            Py_INCREF(MatMult_singleton);
+            return MatMult_singleton;
         case Div:
             Py_INCREF(Div_singleton);
             return Div_singleton;
@@ -6175,6 +6183,14 @@
         *out = Mult;
         return 0;
     }
+    isinstance = PyObject_IsInstance(obj, (PyObject *)MatMult_type);
+    if (isinstance == -1) {
+        return 1;
+    }
+    if (isinstance) {
+        *out = MatMult;
+        return 0;
+    }
     isinstance = PyObject_IsInstance(obj, (PyObject *)Div_type);
     if (isinstance == -1) {
         return 1;
@@ -6956,6 +6972,8 @@
     if (PyDict_SetItemString(d, "Add", (PyObject*)Add_type) < 0) return NULL;
     if (PyDict_SetItemString(d, "Sub", (PyObject*)Sub_type) < 0) return NULL;
     if (PyDict_SetItemString(d, "Mult", (PyObject*)Mult_type) < 0) return NULL;
+    if (PyDict_SetItemString(d, "MatMult", (PyObject*)MatMult_type) < 0) return
+        NULL;
     if (PyDict_SetItemString(d, "Div", (PyObject*)Div_type) < 0) return NULL;
     if (PyDict_SetItemString(d, "Mod", (PyObject*)Mod_type) < 0) return NULL;
     if (PyDict_SetItemString(d, "Pow", (PyObject*)Pow_type) < 0) return NULL;
diff --git a/Python/ast.c b/Python/ast.c
index 5668755..d6bddf1 100644
--- a/Python/ast.c
+++ b/Python/ast.c
@@ -825,6 +825,8 @@
             return Sub;
         case STAR:
             return Mult;
+        case AT:
+            return MatMult;
         case SLASH:
             return Div;
         case DOUBLESLASH:
@@ -1030,6 +1032,8 @@
                 return Pow;
             else
                 return Mult;
+        case '@':
+            return MatMult;
         default:
             PyErr_Format(PyExc_SystemError, "invalid augassign: %s", STR(n));
             return (operator_ty)0;
@@ -2266,7 +2270,7 @@
        and_expr: shift_expr ('&' shift_expr)*
        shift_expr: arith_expr (('<<'|'>>') arith_expr)*
        arith_expr: term (('+'|'-') term)*
-       term: factor (('*'|'/'|'%'|'//') factor)*
+       term: factor (('*'|'@'|'/'|'%'|'//') factor)*
        factor: ('+'|'-'|'~') factor | power
        power: atom trailer* ('**' factor)*
     */
@@ -2577,7 +2581,7 @@
     /* expr_stmt: testlist_star_expr (augassign (yield_expr|testlist)
                 | ('=' (yield_expr|testlist))*)
        testlist_star_expr: (test|star_expr) (',' test|star_expr)* [',']
-       augassign: '+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^='
+       augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^='
                 | '<<=' | '>>=' | '**=' | '//='
        test: ... here starts the operator precendence dance
      */
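
With '@' and '@=' wired into the binop/augassign conversions above, an
interpreter carrying this change parses the operator into the new MatMult node;
a small illustrative sketch:

import ast

print(ast.dump(ast.parse("a @ b", mode="eval").body))
# roughly: BinOp(left=Name(...), op=MatMult(), right=Name(...))

print(ast.dump(ast.parse("a @= b").body[0]))
# roughly: AugAssign(target=Name(...), op=MatMult(), value=Name(...))
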
diff --git a/Python/ceval.c b/Python/ceval.c
index 1cc3c94..e14e772 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -1495,6 +1495,18 @@
             DISPATCH();
         }
 
+        TARGET(BINARY_MATRIX_MULTIPLY) {
+            PyObject *right = POP();
+            PyObject *left = TOP();
+            PyObject *res = PyNumber_MatrixMultiply(left, right);
+            Py_DECREF(left);
+            Py_DECREF(right);
+            SET_TOP(res);
+            if (res == NULL)
+                goto error;
+            DISPATCH();
+        }
+
         TARGET(BINARY_TRUE_DIVIDE) {
             PyObject *divisor = POP();
             PyObject *dividend = TOP();
@@ -1685,6 +1697,18 @@
             DISPATCH();
         }
 
+        TARGET(INPLACE_MATRIX_MULTIPLY) {
+            PyObject *right = POP();
+            PyObject *left = TOP();
+            PyObject *res = PyNumber_InPlaceMatrixMultiply(left, right);
+            Py_DECREF(left);
+            Py_DECREF(right);
+            SET_TOP(res);
+            if (res == NULL)
+                goto error;
+            DISPATCH();
+        }
+
         TARGET(INPLACE_TRUE_DIVIDE) {
             PyObject *divisor = POP();
             PyObject *dividend = TOP();
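
Both new opcodes funnel into PyNumber_MatrixMultiply /
PyNumber_InPlaceMatrixMultiply, which at the Python level means the
__matmul__/__imatmul__ protocol. A minimal sketch with a purely illustrative
class:

class Mat:
    def __init__(self, v):
        self.v = v
    def __matmul__(self, other):        # a @ b
        return Mat(self.v * other.v)    # stand-in for a real matrix product
    def __imatmul__(self, other):       # a @= b
        self.v *= other.v
        return self

a, b = Mat(2), Mat(3)
print((a @ b).v)   # 6
a @= b
print(a.v)         # 6
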
diff --git a/Python/codecs.c b/Python/codecs.c
index e06d6e0..4c2ae38 100644
--- a/Python/codecs.c
+++ b/Python/codecs.c
@@ -901,6 +901,7 @@
     }
 }
 
+#define ENC_UNKNOWN     -1
 #define ENC_UTF8        0
 #define ENC_UTF16BE     1
 #define ENC_UTF16LE     2
@@ -916,7 +917,11 @@
         encoding += 3;
         if (*encoding == '-' || *encoding == '_' )
             encoding++;
-        if (encoding[0] == '1' && encoding[1] == '6') {
+        if (encoding[0] == '8' && encoding[1] == '\0') {
+            *bytelength = 3;
+            return ENC_UTF8;
+        }
+        else if (encoding[0] == '1' && encoding[1] == '6') {
             encoding += 2;
             *bytelength = 2;
             if (*encoding == '\0') {
@@ -955,9 +960,11 @@
             }
         }
     }
-    /* utf-8 */
-    *bytelength = 3;
-    return ENC_UTF8;
+    else if (strcmp(encoding, "CP_UTF8") == 0) {
+        *bytelength = 3;
+        return ENC_UTF8;
+    }
+    return ENC_UNKNOWN;
 }
 
 /* This handler is declared static until someone demonstrates
@@ -994,6 +1001,12 @@
         }
         code = get_standard_encoding(encoding, &bytelength);
         Py_DECREF(encode);
+        if (code == ENC_UNKNOWN) {
+            /* Not supported, fail with original exception */
+            PyErr_SetObject(PyExceptionInstance_Class(exc), exc);
+            Py_DECREF(object);
+            return NULL;
+        }
 
         res = PyBytes_FromStringAndSize(NULL, bytelength*(end-start));
         if (!res) {
@@ -1068,6 +1081,12 @@
         }
         code = get_standard_encoding(encoding, &bytelength);
         Py_DECREF(encode);
+        if (code == ENC_UNKNOWN) {
+            /* Not supported, fail with original exception */
+            PyErr_SetObject(PyExceptionInstance_Class(exc), exc);
+            Py_DECREF(object);
+            return NULL;
+        }
 
         /* Try decoding a single surrogate character. If
            there are more, let the codec call us again. */
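
The practical effect of the ENC_UNKNOWN fallback: surrogatepass only rewrites
surrogates for the UTF encodings it recognizes, and any other codec re-raises
the original error instead of being silently treated as UTF-8. A rough sketch:

print('\udcff'.encode('utf-8', 'surrogatepass'))   # b'\xed\xb3\xbf'

try:
    '\udcff'.encode('latin-1', 'surrogatepass')
except UnicodeEncodeError as exc:
    print('latin-1 falls through:', exc.reason)
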
diff --git a/Python/compile.c b/Python/compile.c
index 69419ec..9cc1399 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -881,6 +881,7 @@
 
         case BINARY_POWER:
         case BINARY_MULTIPLY:
+        case BINARY_MATRIX_MULTIPLY:
         case BINARY_MODULO:
         case BINARY_ADD:
         case BINARY_SUBTRACT:
@@ -895,6 +896,7 @@
         case INPLACE_ADD:
         case INPLACE_SUBTRACT:
         case INPLACE_MULTIPLY:
+        case INPLACE_MATRIX_MULTIPLY:
         case INPLACE_MODULO:
             return -1;
         case STORE_SUBSCR:
@@ -2625,6 +2627,8 @@
         return BINARY_SUBTRACT;
     case Mult:
         return BINARY_MULTIPLY;
+    case MatMult:
+        return BINARY_MATRIX_MULTIPLY;
     case Div:
         return BINARY_TRUE_DIVIDE;
     case Mod:
@@ -2689,6 +2693,8 @@
         return INPLACE_SUBTRACT;
     case Mult:
         return INPLACE_MULTIPLY;
+    case MatMult:
+        return INPLACE_MATRIX_MULTIPLY;
     case Div:
         return INPLACE_TRUE_DIVIDE;
     case Mod:
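
With MatMult mapped onto the new opcodes here, a patched interpreter shows them
directly in the disassembly, e.g.:

import dis

dis.dis(compile("a @ b", "<demo>", "eval"))
# ... BINARY_MATRIX_MULTIPLY ...

def f(a, b):
    a @= b
    return a

dis.dis(f)
# ... INPLACE_MATRIX_MULTIPLY ...
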
diff --git a/Python/formatter_unicode.c b/Python/formatter_unicode.c
index e3a8149..056bb76 100644
--- a/Python/formatter_unicode.c
+++ b/Python/formatter_unicode.c
@@ -846,6 +846,13 @@
                             " format specifier 'c'");
             goto done;
         }
+        /* error to request alternate format */
+        if (format->alternate) {
+            PyErr_SetString(PyExc_ValueError,
+                            "Alternate form (#) not allowed with integer"
+                            " format specifier 'c'");
+            goto done;
+        }
 
         /* taken from unicodeobject.c formatchar() */
         /* Integer input truncated to a character */
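
The visible effect: format() keeps accepting the plain 'c' presentation type
for integers but now rejects the alternate form for it, for example:

print(format(90, 'c'))    # 'Z'

try:
    format(90, '#c')
except ValueError as exc:
    print(exc)   # Alternate form (#) not allowed with integer format specifier 'c'
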
diff --git a/Python/graminit.c b/Python/graminit.c
index e04999b..9f79d59 100644
--- a/Python/graminit.c
+++ b/Python/graminit.c
@@ -476,7 +476,7 @@
     {2, arcs_16_1},
     {3, arcs_16_2},
 };
-static arc arcs_17_0[12] = {
+static arc arcs_17_0[13] = {
     {49, 1},
     {50, 1},
     {51, 1},
@@ -489,19 +489,20 @@
     {58, 1},
     {59, 1},
     {60, 1},
+    {61, 1},
 };
 static arc arcs_17_1[1] = {
     {0, 1},
 };
 static state states_17[2] = {
-    {12, arcs_17_0},
+    {13, arcs_17_0},
     {1, arcs_17_1},
 };
 static arc arcs_18_0[1] = {
-    {61, 1},
+    {62, 1},
 };
 static arc arcs_18_1[1] = {
-    {62, 2},
+    {63, 2},
 };
 static arc arcs_18_2[1] = {
     {0, 2},
@@ -512,7 +513,7 @@
     {1, arcs_18_2},
 };
 static arc arcs_19_0[1] = {
-    {63, 1},
+    {64, 1},
 };
 static arc arcs_19_1[1] = {
     {0, 1},
@@ -522,11 +523,11 @@
     {1, arcs_19_1},
 };
 static arc arcs_20_0[5] = {
-    {64, 1},
     {65, 1},
     {66, 1},
     {67, 1},
     {68, 1},
+    {69, 1},
 };
 static arc arcs_20_1[1] = {
     {0, 1},
@@ -536,7 +537,7 @@
     {1, arcs_20_1},
 };
 static arc arcs_21_0[1] = {
-    {69, 1},
+    {70, 1},
 };
 static arc arcs_21_1[1] = {
     {0, 1},
@@ -546,7 +547,7 @@
     {1, arcs_21_1},
 };
 static arc arcs_22_0[1] = {
-    {70, 1},
+    {71, 1},
 };
 static arc arcs_22_1[1] = {
     {0, 1},
@@ -556,7 +557,7 @@
     {1, arcs_22_1},
 };
 static arc arcs_23_0[1] = {
-    {71, 1},
+    {72, 1},
 };
 static arc arcs_23_1[2] = {
     {9, 2},
@@ -581,14 +582,14 @@
     {1, arcs_24_1},
 };
 static arc arcs_25_0[1] = {
-    {72, 1},
+    {73, 1},
 };
 static arc arcs_25_1[2] = {
     {24, 2},
     {0, 1},
 };
 static arc arcs_25_2[2] = {
-    {73, 3},
+    {74, 3},
     {0, 2},
 };
 static arc arcs_25_3[1] = {
@@ -605,8 +606,8 @@
     {1, arcs_25_4},
 };
 static arc arcs_26_0[2] = {
-    {74, 1},
     {75, 1},
+    {76, 1},
 };
 static arc arcs_26_1[1] = {
     {0, 1},
@@ -616,10 +617,10 @@
     {1, arcs_26_1},
 };
 static arc arcs_27_0[1] = {
-    {76, 1},
+    {77, 1},
 };
 static arc arcs_27_1[1] = {
-    {77, 2},
+    {78, 2},
 };
 static arc arcs_27_2[1] = {
     {0, 2},
@@ -630,32 +631,32 @@
     {1, arcs_27_2},
 };
 static arc arcs_28_0[1] = {
-    {73, 1},
+    {74, 1},
 };
 static arc arcs_28_1[3] = {
-    {78, 2},
     {79, 2},
+    {80, 2},
     {12, 3},
 };
 static arc arcs_28_2[4] = {
-    {78, 2},
     {79, 2},
+    {80, 2},
     {12, 3},
-    {76, 4},
+    {77, 4},
 };
 static arc arcs_28_3[1] = {
-    {76, 4},
+    {77, 4},
 };
 static arc arcs_28_4[3] = {
     {31, 5},
     {13, 6},
-    {80, 5},
+    {81, 5},
 };
 static arc arcs_28_5[1] = {
     {0, 5},
 };
 static arc arcs_28_6[1] = {
-    {80, 7},
+    {81, 7},
 };
 static arc arcs_28_7[1] = {
     {15, 5},
@@ -674,7 +675,7 @@
     {21, 1},
 };
 static arc arcs_29_1[2] = {
-    {82, 2},
+    {83, 2},
     {0, 1},
 };
 static arc arcs_29_2[1] = {
@@ -693,7 +694,7 @@
     {12, 1},
 };
 static arc arcs_30_1[2] = {
-    {82, 2},
+    {83, 2},
     {0, 1},
 };
 static arc arcs_30_2[1] = {
@@ -709,14 +710,14 @@
     {1, arcs_30_3},
 };
 static arc arcs_31_0[1] = {
-    {81, 1},
+    {82, 1},
 };
 static arc arcs_31_1[2] = {
     {30, 2},
     {0, 1},
 };
 static arc arcs_31_2[2] = {
-    {81, 1},
+    {82, 1},
     {0, 2},
 };
 static state states_31[3] = {
@@ -725,7 +726,7 @@
     {2, arcs_31_2},
 };
 static arc arcs_32_0[1] = {
-    {83, 1},
+    {84, 1},
 };
 static arc arcs_32_1[2] = {
     {30, 0},
@@ -739,7 +740,7 @@
     {21, 1},
 };
 static arc arcs_33_1[2] = {
-    {78, 0},
+    {79, 0},
     {0, 1},
 };
 static state states_33[2] = {
@@ -747,7 +748,7 @@
     {2, arcs_33_1},
 };
 static arc arcs_34_0[1] = {
-    {84, 1},
+    {85, 1},
 };
 static arc arcs_34_1[1] = {
     {21, 2},
@@ -762,7 +763,7 @@
     {2, arcs_34_2},
 };
 static arc arcs_35_0[1] = {
-    {85, 1},
+    {86, 1},
 };
 static arc arcs_35_1[1] = {
     {21, 2},
@@ -777,7 +778,7 @@
     {2, arcs_35_2},
 };
 static arc arcs_36_0[1] = {
-    {86, 1},
+    {87, 1},
 };
 static arc arcs_36_1[1] = {
     {24, 2},
@@ -800,11 +801,11 @@
     {1, arcs_36_4},
 };
 static arc arcs_37_0[8] = {
-    {87, 1},
     {88, 1},
     {89, 1},
     {90, 1},
     {91, 1},
+    {92, 1},
     {19, 1},
     {18, 1},
     {17, 1},
@@ -817,7 +818,7 @@
     {1, arcs_37_1},
 };
 static arc arcs_38_0[1] = {
-    {92, 1},
+    {93, 1},
 };
 static arc arcs_38_1[1] = {
     {24, 2},
@@ -829,8 +830,8 @@
     {26, 4},
 };
 static arc arcs_38_4[3] = {
-    {93, 1},
-    {94, 5},
+    {94, 1},
+    {95, 5},
     {0, 4},
 };
 static arc arcs_38_5[1] = {
@@ -853,7 +854,7 @@
     {1, arcs_38_7},
 };
 static arc arcs_39_0[1] = {
-    {95, 1},
+    {96, 1},
 };
 static arc arcs_39_1[1] = {
     {24, 2},
@@ -865,7 +866,7 @@
     {26, 4},
 };
 static arc arcs_39_4[2] = {
-    {94, 5},
+    {95, 5},
     {0, 4},
 };
 static arc arcs_39_5[1] = {
@@ -888,13 +889,13 @@
     {1, arcs_39_7},
 };
 static arc arcs_40_0[1] = {
-    {96, 1},
+    {97, 1},
 };
 static arc arcs_40_1[1] = {
-    {62, 2},
+    {63, 2},
 };
 static arc arcs_40_2[1] = {
-    {97, 3},
+    {98, 3},
 };
 static arc arcs_40_3[1] = {
     {9, 4},
@@ -906,7 +907,7 @@
     {26, 6},
 };
 static arc arcs_40_6[2] = {
-    {94, 7},
+    {95, 7},
     {0, 6},
 };
 static arc arcs_40_7[1] = {
@@ -931,7 +932,7 @@
     {1, arcs_40_9},
 };
 static arc arcs_41_0[1] = {
-    {98, 1},
+    {99, 1},
 };
 static arc arcs_41_1[1] = {
     {25, 2},
@@ -940,8 +941,8 @@
     {26, 3},
 };
 static arc arcs_41_3[2] = {
-    {99, 4},
-    {100, 5},
+    {100, 4},
+    {101, 5},
 };
 static arc arcs_41_4[1] = {
     {25, 6},
@@ -956,9 +957,9 @@
     {26, 9},
 };
 static arc arcs_41_8[4] = {
-    {99, 4},
-    {94, 10},
-    {100, 5},
+    {100, 4},
+    {95, 10},
+    {101, 5},
     {0, 8},
 };
 static arc arcs_41_9[1] = {
@@ -971,7 +972,7 @@
     {26, 12},
 };
 static arc arcs_41_12[2] = {
-    {100, 5},
+    {101, 5},
     {0, 12},
 };
 static state states_41[13] = {
@@ -990,10 +991,10 @@
     {2, arcs_41_12},
 };
 static arc arcs_42_0[1] = {
-    {101, 1},
+    {102, 1},
 };
 static arc arcs_42_1[1] = {
-    {102, 2},
+    {103, 2},
 };
 static arc arcs_42_2[2] = {
     {30, 1},
@@ -1016,11 +1017,11 @@
     {24, 1},
 };
 static arc arcs_43_1[2] = {
-    {82, 2},
+    {83, 2},
     {0, 1},
 };
 static arc arcs_43_2[1] = {
-    {103, 3},
+    {104, 3},
 };
 static arc arcs_43_3[1] = {
     {0, 3},
@@ -1032,14 +1033,14 @@
     {1, arcs_43_3},
 };
 static arc arcs_44_0[1] = {
-    {104, 1},
+    {105, 1},
 };
 static arc arcs_44_1[2] = {
     {24, 2},
     {0, 1},
 };
 static arc arcs_44_2[2] = {
-    {82, 3},
+    {83, 3},
     {0, 2},
 };
 static arc arcs_44_3[1] = {
@@ -1063,14 +1064,14 @@
     {0, 1},
 };
 static arc arcs_45_2[1] = {
-    {105, 3},
+    {106, 3},
 };
 static arc arcs_45_3[1] = {
     {6, 4},
 };
 static arc arcs_45_4[2] = {
     {6, 4},
-    {106, 1},
+    {107, 1},
 };
 static state states_45[5] = {
     {2, arcs_45_0},
@@ -1080,21 +1081,21 @@
     {2, arcs_45_4},
 };
 static arc arcs_46_0[2] = {
-    {107, 1},
-    {108, 2},
+    {108, 1},
+    {109, 2},
 };
 static arc arcs_46_1[2] = {
-    {92, 3},
+    {93, 3},
     {0, 1},
 };
 static arc arcs_46_2[1] = {
     {0, 2},
 };
 static arc arcs_46_3[1] = {
-    {107, 4},
+    {108, 4},
 };
 static arc arcs_46_4[1] = {
-    {94, 5},
+    {95, 5},
 };
 static arc arcs_46_5[1] = {
     {24, 2},
@@ -1108,8 +1109,8 @@
     {1, arcs_46_5},
 };
 static arc arcs_47_0[2] = {
-    {107, 1},
-    {110, 1},
+    {108, 1},
+    {111, 1},
 };
 static arc arcs_47_1[1] = {
     {0, 1},
@@ -1119,7 +1120,7 @@
     {1, arcs_47_1},
 };
 static arc arcs_48_0[1] = {
-    {111, 1},
+    {112, 1},
 };
 static arc arcs_48_1[2] = {
     {33, 2},
@@ -1142,7 +1143,7 @@
     {1, arcs_48_4},
 };
 static arc arcs_49_0[1] = {
-    {111, 1},
+    {112, 1},
 };
 static arc arcs_49_1[2] = {
     {33, 2},
@@ -1152,7 +1153,7 @@
     {25, 3},
 };
 static arc arcs_49_3[1] = {
-    {109, 4},
+    {110, 4},
 };
 static arc arcs_49_4[1] = {
     {0, 4},
@@ -1165,10 +1166,10 @@
     {1, arcs_49_4},
 };
 static arc arcs_50_0[1] = {
-    {112, 1},
+    {113, 1},
 };
 static arc arcs_50_1[2] = {
-    {113, 0},
+    {114, 0},
     {0, 1},
 };
 static state states_50[2] = {
@@ -1176,10 +1177,10 @@
     {2, arcs_50_1},
 };
 static arc arcs_51_0[1] = {
-    {114, 1},
+    {115, 1},
 };
 static arc arcs_51_1[2] = {
-    {115, 0},
+    {116, 0},
     {0, 1},
 };
 static state states_51[2] = {
@@ -1187,11 +1188,11 @@
     {2, arcs_51_1},
 };
 static arc arcs_52_0[2] = {
-    {116, 1},
-    {117, 2},
+    {117, 1},
+    {118, 2},
 };
 static arc arcs_52_1[1] = {
-    {114, 2},
+    {115, 2},
 };
 static arc arcs_52_2[1] = {
     {0, 2},
@@ -1202,10 +1203,10 @@
     {1, arcs_52_2},
 };
 static arc arcs_53_0[1] = {
-    {103, 1},
+    {104, 1},
 };
 static arc arcs_53_1[2] = {
-    {118, 0},
+    {119, 0},
     {0, 1},
 };
 static state states_53[2] = {
@@ -1213,25 +1214,25 @@
     {2, arcs_53_1},
 };
 static arc arcs_54_0[10] = {
-    {119, 1},
     {120, 1},
     {121, 1},
     {122, 1},
     {123, 1},
     {124, 1},
     {125, 1},
-    {97, 1},
-    {116, 2},
-    {126, 3},
+    {126, 1},
+    {98, 1},
+    {117, 2},
+    {127, 3},
 };
 static arc arcs_54_1[1] = {
     {0, 1},
 };
 static arc arcs_54_2[1] = {
-    {97, 1},
+    {98, 1},
 };
 static arc arcs_54_3[2] = {
-    {116, 1},
+    {117, 1},
     {0, 3},
 };
 static state states_54[4] = {
@@ -1244,7 +1245,7 @@
     {31, 1},
 };
 static arc arcs_55_1[1] = {
-    {103, 2},
+    {104, 2},
 };
 static arc arcs_55_2[1] = {
     {0, 2},
@@ -1255,10 +1256,10 @@
     {1, arcs_55_2},
 };
 static arc arcs_56_0[1] = {
-    {127, 1},
+    {128, 1},
 };
 static arc arcs_56_1[2] = {
-    {128, 0},
+    {129, 0},
     {0, 1},
 };
 static state states_56[2] = {
@@ -1266,10 +1267,10 @@
     {2, arcs_56_1},
 };
 static arc arcs_57_0[1] = {
-    {129, 1},
+    {130, 1},
 };
 static arc arcs_57_1[2] = {
-    {130, 0},
+    {131, 0},
     {0, 1},
 };
 static state states_57[2] = {
@@ -1277,10 +1278,10 @@
     {2, arcs_57_1},
 };
 static arc arcs_58_0[1] = {
-    {131, 1},
+    {132, 1},
 };
 static arc arcs_58_1[2] = {
-    {132, 0},
+    {133, 0},
     {0, 1},
 };
 static state states_58[2] = {
@@ -1288,11 +1289,11 @@
     {2, arcs_58_1},
 };
 static arc arcs_59_0[1] = {
-    {133, 1},
+    {134, 1},
 };
 static arc arcs_59_1[3] = {
-    {134, 0},
     {135, 0},
+    {136, 0},
     {0, 1},
 };
 static state states_59[2] = {
@@ -1300,11 +1301,11 @@
     {3, arcs_59_1},
 };
 static arc arcs_60_0[1] = {
-    {136, 1},
+    {137, 1},
 };
 static arc arcs_60_1[3] = {
-    {137, 0},
     {138, 0},
+    {139, 0},
     {0, 1},
 };
 static state states_60[2] = {
@@ -1312,27 +1313,28 @@
     {3, arcs_60_1},
 };
 static arc arcs_61_0[1] = {
-    {139, 1},
+    {140, 1},
 };
-static arc arcs_61_1[5] = {
+static arc arcs_61_1[6] = {
     {31, 0},
-    {140, 0},
+    {11, 0},
     {141, 0},
     {142, 0},
+    {143, 0},
     {0, 1},
 };
 static state states_61[2] = {
     {1, arcs_61_0},
-    {5, arcs_61_1},
+    {6, arcs_61_1},
 };
 static arc arcs_62_0[4] = {
-    {137, 1},
     {138, 1},
-    {143, 1},
-    {144, 2},
+    {139, 1},
+    {144, 1},
+    {145, 2},
 };
 static arc arcs_62_1[1] = {
-    {139, 2},
+    {140, 2},
 };
 static arc arcs_62_2[1] = {
     {0, 2},
@@ -1343,15 +1345,15 @@
     {1, arcs_62_2},
 };
 static arc arcs_63_0[1] = {
-    {145, 1},
+    {146, 1},
 };
 static arc arcs_63_1[3] = {
-    {146, 1},
+    {147, 1},
     {32, 2},
     {0, 1},
 };
 static arc arcs_63_2[1] = {
-    {139, 3},
+    {140, 3},
 };
 static arc arcs_63_3[1] = {
     {0, 3},
@@ -1364,44 +1366,44 @@
 };
 static arc arcs_64_0[10] = {
     {13, 1},
-    {148, 2},
-    {150, 3},
+    {149, 2},
+    {151, 3},
     {21, 4},
-    {153, 4},
-    {154, 5},
-    {79, 4},
-    {155, 4},
+    {154, 4},
+    {155, 5},
+    {80, 4},
     {156, 4},
     {157, 4},
+    {158, 4},
 };
 static arc arcs_64_1[3] = {
     {47, 6},
-    {147, 6},
+    {148, 6},
     {15, 4},
 };
 static arc arcs_64_2[2] = {
-    {147, 7},
-    {149, 4},
+    {148, 7},
+    {150, 4},
 };
 static arc arcs_64_3[2] = {
-    {151, 8},
-    {152, 4},
+    {152, 8},
+    {153, 4},
 };
 static arc arcs_64_4[1] = {
     {0, 4},
 };
 static arc arcs_64_5[2] = {
-    {154, 5},
+    {155, 5},
     {0, 5},
 };
 static arc arcs_64_6[1] = {
     {15, 4},
 };
 static arc arcs_64_7[1] = {
-    {149, 4},
+    {150, 4},
 };
 static arc arcs_64_8[1] = {
-    {152, 4},
+    {153, 4},
 };
 static state states_64[9] = {
     {10, arcs_64_0},
@@ -1419,7 +1421,7 @@
     {48, 1},
 };
 static arc arcs_65_1[3] = {
-    {158, 2},
+    {159, 2},
     {30, 3},
     {0, 1},
 };
@@ -1444,15 +1446,15 @@
 };
 static arc arcs_66_0[3] = {
     {13, 1},
-    {148, 2},
-    {78, 3},
+    {149, 2},
+    {79, 3},
 };
 static arc arcs_66_1[2] = {
     {14, 4},
     {15, 5},
 };
 static arc arcs_66_2[1] = {
-    {159, 6},
+    {160, 6},
 };
 static arc arcs_66_3[1] = {
     {21, 5},
@@ -1464,7 +1466,7 @@
     {0, 5},
 };
 static arc arcs_66_6[1] = {
-    {149, 5},
+    {150, 5},
 };
 static state states_66[7] = {
     {3, arcs_66_0},
@@ -1476,14 +1478,14 @@
     {1, arcs_66_6},
 };
 static arc arcs_67_0[1] = {
-    {160, 1},
+    {161, 1},
 };
 static arc arcs_67_1[2] = {
     {30, 2},
     {0, 1},
 };
 static arc arcs_67_2[2] = {
-    {160, 1},
+    {161, 1},
     {0, 2},
 };
 static state states_67[3] = {
@@ -1501,11 +1503,11 @@
 };
 static arc arcs_68_2[3] = {
     {24, 3},
-    {161, 4},
+    {162, 4},
     {0, 2},
 };
 static arc arcs_68_3[2] = {
-    {161, 4},
+    {162, 4},
     {0, 3},
 };
 static arc arcs_68_4[1] = {
@@ -1534,7 +1536,7 @@
     {1, arcs_69_2},
 };
 static arc arcs_70_0[2] = {
-    {103, 1},
+    {104, 1},
     {48, 1},
 };
 static arc arcs_70_1[2] = {
@@ -1542,7 +1544,7 @@
     {0, 1},
 };
 static arc arcs_70_2[3] = {
-    {103, 1},
+    {104, 1},
     {48, 1},
     {0, 2},
 };
@@ -1572,7 +1574,7 @@
 };
 static arc arcs_72_1[4] = {
     {25, 2},
-    {158, 3},
+    {159, 3},
     {30, 4},
     {0, 1},
 };
@@ -1587,7 +1589,7 @@
     {0, 4},
 };
 static arc arcs_72_5[3] = {
-    {158, 3},
+    {159, 3},
     {30, 7},
     {0, 5},
 };
@@ -1623,7 +1625,7 @@
     {2, arcs_72_10},
 };
 static arc arcs_73_0[1] = {
-    {162, 1},
+    {163, 1},
 };
 static arc arcs_73_1[1] = {
     {21, 2},
@@ -1659,7 +1661,7 @@
     {1, arcs_73_7},
 };
 static arc arcs_74_0[3] = {
-    {163, 1},
+    {164, 1},
     {31, 2},
     {32, 3},
 };
@@ -1674,7 +1676,7 @@
     {24, 6},
 };
 static arc arcs_74_4[4] = {
-    {163, 1},
+    {164, 1},
     {31, 2},
     {32, 3},
     {0, 4},
@@ -1687,7 +1689,7 @@
     {0, 6},
 };
 static arc arcs_74_7[2] = {
-    {163, 5},
+    {164, 5},
     {32, 3},
 };
 static state states_74[8] = {
@@ -1704,7 +1706,7 @@
     {24, 1},
 };
 static arc arcs_75_1[3] = {
-    {158, 2},
+    {159, 2},
     {29, 3},
     {0, 1},
 };
@@ -1721,8 +1723,8 @@
     {1, arcs_75_3},
 };
 static arc arcs_76_0[2] = {
-    {158, 1},
-    {165, 1},
+    {159, 1},
+    {166, 1},
 };
 static arc arcs_76_1[1] = {
     {0, 1},
@@ -1732,19 +1734,19 @@
     {1, arcs_76_1},
 };
 static arc arcs_77_0[1] = {
-    {96, 1},
+    {97, 1},
 };
 static arc arcs_77_1[1] = {
-    {62, 2},
+    {63, 2},
 };
 static arc arcs_77_2[1] = {
-    {97, 3},
+    {98, 3},
 };
 static arc arcs_77_3[1] = {
-    {107, 4},
+    {108, 4},
 };
 static arc arcs_77_4[2] = {
-    {164, 5},
+    {165, 5},
     {0, 4},
 };
 static arc arcs_77_5[1] = {
@@ -1759,13 +1761,13 @@
     {1, arcs_77_5},
 };
 static arc arcs_78_0[1] = {
-    {92, 1},
+    {93, 1},
 };
 static arc arcs_78_1[1] = {
-    {109, 2},
+    {110, 2},
 };
 static arc arcs_78_2[2] = {
-    {164, 3},
+    {165, 3},
     {0, 2},
 };
 static arc arcs_78_3[1] = {
@@ -1788,10 +1790,10 @@
     {1, arcs_79_1},
 };
 static arc arcs_80_0[1] = {
-    {167, 1},
+    {168, 1},
 };
 static arc arcs_80_1[2] = {
-    {168, 2},
+    {169, 2},
     {0, 1},
 };
 static arc arcs_80_2[1] = {
@@ -1803,7 +1805,7 @@
     {1, arcs_80_2},
 };
 static arc arcs_81_0[2] = {
-    {73, 1},
+    {74, 1},
     {9, 2},
 };
 static arc arcs_81_1[1] = {
@@ -1819,11 +1821,11 @@
 };
 static dfa dfas[82] = {
     {256, "single_input", 0, 3, states_0,
-     "\004\050\060\200\000\000\000\240\340\223\160\220\045\200\020\000\000\206\120\076\204\000"},
+     "\004\050\060\200\000\000\000\100\301\047\341\040\113\000\041\000\000\014\241\174\010\001"},
     {257, "file_input", 0, 2, states_1,
-     "\204\050\060\200\000\000\000\240\340\223\160\220\045\200\020\000\000\206\120\076\204\000"},
+     "\204\050\060\200\000\000\000\100\301\047\341\040\113\000\041\000\000\014\241\174\010\001"},
     {258, "eval_input", 0, 3, states_2,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {259, "decorator", 0, 7, states_3,
      "\000\010\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {260, "decorators", 0, 2, states_4,
@@ -1843,39 +1845,39 @@
     {267, "vfpdef", 0, 2, states_11,
      "\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {268, "stmt", 0, 2, states_12,
-     "\000\050\060\200\000\000\000\240\340\223\160\220\045\200\020\000\000\206\120\076\204\000"},
+     "\000\050\060\200\000\000\000\100\301\047\341\040\113\000\041\000\000\014\241\174\010\001"},
     {269, "simple_stmt", 0, 4, states_13,
-     "\000\040\040\200\000\000\000\240\340\223\160\000\000\200\020\000\000\206\120\076\200\000"},
+     "\000\040\040\200\000\000\000\100\301\047\341\000\000\000\041\000\000\014\241\174\000\001"},
     {270, "small_stmt", 0, 2, states_14,
-     "\000\040\040\200\000\000\000\240\340\223\160\000\000\200\020\000\000\206\120\076\200\000"},
+     "\000\040\040\200\000\000\000\100\301\047\341\000\000\000\041\000\000\014\241\174\000\001"},
     {271, "expr_stmt", 0, 6, states_15,
-     "\000\040\040\200\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\200\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {272, "testlist_star_expr", 0, 3, states_16,
-     "\000\040\040\200\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\200\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {273, "augassign", 0, 2, states_17,
-     "\000\000\000\000\000\000\376\037\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\376\077\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {274, "del_stmt", 0, 3, states_18,
-     "\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {275, "pass_stmt", 0, 2, states_19,
-     "\000\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {276, "flow_stmt", 0, 2, states_20,
-     "\000\000\000\000\000\000\000\000\340\001\000\000\000\000\000\000\000\000\000\000\200\000"},
+     "\000\000\000\000\000\000\000\000\300\003\000\000\000\000\000\000\000\000\000\000\000\001"},
     {277, "break_stmt", 0, 2, states_21,
-     "\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000"},
-    {278, "continue_stmt", 0, 2, states_22,
      "\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000\000\000\000\000"},
-    {279, "return_stmt", 0, 3, states_23,
+    {278, "continue_stmt", 0, 2, states_22,
      "\000\000\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000"},
-    {280, "yield_stmt", 0, 2, states_24,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\200\000"},
-    {281, "raise_stmt", 0, 5, states_25,
+    {279, "return_stmt", 0, 3, states_23,
      "\000\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\000\000\000\000\000"},
-    {282, "import_stmt", 0, 2, states_26,
-     "\000\000\000\000\000\000\000\000\000\022\000\000\000\000\000\000\000\000\000\000\000\000"},
-    {283, "import_name", 0, 3, states_27,
-     "\000\000\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\000\000\000\000"},
-    {284, "import_from", 0, 8, states_28,
+    {280, "yield_stmt", 0, 2, states_24,
+     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001"},
+    {281, "raise_stmt", 0, 5, states_25,
      "\000\000\000\000\000\000\000\000\000\002\000\000\000\000\000\000\000\000\000\000\000\000"},
+    {282, "import_stmt", 0, 2, states_26,
+     "\000\000\000\000\000\000\000\000\000\044\000\000\000\000\000\000\000\000\000\000\000\000"},
+    {283, "import_name", 0, 3, states_27,
+     "\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000"},
+    {284, "import_from", 0, 8, states_28,
+     "\000\000\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000\000\000\000\000\000"},
     {285, "import_as_name", 0, 4, states_29,
      "\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {286, "dotted_as_name", 0, 4, states_30,
@@ -1887,103 +1889,103 @@
     {289, "dotted_name", 0, 2, states_33,
      "\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {290, "global_stmt", 0, 3, states_34,
-     "\000\000\000\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\000\000\000"},
-    {291, "nonlocal_stmt", 0, 3, states_35,
      "\000\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000\000"},
-    {292, "assert_stmt", 0, 5, states_36,
+    {291, "nonlocal_stmt", 0, 3, states_35,
      "\000\000\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000\000\000"},
+    {292, "assert_stmt", 0, 5, states_36,
+     "\000\000\000\000\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000\000\000\000"},
     {293, "compound_stmt", 0, 2, states_37,
-     "\000\010\020\000\000\000\000\000\000\000\000\220\045\000\000\000\000\000\000\000\004\000"},
+     "\000\010\020\000\000\000\000\000\000\000\000\040\113\000\000\000\000\000\000\000\010\000"},
     {294, "if_stmt", 0, 8, states_38,
-     "\000\000\000\000\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000"},
     {295, "while_stmt", 0, 8, states_39,
-     "\000\000\000\000\000\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000\000\000"},
-    {296, "for_stmt", 0, 10, states_40,
      "\000\000\000\000\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\000\000"},
+    {296, "for_stmt", 0, 10, states_40,
+     "\000\000\000\000\000\000\000\000\000\000\000\000\002\000\000\000\000\000\000\000\000\000"},
     {297, "try_stmt", 0, 13, states_41,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\010\000\000\000\000\000\000\000\000\000"},
     {298, "with_stmt", 0, 5, states_42,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\000"},
     {299, "with_item", 0, 4, states_43,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {300, "except_clause", 0, 5, states_44,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\000\002\000\000\000\000\000\000\000\000"},
     {301, "suite", 0, 5, states_45,
-     "\004\040\040\200\000\000\000\240\340\223\160\000\000\200\020\000\000\206\120\076\200\000"},
+     "\004\040\040\200\000\000\000\100\301\047\341\000\000\000\041\000\000\014\241\174\000\001"},
     {302, "test", 0, 6, states_46,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {303, "test_nocond", 0, 2, states_47,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {304, "lambdef", 0, 5, states_48,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000"},
     {305, "lambdef_nocond", 0, 5, states_49,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000"},
     {306, "or_test", 0, 2, states_50,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\040\000\000\014\241\174\000\000"},
     {307, "and_test", 0, 2, states_51,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\040\000\000\014\241\174\000\000"},
     {308, "not_test", 0, 3, states_52,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\040\000\000\014\241\174\000\000"},
     {309, "comparison", 0, 2, states_53,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {310, "comp_op", 0, 4, states_54,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\002\000\220\177\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\004\000\040\377\000\000\000\000\000\000"},
     {311, "star_expr", 0, 3, states_55,
      "\000\000\000\200\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {312, "expr", 0, 2, states_56,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {313, "xor_expr", 0, 2, states_57,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {314, "and_expr", 0, 2, states_58,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {315, "shift_expr", 0, 2, states_59,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {316, "arith_expr", 0, 2, states_60,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {317, "term", 0, 2, states_61,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {318, "factor", 0, 3, states_62,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {319, "power", 0, 4, states_63,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\240\174\000\000"},
     {320, "atom", 0, 9, states_64,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\240\174\000\000"},
     {321, "testlist_comp", 0, 5, states_65,
-     "\000\040\040\200\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\200\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {322, "trailer", 0, 7, states_66,
-     "\000\040\000\000\000\000\000\000\000\100\000\000\000\000\000\000\000\000\020\000\000\000"},
+     "\000\040\000\000\000\000\000\000\000\200\000\000\000\000\000\000\000\000\040\000\000\000"},
     {323, "subscriptlist", 0, 3, states_67,
-     "\000\040\040\002\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\002\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {324, "subscript", 0, 5, states_68,
-     "\000\040\040\002\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\002\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {325, "sliceop", 0, 3, states_69,
      "\000\000\000\002\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {326, "exprlist", 0, 3, states_70,
-     "\000\040\040\200\000\000\000\000\000\200\000\000\000\000\000\000\000\206\120\076\000\000"},
+     "\000\040\040\200\000\000\000\000\000\000\001\000\000\000\000\000\000\014\241\174\000\000"},
     {327, "testlist", 0, 3, states_71,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {328, "dictorsetmaker", 0, 11, states_72,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {329, "classdef", 0, 8, states_73,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\004\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\010\000"},
     {330, "arglist", 0, 8, states_74,
-     "\000\040\040\200\001\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\200\001\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {331, "argument", 0, 4, states_75,
-     "\000\040\040\000\000\000\000\000\000\200\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\000\001\000\000\000\041\000\000\014\241\174\000\000"},
     {332, "comp_iter", 0, 2, states_76,
-     "\000\000\000\000\000\000\000\000\000\000\000\020\001\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\040\002\000\000\000\000\000\000\000\000\000"},
     {333, "comp_for", 0, 6, states_77,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\001\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\002\000\000\000\000\000\000\000\000\000"},
     {334, "comp_if", 0, 4, states_78,
-     "\000\000\000\000\000\000\000\000\000\000\000\020\000\000\000\000\000\000\000\000\000\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\040\000\000\000\000\000\000\000\000\000\000"},
     {335, "encoding_decl", 0, 2, states_79,
      "\000\000\040\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"},
     {336, "yield_expr", 0, 3, states_80,
-     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\200\000"},
+     "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001"},
     {337, "yield_arg", 0, 3, states_81,
-     "\000\040\040\000\000\000\000\000\000\202\000\000\000\200\020\000\000\206\120\076\000\000"},
+     "\000\040\040\000\000\000\000\000\000\004\001\000\000\000\041\000\000\014\241\174\000\000"},
 };
-static label labels[169] = {
+static label labels[170] = {
     {0, "EMPTY"},
     {256, 0},
     {4, 0},
@@ -2007,7 +2009,7 @@
     {1, "def"},
     {1, 0},
     {263, 0},
-    {50, 0},
+    {51, 0},
     {302, 0},
     {11, 0},
     {301, 0},
@@ -2036,6 +2038,7 @@
     {36, 0},
     {37, 0},
     {38, 0},
+    {50, 0},
     {39, 0},
     {40, 0},
     {41, 0},
@@ -2063,7 +2066,7 @@
     {1, "import"},
     {288, 0},
     {23, 0},
-    {51, 0},
+    {52, 0},
     {287, 0},
     {285, 0},
     {1, "as"},
@@ -2157,6 +2160,6 @@
 grammar _PyParser_Grammar = {
     82,
     dfas,
-    {169, labels},
+    {170, labels},
     256
 };
diff --git a/Python/importlib.h b/Python/importlib.h
index ee230af..7d664de 100644
--- a/Python/importlib.h
+++ b/Python/importlib.h
@@ -702,7 +702,7 @@
     107,119,100,115,114,4,0,0,0,114,4,0,0,0,114,5,
     0,0,0,218,25,95,99,97,108,108,95,119,105,116,104,95,
     102,114,97,109,101,115,95,114,101,109,111,118,101,100,57,1,
-    0,0,115,2,0,0,0,0,8,114,114,0,0,0,105,238,
+    0,0,115,2,0,0,0,0,8,114,114,0,0,0,105,248,
     12,0,0,233,2,0,0,0,114,13,0,0,0,115,2,0,
     0,0,13,10,90,11,95,95,112,121,99,97,99,104,101,95,
     95,122,3,46,112,121,122,4,46,112,121,99,122,4,46,112,
@@ -771,7 +771,7 @@
     110,97,109,101,218,3,115,101,112,114,36,0,0,0,90,3,
     116,97,103,218,8,102,105,108,101,110,97,109,101,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,218,17,99,97,
-    99,104,101,95,102,114,111,109,95,115,111,117,114,99,101,181,
+    99,104,101,95,102,114,111,109,95,115,111,117,114,99,101,182,
     1,0,0,115,22,0,0,0,0,13,31,1,6,1,9,2,
     6,1,18,1,24,1,12,1,12,1,15,1,31,1,114,132,
     0,0,0,99,1,0,0,0,0,0,0,0,5,0,0,0,
@@ -829,7 +829,7 @@
     102,105,108,101,110,97,109,101,90,7,112,121,99,97,99,104,
     101,114,129,0,0,0,114,4,0,0,0,114,4,0,0,0,
     114,5,0,0,0,218,17,115,111,117,114,99,101,95,102,114,
-    111,109,95,99,97,99,104,101,208,1,0,0,115,24,0,0,
+    111,109,95,99,97,99,104,101,209,1,0,0,115,24,0,0,
     0,0,9,18,1,15,1,18,1,18,1,12,1,3,1,24,
     1,21,1,3,1,21,1,19,1,114,135,0,0,0,99,1,
     0,0,0,0,0,0,0,5,0,0,0,13,0,0,0,67,
@@ -865,7 +865,7 @@
     115,116,114,36,0,0,0,90,9,101,120,116,101,110,115,105,
     111,110,218,11,115,111,117,114,99,101,95,112,97,116,104,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,218,15,
-    95,103,101,116,95,115,111,117,114,99,101,102,105,108,101,231,
+    95,103,101,116,95,115,111,117,114,99,101,102,105,108,101,232,
     1,0,0,115,20,0,0,0,0,7,18,1,4,1,24,1,
     35,1,4,1,3,1,16,1,19,1,21,1,114,142,0,0,
     0,99,1,0,0,0,0,0,0,0,2,0,0,0,11,0,
@@ -881,7 +881,7 @@
     41,0,0,0,114,40,0,0,0,41,2,114,35,0,0,0,
     114,42,0,0,0,114,4,0,0,0,114,4,0,0,0,114,
     5,0,0,0,218,10,95,99,97,108,99,95,109,111,100,101,
-    250,1,0,0,115,12,0,0,0,0,2,3,1,19,1,13,
+    251,1,0,0,115,12,0,0,0,0,2,3,1,19,1,13,
     1,11,3,10,1,114,144,0,0,0,218,9,118,101,114,98,
     111,115,105,116,121,114,29,0,0,0,99,1,0,0,0,1,
     0,0,0,3,0,0,0,4,0,0,0,71,0,0,0,115,
@@ -902,7 +902,7 @@
     115,116,100,101,114,114,41,3,218,7,109,101,115,115,97,103,
     101,114,145,0,0,0,114,80,0,0,0,114,4,0,0,0,
     114,4,0,0,0,114,5,0,0,0,218,16,95,118,101,114,
-    98,111,115,101,95,109,101,115,115,97,103,101,6,2,0,0,
+    98,111,115,101,95,109,101,115,115,97,103,101,7,2,0,0,
     115,8,0,0,0,0,2,18,1,15,1,13,1,114,152,0,
     0,0,99,1,0,0,0,0,0,0,0,2,0,0,0,4,
     0,0,0,3,0,0,0,115,38,0,0,0,100,1,0,135,
@@ -938,14 +938,14 @@
     0,114,80,0,0,0,114,108,0,0,0,41,1,218,6,109,
     101,116,104,111,100,114,4,0,0,0,114,5,0,0,0,218,
     19,95,99,104,101,99,107,95,110,97,109,101,95,119,114,97,
-    112,112,101,114,22,2,0,0,115,10,0,0,0,0,1,12,
+    112,112,101,114,23,2,0,0,115,10,0,0,0,0,1,12,
     1,12,1,15,1,25,1,122,40,95,99,104,101,99,107,95,
     110,97,109,101,46,60,108,111,99,97,108,115,62,46,95,99,
     104,101,99,107,95,110,97,109,101,95,119,114,97,112,112,101,
     114,41,1,114,65,0,0,0,41,2,114,154,0,0,0,114,
     155,0,0,0,114,4,0,0,0,41,1,114,154,0,0,0,
     114,5,0,0,0,218,11,95,99,104,101,99,107,95,110,97,
-    109,101,14,2,0,0,115,6,0,0,0,0,8,21,6,13,
+    109,101,15,2,0,0,115,6,0,0,0,0,8,21,6,13,
     1,114,156,0,0,0,99,1,0,0,0,0,0,0,0,2,
     0,0,0,3,0,0,0,3,0,0,0,115,35,0,0,0,
     135,0,0,102,1,0,100,1,0,100,2,0,134,0,0,125,
@@ -967,7 +967,7 @@
     0,218,8,102,117,108,108,110,97,109,101,41,1,218,3,102,
     120,110,114,4,0,0,0,114,5,0,0,0,218,25,95,114,
     101,113,117,105,114,101,115,95,98,117,105,108,116,105,110,95,
-    119,114,97,112,112,101,114,34,2,0,0,115,8,0,0,0,
+    119,114,97,112,112,101,114,35,2,0,0,115,8,0,0,0,
     0,1,15,1,18,1,12,1,122,52,95,114,101,113,117,105,
     114,101,115,95,98,117,105,108,116,105,110,46,60,108,111,99,
     97,108,115,62,46,95,114,101,113,117,105,114,101,115,95,98,
@@ -975,7 +975,7 @@
     114,65,0,0,0,41,2,114,159,0,0,0,114,160,0,0,
     0,114,4,0,0,0,41,1,114,159,0,0,0,114,5,0,
     0,0,218,17,95,114,101,113,117,105,114,101,115,95,98,117,
-    105,108,116,105,110,32,2,0,0,115,6,0,0,0,0,2,
+    105,108,116,105,110,33,2,0,0,115,6,0,0,0,0,2,
     18,5,13,1,114,161,0,0,0,99,1,0,0,0,0,0,
     0,0,2,0,0,0,3,0,0,0,3,0,0,0,115,35,
     0,0,0,135,0,0,102,1,0,100,1,0,100,2,0,134,
@@ -996,7 +996,7 @@
     114,71,0,0,0,114,158,0,0,0,41,1,114,159,0,0,
     0,114,4,0,0,0,114,5,0,0,0,218,24,95,114,101,
     113,117,105,114,101,115,95,102,114,111,122,101,110,95,119,114,
-    97,112,112,101,114,45,2,0,0,115,8,0,0,0,0,1,
+    97,112,112,101,114,46,2,0,0,115,8,0,0,0,0,1,
     15,1,18,1,12,1,122,50,95,114,101,113,117,105,114,101,
     115,95,102,114,111,122,101,110,46,60,108,111,99,97,108,115,
     62,46,95,114,101,113,117,105,114,101,115,95,102,114,111,122,
@@ -1004,7 +1004,7 @@
     0,41,2,114,159,0,0,0,114,163,0,0,0,114,4,0,
     0,0,41,1,114,159,0,0,0,114,5,0,0,0,218,16,
     95,114,101,113,117,105,114,101,115,95,102,114,111,122,101,110,
-    43,2,0,0,115,6,0,0,0,0,2,18,5,13,1,114,
+    44,2,0,0,115,6,0,0,0,0,2,18,5,13,1,114,
     164,0,0,0,99,2,0,0,0,0,0,0,0,5,0,0,
     0,5,0,0,0,67,0,0,0,115,87,0,0,0,124,0,
     0,106,0,0,124,1,0,131,1,0,92,2,0,125,2,0,
@@ -1033,7 +1033,7 @@
     111,97,100,101,114,218,8,112,111,114,116,105,111,110,115,218,
     3,109,115,103,114,4,0,0,0,114,4,0,0,0,114,5,
     0,0,0,218,17,95,102,105,110,100,95,109,111,100,117,108,
-    101,95,115,104,105,109,54,2,0,0,115,10,0,0,0,0,
+    101,95,115,104,105,109,55,2,0,0,115,10,0,0,0,0,
     10,21,1,24,1,6,1,32,1,114,172,0,0,0,99,2,
     0,0,0,0,0,0,0,5,0,0,0,3,0,0,0,67,
     0,0,0,115,93,0,0,0,116,0,0,124,1,0,124,0,
@@ -1058,7 +1058,7 @@
     115,112,101,99,218,7,109,101,116,104,111,100,115,218,6,109,
     111,100,117,108,101,114,4,0,0,0,114,4,0,0,0,114,
     5,0,0,0,218,17,95,108,111,97,100,95,109,111,100,117,
-    108,101,95,115,104,105,109,71,2,0,0,115,14,0,0,0,
+    108,101,95,115,104,105,109,72,2,0,0,115,14,0,0,0,
     0,6,15,1,12,1,15,1,13,1,13,1,11,2,114,180,
     0,0,0,99,4,0,0,0,0,0,0,0,11,0,0,0,
     19,0,0,0,67,0,0,0,115,243,1,0,0,105,0,0,
@@ -1143,7 +1143,7 @@
     95,109,116,105,109,101,218,11,115,111,117,114,99,101,95,115,
     105,122,101,114,4,0,0,0,114,4,0,0,0,114,5,0,
     0,0,218,25,95,118,97,108,105,100,97,116,101,95,98,121,
-    116,101,99,111,100,101,95,104,101,97,100,101,114,87,2,0,
+    116,101,99,111,100,101,95,104,101,97,100,101,114,88,2,0,
     0,115,76,0,0,0,0,11,6,1,12,1,13,3,6,1,
     12,1,13,1,16,1,16,1,16,1,12,1,18,1,10,1,
     18,1,18,1,15,1,10,1,15,1,18,1,15,1,10,1,
@@ -1174,7 +1174,7 @@
     5,114,53,0,0,0,114,67,0,0,0,114,140,0,0,0,
     114,141,0,0,0,218,4,99,111,100,101,114,4,0,0,0,
     114,4,0,0,0,114,5,0,0,0,218,17,95,99,111,109,
-    112,105,108,101,95,98,121,116,101,99,111,100,101,142,2,0,
+    112,105,108,101,95,98,121,116,101,99,111,100,101,143,2,0,
     0,115,16,0,0,0,0,2,15,1,15,1,13,1,12,1,
     19,1,4,2,18,1,114,195,0,0,0,114,84,0,0,0,
     99,3,0,0,0,0,0,0,0,4,0,0,0,3,0,0,
@@ -1194,7 +1194,7 @@
     100,117,109,112,115,41,4,114,194,0,0,0,114,183,0,0,
     0,114,189,0,0,0,114,53,0,0,0,114,4,0,0,0,
     114,4,0,0,0,114,5,0,0,0,218,17,95,99,111,100,
-    101,95,116,111,95,98,121,116,101,99,111,100,101,154,2,0,
+    101,95,116,111,95,98,121,116,101,99,111,100,101,155,2,0,
     0,115,10,0,0,0,0,3,12,1,19,1,19,1,22,1,
     114,198,0,0,0,99,1,0,0,0,0,0,0,0,5,0,
     0,0,4,0,0,0,67,0,0,0,115,89,0,0,0,100,
@@ -1223,7 +1223,7 @@
     218,8,101,110,99,111,100,105,110,103,90,15,110,101,119,108,
     105,110,101,95,100,101,99,111,100,101,114,114,4,0,0,0,
     114,4,0,0,0,114,5,0,0,0,218,13,100,101,99,111,
-    100,101,95,115,111,117,114,99,101,164,2,0,0,115,10,0,
+    100,101,95,115,111,117,114,99,101,165,2,0,0,115,10,0,
     0,0,0,5,12,1,18,1,15,1,18,1,114,203,0,0,
     0,99,1,0,0,0,0,0,0,0,5,0,0,0,35,0,
     0,0,67,0,0,0,115,15,1,0,0,116,0,0,124,0,
@@ -1257,7 +1257,7 @@
     95,114,47,0,0,0,41,5,114,179,0,0,0,114,169,0,
     0,0,114,177,0,0,0,114,67,0,0,0,114,131,0,0,
     0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
-    218,12,95,109,111,100,117,108,101,95,114,101,112,114,178,2,
+    218,12,95,109,111,100,117,108,101,95,114,101,112,114,179,2,
     0,0,115,46,0,0,0,0,2,18,1,15,4,3,1,17,
     1,13,1,8,1,3,1,13,1,13,1,5,2,12,1,16,
     4,3,1,13,1,13,1,11,1,3,1,13,1,13,1,12,
@@ -1274,7 +1274,7 @@
     41,1,78,41,3,218,7,95,109,111,100,117,108,101,114,208,
     0,0,0,218,5,95,115,112,101,99,41,2,114,71,0,0,
     0,114,179,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,5,0,0,0,114,72,0,0,0,216,2,0,0,115,4,
+    114,5,0,0,0,114,72,0,0,0,217,2,0,0,115,4,
     0,0,0,0,1,9,1,122,26,95,105,110,115,116,97,108,
     108,101,100,95,115,97,102,101,108,121,46,95,95,105,110,105,
     116,95,95,99,1,0,0,0,0,0,0,0,1,0,0,0,
@@ -1285,7 +1285,7 @@
     95,105,110,105,116,105,97,108,105,122,105,110,103,114,213,0,
     0,0,114,7,0,0,0,114,73,0,0,0,114,67,0,0,
     0,41,1,114,71,0,0,0,114,4,0,0,0,114,4,0,
-    0,0,114,5,0,0,0,114,75,0,0,0,220,2,0,0,
+    0,0,114,5,0,0,0,114,75,0,0,0,221,2,0,0,
     115,4,0,0,0,0,4,12,1,122,27,95,105,110,115,116,
     97,108,108,101,100,95,115,97,102,101,108,121,46,95,95,101,
     110,116,101,114,95,95,99,1,0,0,0,0,0,0,0,3,
@@ -1303,7 +1303,7 @@
     1,0,100,0,0,107,9,0,86,1,113,3,0,100,0,0,
     83,41,1,78,114,4,0,0,0,41,2,114,22,0,0,0,
     114,76,0,0,0,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,114,77,0,0,0,230,2,0,0,115,2,0,
+    5,0,0,0,114,77,0,0,0,231,2,0,0,115,2,0,
     0,0,6,0,122,45,95,105,110,115,116,97,108,108,101,100,
     95,115,97,102,101,108,121,46,95,95,101,120,105,116,95,95,
     46,60,108,111,99,97,108,115,62,46,60,103,101,110,101,120,
@@ -1313,14 +1313,14 @@
     0,0,0,114,79,0,0,0,114,152,0,0,0,114,169,0,
     0,0,114,215,0,0,0,41,3,114,71,0,0,0,114,80,
     0,0,0,114,177,0,0,0,114,4,0,0,0,114,4,0,
-    0,0,114,5,0,0,0,114,81,0,0,0,227,2,0,0,
+    0,0,114,5,0,0,0,114,81,0,0,0,228,2,0,0,
     115,18,0,0,0,0,1,3,1,9,1,25,1,3,1,17,
     1,13,1,8,2,26,2,122,26,95,105,110,115,116,97,108,
     108,101,100,95,115,97,102,101,108,121,46,95,95,101,120,105,
     116,95,95,78,41,6,114,57,0,0,0,114,56,0,0,0,
     114,58,0,0,0,114,72,0,0,0,114,75,0,0,0,114,
     81,0,0,0,114,4,0,0,0,114,4,0,0,0,114,4,
-    0,0,0,114,5,0,0,0,114,212,0,0,0,214,2,0,
+    0,0,0,114,5,0,0,0,114,212,0,0,0,215,2,0,
     0,115,6,0,0,0,12,2,12,4,12,7,114,212,0,0,
     0,99,0,0,0,0,0,0,0,0,0,0,0,0,8,0,
     0,0,64,0,0,0,115,172,0,0,0,101,0,0,90,1,
@@ -1445,7 +1445,7 @@
     99,104,101,100,41,6,114,71,0,0,0,114,67,0,0,0,
     114,169,0,0,0,114,217,0,0,0,114,218,0,0,0,114,
     219,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,72,0,0,0,22,3,0,0,115,14,0,0,
+    0,0,0,114,72,0,0,0,23,3,0,0,115,14,0,0,
     0,0,2,9,1,9,1,9,1,9,1,21,3,9,1,122,
     19,77,111,100,117,108,101,83,112,101,99,46,95,95,105,110,
     105,116,95,95,99,1,0,0,0,0,0,0,0,2,0,0,
@@ -1470,7 +1470,7 @@
     95,99,108,97,115,115,95,95,114,57,0,0,0,114,26,0,
     0,0,41,2,114,71,0,0,0,114,80,0,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,114,101,0,
-    0,0,34,3,0,0,115,16,0,0,0,0,1,15,1,21,
+    0,0,35,3,0,0,115,16,0,0,0,0,1,15,1,21,
     1,15,1,28,1,15,1,6,1,22,1,122,19,77,111,100,
     117,108,101,83,112,101,99,46,95,95,114,101,112,114,95,95,
     99,2,0,0,0,0,0,0,0,3,0,0,0,13,0,0,
@@ -1489,7 +1489,7 @@
     97,115,95,108,111,99,97,116,105,111,110,114,209,0,0,0,
     41,3,114,71,0,0,0,218,5,111,116,104,101,114,218,4,
     115,109,115,108,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,218,6,95,95,101,113,95,95,44,3,0,0,115,
+    0,0,0,218,6,95,95,101,113,95,95,45,3,0,0,115,
     20,0,0,0,0,1,9,1,3,1,18,1,18,1,18,1,
     15,1,18,1,20,1,13,1,122,17,77,111,100,117,108,101,
     83,112,101,99,46,95,95,101,113,95,95,99,1,0,0,0,
@@ -1510,7 +1510,7 @@
     0,0,114,124,0,0,0,218,17,66,89,84,69,67,79,68,
     69,95,83,85,70,70,73,88,69,83,41,2,114,71,0,0,
     0,114,131,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,5,0,0,0,114,225,0,0,0,56,3,0,0,115,22,
+    114,5,0,0,0,114,225,0,0,0,57,3,0,0,115,22,
     0,0,0,0,2,15,1,24,1,9,1,21,1,3,1,19,
     1,13,1,8,1,21,1,18,1,122,17,77,111,100,117,108,
     101,83,112,101,99,46,99,97,99,104,101,100,99,2,0,0,
@@ -1518,7 +1518,7 @@
     0,115,13,0,0,0,124,1,0,124,0,0,95,0,0,100,
     0,0,83,41,1,78,41,1,114,222,0,0,0,41,2,114,
     71,0,0,0,114,225,0,0,0,114,4,0,0,0,114,4,
-    0,0,0,114,5,0,0,0,114,225,0,0,0,70,3,0,
+    0,0,0,114,5,0,0,0,114,225,0,0,0,71,3,0,
     0,115,2,0,0,0,0,2,99,1,0,0,0,0,0,0,
     0,1,0,0,0,2,0,0,0,67,0,0,0,115,46,0,
     0,0,124,0,0,106,0,0,100,1,0,107,8,0,114,35,
@@ -1529,14 +1529,14 @@
     101,110,116,46,78,114,116,0,0,0,114,84,0,0,0,41,
     3,114,220,0,0,0,114,67,0,0,0,114,32,0,0,0,
     41,1,114,71,0,0,0,114,4,0,0,0,114,4,0,0,
-    0,114,5,0,0,0,218,6,112,97,114,101,110,116,74,3,
+    0,114,5,0,0,0,218,6,112,97,114,101,110,116,75,3,
     0,0,115,6,0,0,0,0,3,15,1,20,2,122,17,77,
     111,100,117,108,101,83,112,101,99,46,112,97,114,101,110,116,
     99,1,0,0,0,0,0,0,0,1,0,0,0,1,0,0,
     0,67,0,0,0,115,7,0,0,0,124,0,0,106,0,0,
     83,41,1,78,41,1,114,221,0,0,0,41,1,114,71,0,
     0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,
-    0,114,226,0,0,0,82,3,0,0,115,2,0,0,0,0,
+    0,114,226,0,0,0,83,3,0,0,115,2,0,0,0,0,
     2,122,23,77,111,100,117,108,101,83,112,101,99,46,104,97,
     115,95,108,111,99,97,116,105,111,110,99,2,0,0,0,0,
     0,0,0,2,0,0,0,2,0,0,0,67,0,0,0,115,
@@ -1544,14 +1544,14 @@
     95,1,0,100,0,0,83,41,1,78,41,2,218,4,98,111,
     111,108,114,221,0,0,0,41,2,114,71,0,0,0,218,5,
     118,97,108,117,101,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,114,226,0,0,0,86,3,0,0,115,2,0,
+    5,0,0,0,114,226,0,0,0,87,3,0,0,115,2,0,
     0,0,0,2,41,12,114,57,0,0,0,114,56,0,0,0,
     114,58,0,0,0,114,59,0,0,0,114,72,0,0,0,114,
     101,0,0,0,114,229,0,0,0,218,8,112,114,111,112,101,
     114,116,121,114,225,0,0,0,218,6,115,101,116,116,101,114,
     114,233,0,0,0,114,226,0,0,0,114,4,0,0,0,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,216,
-    0,0,0,241,2,0,0,115,20,0,0,0,12,35,6,2,
+    0,0,0,242,2,0,0,115,20,0,0,0,12,35,6,2,
     15,1,15,11,12,10,12,12,18,14,21,4,18,8,18,4,
     114,216,0,0,0,114,217,0,0,0,114,219,0,0,0,99,
     2,0,0,0,2,0,0,0,5,0,0,0,15,0,0,0,
@@ -1579,7 +1579,7 @@
     0,114,216,0,0,0,41,5,114,67,0,0,0,114,169,0,
     0,0,114,217,0,0,0,114,219,0,0,0,90,6,115,101,
     97,114,99,104,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,173,0,0,0,91,3,0,0,115,28,0,0,
+    0,0,0,114,173,0,0,0,92,3,0,0,115,28,0,0,
     0,0,2,15,1,12,1,16,1,18,1,15,1,7,2,12,
     1,15,1,3,1,19,1,13,1,14,3,9,2,114,173,0,
     0,0,114,169,0,0,0,114,220,0,0,0,99,2,0,0,
@@ -1641,7 +1641,7 @@
     12,108,111,97,100,101,114,95,99,108,97,115,115,114,127,0,
     0,0,114,219,0,0,0,90,7,100,105,114,110,97,109,101,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,
-    239,0,0,0,116,3,0,0,115,60,0,0,0,0,12,12,
+    239,0,0,0,117,3,0,0,115,60,0,0,0,0,12,12,
     4,6,1,15,2,3,1,19,1,13,1,11,8,21,1,9,
     3,12,1,22,1,21,1,15,1,9,1,8,2,7,3,12,
     2,15,1,3,1,19,1,13,1,5,2,6,1,18,2,9,
@@ -1681,7 +1681,7 @@
     0,0,0,114,177,0,0,0,114,67,0,0,0,114,242,0,
     0,0,114,225,0,0,0,114,220,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,218,17,95,115,112,
-    101,99,95,102,114,111,109,95,109,111,100,117,108,101,180,3,
+    101,99,95,102,114,111,109,95,109,111,100,117,108,101,181,3,
     0,0,115,72,0,0,0,0,2,3,1,13,1,13,1,5,
     2,12,1,4,2,9,1,12,1,3,1,13,1,13,2,8,
     1,3,1,13,1,13,1,11,1,12,1,12,1,3,1,13,
@@ -1708,7 +1708,7 @@
     0,0,95,0,0,100,0,0,83,41,1,78,41,1,114,177,
     0,0,0,41,2,114,71,0,0,0,114,177,0,0,0,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,72,
-    0,0,0,232,3,0,0,115,2,0,0,0,0,1,122,21,
+    0,0,0,233,3,0,0,115,2,0,0,0,0,1,122,21,
     95,83,112,101,99,77,101,116,104,111,100,115,46,95,95,105,
     110,105,116,95,95,99,1,0,0,0,0,0,0,0,3,0,
     0,0,3,0,0,0,67,0,0,0,115,158,0,0,0,124,
@@ -1734,7 +1734,7 @@
     114,169,0,0,0,114,47,0,0,0,114,226,0,0,0,41,
     3,114,71,0,0,0,114,177,0,0,0,114,67,0,0,0,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,
-    205,0,0,0,235,3,0,0,115,18,0,0,0,0,3,9,
+    205,0,0,0,236,3,0,0,115,18,0,0,0,0,3,9,
     1,30,1,15,1,15,1,13,2,22,2,9,1,19,2,122,
     24,95,83,112,101,99,77,101,116,104,111,100,115,46,109,111,
     100,117,108,101,95,114,101,112,114,218,9,95,111,118,101,114,
@@ -1825,7 +1825,7 @@
     0,0,0,114,179,0,0,0,114,248,0,0,0,114,249,0,
     0,0,114,177,0,0,0,114,169,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,218,17,105,110,105,
-    116,95,109,111,100,117,108,101,95,97,116,116,114,115,251,3,
+    116,95,109,111,100,117,108,101,95,97,116,116,114,115,252,3,
     0,0,115,88,0,0,0,0,17,9,6,12,1,24,1,3,
     1,16,1,13,1,8,3,30,1,9,1,12,2,15,1,15,
     1,18,1,3,1,13,1,13,1,8,3,30,1,3,1,16,
@@ -1857,7 +1857,7 @@
     0,114,68,0,0,0,114,67,0,0,0,114,254,0,0,0,
     41,3,114,71,0,0,0,114,177,0,0,0,114,179,0,0,
     0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
-    218,6,99,114,101,97,116,101,75,4,0,0,115,16,0,0,
+    218,6,99,114,101,97,116,101,76,4,0,0,115,16,0,0,
     0,0,7,9,2,18,3,21,2,6,1,12,4,18,1,13,
     1,122,19,95,83,112,101,99,77,101,116,104,111,100,115,46,
     99,114,101,97,116,101,99,2,0,0,0,0,0,0,0,2,
@@ -1879,7 +1879,7 @@
     114,169,0,0,0,218,11,101,120,101,99,95,109,111,100,117,
     108,101,41,2,114,71,0,0,0,114,179,0,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,218,5,95,
-    101,120,101,99,98,4,0,0,115,2,0,0,0,0,7,122,
+    101,120,101,99,99,4,0,0,115,2,0,0,0,0,7,122,
     18,95,83,112,101,99,77,101,116,104,111,100,115,46,95,101,
     120,101,99,99,2,0,0,0,0,0,0,0,4,0,0,0,
     11,0,0,0,67,0,0,0,115,17,1,0,0,124,0,0,
@@ -1916,7 +1916,7 @@
     100,117,108,101,114,2,1,0,0,41,4,114,71,0,0,0,
     114,179,0,0,0,114,67,0,0,0,114,171,0,0,0,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,175,
-    0,0,0,108,4,0,0,115,32,0,0,0,0,2,12,1,
+    0,0,0,109,4,0,0,115,32,0,0,0,0,2,12,1,
     10,1,13,1,24,1,15,1,21,1,18,1,18,1,27,2,
     19,1,4,1,19,1,21,4,22,2,19,1,122,17,95,83,
     112,101,99,77,101,116,104,111,100,115,46,101,120,101,99,99,
@@ -1949,7 +1949,7 @@
     0,114,177,0,0,0,114,179,0,0,0,114,4,0,0,0,
     114,4,0,0,0,114,5,0,0,0,218,25,95,108,111,97,
     100,95,98,97,99,107,119,97,114,100,95,99,111,109,112,97,
-    116,105,98,108,101,132,4,0,0,115,42,0,0,0,0,4,
+    116,105,98,108,101,133,4,0,0,115,42,0,0,0,0,4,
     9,1,19,2,16,1,24,1,3,1,16,1,13,1,8,1,
     24,1,3,4,12,1,15,1,32,1,13,1,8,1,24,1,
     3,1,13,1,13,1,8,1,122,38,95,83,112,101,99,77,
@@ -1976,7 +1976,7 @@
     0,0,0,114,73,0,0,0,41,2,114,71,0,0,0,114,
     179,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
     0,0,0,218,14,95,108,111,97,100,95,117,110,108,111,99,
-    107,101,100,162,4,0,0,115,20,0,0,0,0,2,18,2,
+    107,101,100,163,4,0,0,115,20,0,0,0,0,2,18,2,
     21,1,13,2,12,1,13,1,18,1,18,1,30,3,19,5,
     122,27,95,83,112,101,99,77,101,116,104,111,100,115,46,95,
     108,111,97,100,95,117,110,108,111,99,107,101,100,99,1,0,
@@ -2001,7 +2001,7 @@
     0,0,0,114,3,1,0,0,114,103,0,0,0,114,177,0,
     0,0,114,67,0,0,0,114,6,1,0,0,41,1,114,71,
     0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,
-    0,0,114,176,0,0,0,185,4,0,0,115,6,0,0,0,
+    0,0,114,176,0,0,0,186,4,0,0,115,6,0,0,0,
     0,9,10,1,19,1,122,17,95,83,112,101,99,77,101,116,
     104,111,100,115,46,108,111,97,100,78,41,13,114,57,0,0,
     0,114,56,0,0,0,114,58,0,0,0,114,59,0,0,0,
@@ -2009,7 +2009,7 @@
     0,1,0,0,114,2,1,0,0,114,175,0,0,0,114,5,
     1,0,0,114,6,1,0,0,114,176,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,
-    0,114,174,0,0,0,225,3,0,0,115,20,0,0,0,12,
+    0,114,174,0,0,0,226,3,0,0,115,20,0,0,0,12,
     3,6,4,12,3,12,16,24,80,12,23,12,10,12,24,12,
     30,12,23,114,174,0,0,0,99,4,0,0,0,0,0,0,
     0,6,0,0,0,11,0,0,0,67,0,0,0,115,201,0,
@@ -2035,7 +2035,7 @@
     90,8,112,97,116,104,110,97,109,101,90,9,99,112,97,116,
     104,110,97,109,101,114,169,0,0,0,114,177,0,0,0,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,218,14,
-    95,102,105,120,95,117,112,95,109,111,100,117,108,101,199,4,
+    95,102,105,120,95,117,112,95,109,111,100,117,108,101,200,4,
     0,0,115,34,0,0,0,0,2,15,1,15,1,6,1,6,
     1,12,1,12,1,18,2,18,1,6,1,24,1,3,1,10,
     1,10,1,10,1,14,1,13,2,114,9,1,0,0,99,0,
@@ -2076,7 +2076,7 @@
     125,32,40,98,117,105,108,116,45,105,110,41,62,41,2,114,
     47,0,0,0,114,57,0,0,0,41,1,114,179,0,0,0,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,
-    205,0,0,0,233,4,0,0,115,2,0,0,0,0,7,122,
+    205,0,0,0,234,4,0,0,115,2,0,0,0,0,7,122,
     27,66,117,105,108,116,105,110,73,109,112,111,114,116,101,114,
     46,109,111,100,117,108,101,95,114,101,112,114,78,99,4,0,
     0,0,0,0,0,0,4,0,0,0,5,0,0,0,67,0,
@@ -2089,7 +2089,7 @@
     117,105,108,116,105,110,114,173,0,0,0,41,4,218,3,99,
     108,115,114,158,0,0,0,114,35,0,0,0,218,6,116,97,
     114,103,101,116,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,218,9,102,105,110,100,95,115,112,101,99,242,4,
+    0,0,0,218,9,102,105,110,100,95,115,112,101,99,243,4,
     0,0,115,10,0,0,0,0,2,12,1,4,1,15,1,19,
     2,122,25,66,117,105,108,116,105,110,73,109,112,111,114,116,
     101,114,46,102,105,110,100,95,115,112,101,99,99,3,0,0,
@@ -2112,7 +2112,7 @@
     4,114,11,1,0,0,114,158,0,0,0,114,35,0,0,0,
     114,177,0,0,0,114,4,0,0,0,114,4,0,0,0,114,
     5,0,0,0,218,11,102,105,110,100,95,109,111,100,117,108,
-    101,251,4,0,0,115,4,0,0,0,0,9,18,1,122,27,
+    101,252,4,0,0,115,4,0,0,0,0,9,18,1,122,27,
     66,117,105,108,116,105,110,73,109,112,111,114,116,101,114,46,
     102,105,110,100,95,109,111,100,117,108,101,99,2,0,0,0,
     0,0,0,0,3,0,0,0,10,0,0,0,67,0,0,0,
@@ -2126,7 +2126,7 @@
     0,0,90,12,105,110,105,116,95,98,117,105,108,116,105,110,
     114,204,0,0,0,114,250,0,0,0,41,3,114,11,1,0,
     0,114,158,0,0,0,114,179,0,0,0,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,4,1,0,0,7,
+    114,4,0,0,0,114,5,0,0,0,114,4,1,0,0,8,
     5,0,0,115,10,0,0,0,0,6,13,1,24,1,9,1,
     9,1,122,27,66,117,105,108,116,105,110,73,109,112,111,114,
     116,101,114,46,108,111,97,100,95,109,111,100,117,108,101,99,
@@ -2138,7 +2138,7 @@
     101,32,111,98,106,101,99,116,115,46,78,114,4,0,0,0,
     41,2,114,11,1,0,0,114,158,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,218,8,103,101,116,
-    95,99,111,100,101,19,5,0,0,115,2,0,0,0,0,4,
+    95,99,111,100,101,20,5,0,0,115,2,0,0,0,0,4,
     122,24,66,117,105,108,116,105,110,73,109,112,111,114,116,101,
     114,46,103,101,116,95,99,111,100,101,99,2,0,0,0,0,
     0,0,0,2,0,0,0,1,0,0,0,67,0,0,0,115,
@@ -2149,7 +2149,7 @@
     111,100,101,46,78,114,4,0,0,0,41,2,114,11,1,0,
     0,114,158,0,0,0,114,4,0,0,0,114,4,0,0,0,
     114,5,0,0,0,218,10,103,101,116,95,115,111,117,114,99,
-    101,25,5,0,0,115,2,0,0,0,0,4,122,26,66,117,
+    101,26,5,0,0,115,2,0,0,0,0,4,122,26,66,117,
     105,108,116,105,110,73,109,112,111,114,116,101,114,46,103,101,
     116,95,115,111,117,114,99,101,99,2,0,0,0,0,0,0,
     0,2,0,0,0,1,0,0,0,67,0,0,0,115,4,0,
@@ -2159,7 +2159,7 @@
     101,118,101,114,32,112,97,99,107,97,103,101,115,46,70,114,
     4,0,0,0,41,2,114,11,1,0,0,114,158,0,0,0,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,
-    219,0,0,0,31,5,0,0,115,2,0,0,0,0,4,122,
+    219,0,0,0,32,5,0,0,115,2,0,0,0,0,4,122,
     26,66,117,105,108,116,105,110,73,109,112,111,114,116,101,114,
     46,105,115,95,112,97,99,107,97,103,101,41,14,114,57,0,
     0,0,114,56,0,0,0,114,58,0,0,0,114,59,0,0,
@@ -2168,7 +2168,7 @@
     100,114,13,1,0,0,114,14,1,0,0,114,161,0,0,0,
     114,4,1,0,0,114,15,1,0,0,114,16,1,0,0,114,
     219,0,0,0,114,4,0,0,0,114,4,0,0,0,114,4,
-    0,0,0,114,5,0,0,0,114,10,1,0,0,224,4,0,
+    0,0,0,114,5,0,0,0,114,10,1,0,0,225,4,0,
     0,115,28,0,0,0,12,7,6,2,18,9,3,1,21,8,
     3,1,18,11,3,1,21,11,3,1,21,5,3,1,21,5,
     3,1,114,10,1,0,0,99,0,0,0,0,0,0,0,0,
@@ -2209,7 +2209,7 @@
     33,114,125,32,40,102,114,111,122,101,110,41,62,41,2,114,
     47,0,0,0,114,57,0,0,0,41,1,218,1,109,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,114,205,0,
-    0,0,47,5,0,0,115,2,0,0,0,0,7,122,26,70,
+    0,0,48,5,0,0,115,2,0,0,0,0,7,122,26,70,
     114,111,122,101,110,73,109,112,111,114,116,101,114,46,109,111,
     100,117,108,101,95,114,101,112,114,78,99,4,0,0,0,0,
     0,0,0,4,0,0,0,5,0,0,0,67,0,0,0,115,
@@ -2220,7 +2220,7 @@
     114,106,0,0,0,114,162,0,0,0,114,173,0,0,0,41,
     4,114,11,1,0,0,114,158,0,0,0,114,35,0,0,0,
     114,12,1,0,0,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,114,13,1,0,0,56,5,0,0,115,6,0,
+    5,0,0,0,114,13,1,0,0,57,5,0,0,115,6,0,
     0,0,0,2,15,1,19,2,122,24,70,114,111,122,101,110,
     73,109,112,111,114,116,101,114,46,102,105,110,100,95,115,112,
     101,99,99,3,0,0,0,0,0,0,0,3,0,0,0,2,
@@ -2235,7 +2235,7 @@
     32,32,32,32,78,41,2,114,106,0,0,0,114,162,0,0,
     0,41,3,114,11,1,0,0,114,158,0,0,0,114,35,0,
     0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,
-    0,114,14,1,0,0,63,5,0,0,115,2,0,0,0,0,
+    0,114,14,1,0,0,64,5,0,0,115,2,0,0,0,0,
     7,122,26,70,114,111,122,101,110,73,109,112,111,114,116,101,
     114,46,102,105,110,100,95,109,111,100,117,108,101,99,1,0,
     0,0,0,0,0,0,3,0,0,0,4,0,0,0,67,0,
@@ -2254,7 +2254,7 @@
     101,99,116,114,175,0,0,0,114,63,0,0,0,41,3,114,
     179,0,0,0,114,67,0,0,0,114,194,0,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,114,1,1,
-    0,0,72,5,0,0,115,12,0,0,0,0,2,12,1,15,
+    0,0,73,5,0,0,115,12,0,0,0,0,2,12,1,15,
     1,18,1,12,1,18,1,122,26,70,114,111,122,101,110,73,
     109,112,111,114,116,101,114,46,101,120,101,99,95,109,111,100,
     117,108,101,99,2,0,0,0,0,0,0,0,2,0,0,0,
@@ -2268,7 +2268,7 @@
     97,100,46,10,10,32,32,32,32,32,32,32,32,41,1,114,
     180,0,0,0,41,2,114,11,1,0,0,114,158,0,0,0,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,
-    4,1,0,0,81,5,0,0,115,2,0,0,0,0,7,122,
+    4,1,0,0,82,5,0,0,115,2,0,0,0,0,7,122,
     26,70,114,111,122,101,110,73,109,112,111,114,116,101,114,46,
     108,111,97,100,95,109,111,100,117,108,101,99,2,0,0,0,
     0,0,0,0,2,0,0,0,2,0,0,0,67,0,0,0,
@@ -2278,7 +2278,7 @@
     32,116,104,101,32,102,114,111,122,101,110,32,109,111,100,117,
     108,101,46,41,2,114,106,0,0,0,114,21,1,0,0,41,
     2,114,11,1,0,0,114,158,0,0,0,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,15,1,0,0,90,
+    114,4,0,0,0,114,5,0,0,0,114,15,1,0,0,91,
     5,0,0,115,2,0,0,0,0,4,122,23,70,114,111,122,
     101,110,73,109,112,111,114,116,101,114,46,103,101,116,95,99,
     111,100,101,99,2,0,0,0,0,0,0,0,2,0,0,0,
@@ -2289,7 +2289,7 @@
     111,117,114,99,101,32,99,111,100,101,46,78,114,4,0,0,
     0,41,2,114,11,1,0,0,114,158,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,16,1,0,
-    0,96,5,0,0,115,2,0,0,0,0,4,122,25,70,114,
+    0,97,5,0,0,115,2,0,0,0,0,4,122,25,70,114,
     111,122,101,110,73,109,112,111,114,116,101,114,46,103,101,116,
     95,115,111,117,114,99,101,99,2,0,0,0,0,0,0,0,
     2,0,0,0,2,0,0,0,67,0,0,0,115,13,0,0,
@@ -2300,7 +2300,7 @@
     41,2,114,106,0,0,0,90,17,105,115,95,102,114,111,122,
     101,110,95,112,97,99,107,97,103,101,41,2,114,11,1,0,
     0,114,158,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,5,0,0,0,114,219,0,0,0,102,5,0,0,115,2,
+    114,5,0,0,0,114,219,0,0,0,103,5,0,0,115,2,
     0,0,0,0,4,122,25,70,114,111,122,101,110,73,109,112,
     111,114,116,101,114,46,105,115,95,112,97,99,107,97,103,101,
     41,15,114,57,0,0,0,114,56,0,0,0,114,58,0,0,
@@ -2309,7 +2309,7 @@
     1,1,0,0,114,4,1,0,0,114,164,0,0,0,114,15,
     1,0,0,114,16,1,0,0,114,219,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,
-    0,114,19,1,0,0,38,5,0,0,115,28,0,0,0,12,
+    0,114,19,1,0,0,39,5,0,0,115,28,0,0,0,12,
     7,6,2,18,9,3,1,21,6,3,1,18,8,18,9,18,
     9,3,1,21,5,3,1,21,5,3,1,114,19,1,0,0,
     99,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,
@@ -2347,7 +2347,7 @@
     76,79,67,65,76,95,77,65,67,72,73,78,69,41,2,114,
     11,1,0,0,218,3,107,101,121,114,4,0,0,0,114,4,
     0,0,0,114,5,0,0,0,218,14,95,111,112,101,110,95,
-    114,101,103,105,115,116,114,121,121,5,0,0,115,8,0,0,
+    114,101,103,105,115,116,114,121,122,5,0,0,115,8,0,0,
     0,0,2,3,1,23,1,13,1,122,36,87,105,110,100,111,
     119,115,82,101,103,105,115,116,114,121,70,105,110,100,101,114,
     46,95,111,112,101,110,95,114,101,103,105,115,116,114,121,99,
@@ -2374,7 +2374,7 @@
     24,1,0,0,90,4,104,107,101,121,218,8,102,105,108,101,
     112,97,116,104,114,4,0,0,0,114,4,0,0,0,114,5,
     0,0,0,218,16,95,115,101,97,114,99,104,95,114,101,103,
-    105,115,116,114,121,128,5,0,0,115,22,0,0,0,0,2,
+    105,115,116,114,121,129,5,0,0,115,22,0,0,0,0,2,
     9,1,12,2,9,1,15,1,22,1,3,1,18,1,28,1,
     13,1,9,1,122,38,87,105,110,100,111,119,115,82,101,103,
     105,115,116,114,121,70,105,110,100,101,114,46,95,115,101,97,
@@ -2396,7 +2396,7 @@
     8,114,11,1,0,0,114,158,0,0,0,114,35,0,0,0,
     114,12,1,0,0,114,30,1,0,0,114,169,0,0,0,114,
     127,0,0,0,114,177,0,0,0,114,4,0,0,0,114,4,
-    0,0,0,114,5,0,0,0,114,13,1,0,0,143,5,0,
+    0,0,0,114,5,0,0,0,114,13,1,0,0,144,5,0,
     0,115,24,0,0,0,0,2,15,1,12,1,4,1,3,1,
     14,1,13,1,9,1,22,1,21,1,21,1,9,1,122,31,
     87,105,110,100,111,119,115,82,101,103,105,115,116,114,121,70,
@@ -2415,7 +2415,7 @@
     32,32,32,32,32,32,78,41,2,114,13,1,0,0,114,169,
     0,0,0,41,4,114,11,1,0,0,114,158,0,0,0,114,
     35,0,0,0,114,177,0,0,0,114,4,0,0,0,114,4,
-    0,0,0,114,5,0,0,0,114,14,1,0,0,158,5,0,
+    0,0,0,114,5,0,0,0,114,14,1,0,0,159,5,0,
     0,115,8,0,0,0,0,7,18,1,12,1,7,2,122,33,
     87,105,110,100,111,119,115,82,101,103,105,115,116,114,121,70,
     105,110,100,101,114,46,102,105,110,100,95,109,111,100,117,108,
@@ -2424,7 +2424,7 @@
     0,114,26,1,0,0,114,18,1,0,0,114,25,1,0,0,
     114,31,1,0,0,114,13,1,0,0,114,14,1,0,0,114,
     4,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,22,1,0,0,109,5,0,0,115,20,0,0,
+    0,0,0,114,22,1,0,0,110,5,0,0,115,20,0,0,
     0,12,2,6,3,6,3,6,2,6,2,18,7,18,15,3,
     1,21,14,3,1,114,22,1,0,0,99,0,0,0,0,0,
     0,0,0,0,0,0,0,2,0,0,0,64,0,0,0,115,
@@ -2460,7 +2460,7 @@
     41,5,114,71,0,0,0,114,158,0,0,0,114,131,0,0,
     0,90,13,102,105,108,101,110,97,109,101,95,98,97,115,101,
     90,9,116,97,105,108,95,110,97,109,101,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,219,0,0,0,177,
+    114,4,0,0,0,114,5,0,0,0,114,219,0,0,0,178,
     5,0,0,115,8,0,0,0,0,3,25,1,22,1,19,1,
     122,24,95,76,111,97,100,101,114,66,97,115,105,99,115,46,
     105,115,95,112,97,99,107,97,103,101,99,2,0,0,0,0,
@@ -2479,14 +2479,14 @@
     0,0,0,114,47,0,0,0,114,114,0,0,0,114,175,0,
     0,0,114,63,0,0,0,41,3,114,71,0,0,0,114,179,
     0,0,0,114,194,0,0,0,114,4,0,0,0,114,4,0,
-    0,0,114,5,0,0,0,114,1,1,0,0,185,5,0,0,
+    0,0,114,5,0,0,0,114,1,1,0,0,186,5,0,0,
     115,10,0,0,0,0,2,18,1,12,1,3,1,24,1,122,
     25,95,76,111,97,100,101,114,66,97,115,105,99,115,46,101,
     120,101,99,95,109,111,100,117,108,101,78,41,8,114,57,0,
     0,0,114,56,0,0,0,114,58,0,0,0,114,59,0,0,
     0,114,219,0,0,0,114,1,1,0,0,114,180,0,0,0,
     114,4,1,0,0,114,4,0,0,0,114,4,0,0,0,114,
-    4,0,0,0,114,5,0,0,0,114,32,1,0,0,172,5,
+    4,0,0,0,114,5,0,0,0,114,32,1,0,0,173,5,
     0,0,115,8,0,0,0,12,3,6,2,12,8,12,8,114,
     32,1,0,0,99,0,0,0,0,0,0,0,0,0,0,0,
     0,4,0,0,0,64,0,0,0,115,106,0,0,0,101,0,
@@ -2514,7 +2514,7 @@
     32,32,78,41,1,218,7,73,79,69,114,114,111,114,41,2,
     114,71,0,0,0,114,35,0,0,0,114,4,0,0,0,114,
     4,0,0,0,114,5,0,0,0,218,10,112,97,116,104,95,
-    109,116,105,109,101,198,5,0,0,115,2,0,0,0,0,6,
+    109,116,105,109,101,199,5,0,0,115,2,0,0,0,0,6,
     122,23,83,111,117,114,99,101,76,111,97,100,101,114,46,112,
     97,116,104,95,109,116,105,109,101,99,2,0,0,0,0,0,
     0,0,2,0,0,0,3,0,0,0,67,0,0,0,115,20,
@@ -2549,7 +2549,7 @@
     32,32,32,32,32,32,32,32,114,183,0,0,0,41,1,114,
     35,1,0,0,41,2,114,71,0,0,0,114,35,0,0,0,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,218,
-    10,112,97,116,104,95,115,116,97,116,115,206,5,0,0,115,
+    10,112,97,116,104,95,115,116,97,116,115,207,5,0,0,115,
     2,0,0,0,0,11,122,23,83,111,117,114,99,101,76,111,
     97,100,101,114,46,112,97,116,104,95,115,116,97,116,115,99,
     4,0,0,0,0,0,0,0,4,0,0,0,3,0,0,0,
@@ -2573,7 +2573,7 @@
     71,0,0,0,114,141,0,0,0,90,10,99,97,99,104,101,
     95,112,97,116,104,114,53,0,0,0,114,4,0,0,0,114,
     4,0,0,0,114,5,0,0,0,218,15,95,99,97,99,104,
-    101,95,98,121,116,101,99,111,100,101,219,5,0,0,115,2,
+    101,95,98,121,116,101,99,111,100,101,220,5,0,0,115,2,
     0,0,0,0,8,122,28,83,111,117,114,99,101,76,111,97,
     100,101,114,46,95,99,97,99,104,101,95,98,121,116,101,99,
     111,100,101,99,3,0,0,0,0,0,0,0,3,0,0,0,
@@ -2590,7 +2590,7 @@
     115,46,10,32,32,32,32,32,32,32,32,78,114,4,0,0,
     0,41,3,114,71,0,0,0,114,35,0,0,0,114,53,0,
     0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,
-    0,114,37,1,0,0,229,5,0,0,115,0,0,0,0,122,
+    0,114,37,1,0,0,230,5,0,0,115,0,0,0,0,122,
     21,83,111,117,114,99,101,76,111,97,100,101,114,46,115,101,
     116,95,100,97,116,97,99,2,0,0,0,0,0,0,0,5,
     0,0,0,16,0,0,0,67,0,0,0,115,105,0,0,0,
@@ -2611,7 +2611,7 @@
     97,114,40,0,0,0,114,153,0,0,0,114,203,0,0,0,
     41,5,114,71,0,0,0,114,158,0,0,0,114,35,0,0,
     0,114,201,0,0,0,218,3,101,120,99,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,16,1,0,0,236,
+    114,4,0,0,0,114,5,0,0,0,114,16,1,0,0,237,
     5,0,0,115,14,0,0,0,0,2,15,1,3,1,19,1,
     18,1,9,1,31,1,122,23,83,111,117,114,99,101,76,111,
     97,100,101,114,46,103,101,116,95,115,111,117,114,99,101,218,
@@ -2633,7 +2633,7 @@
     99,111,109,112,105,108,101,41,4,114,71,0,0,0,114,53,
     0,0,0,114,35,0,0,0,114,41,1,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,218,14,115,111,
-    117,114,99,101,95,116,111,95,99,111,100,101,246,5,0,0,
+    117,114,99,101,95,116,111,95,99,111,100,101,247,5,0,0,
     115,4,0,0,0,0,5,18,1,122,27,83,111,117,114,99,
     101,76,111,97,100,101,114,46,115,111,117,114,99,101,95,116,
     111,95,99,111,100,101,99,2,0,0,0,0,0,0,0,10,
@@ -2694,7 +2694,7 @@
     115,116,114,53,0,0,0,218,10,98,121,116,101,115,95,100,
     97,116,97,114,201,0,0,0,90,11,99,111,100,101,95,111,
     98,106,101,99,116,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,114,15,1,0,0,254,5,0,0,115,78,0,
+    5,0,0,0,114,15,1,0,0,255,5,0,0,115,78,0,
     0,0,0,7,15,1,6,1,3,1,16,1,13,1,11,2,
     3,1,19,1,13,1,5,2,16,1,3,1,19,1,13,1,
     5,2,3,1,9,1,12,1,13,1,19,1,5,2,9,1,
@@ -2706,7 +2706,7 @@
     1,0,0,114,36,1,0,0,114,38,1,0,0,114,37,1,
     0,0,114,16,1,0,0,114,44,1,0,0,114,15,1,0,
     0,114,4,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,5,0,0,0,114,33,1,0,0,196,5,0,0,115,14,
+    114,5,0,0,0,114,33,1,0,0,197,5,0,0,115,14,
     0,0,0,12,2,12,8,12,13,12,10,12,7,12,10,18,
     8,114,33,1,0,0,99,0,0,0,0,0,0,0,0,0,
     0,0,0,4,0,0,0,0,0,0,0,115,112,0,0,0,
@@ -2735,7 +2735,7 @@
     105,110,100,101,114,46,78,41,2,114,67,0,0,0,114,35,
     0,0,0,41,3,114,71,0,0,0,114,158,0,0,0,114,
     35,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,72,0,0,0,55,6,0,0,115,4,0,0,
+    0,0,0,114,72,0,0,0,56,6,0,0,115,4,0,0,
     0,0,3,9,1,122,19,70,105,108,101,76,111,97,100,101,
     114,46,95,95,105,110,105,116,95,95,99,2,0,0,0,0,
     0,0,0,2,0,0,0,3,0,0,0,67,0,0,0,115,
@@ -2744,7 +2744,7 @@
     1,0,107,2,0,83,41,1,78,41,2,114,224,0,0,0,
     114,63,0,0,0,41,2,114,71,0,0,0,114,227,0,0,
     0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
-    114,229,0,0,0,61,6,0,0,115,4,0,0,0,0,1,
+    114,229,0,0,0,62,6,0,0,115,4,0,0,0,0,1,
     18,1,122,17,70,105,108,101,76,111,97,100,101,114,46,95,
     95,101,113,95,95,99,1,0,0,0,0,0,0,0,1,0,
     0,0,3,0,0,0,67,0,0,0,115,26,0,0,0,116,
@@ -2752,7 +2752,7 @@
     0,106,2,0,131,1,0,65,83,41,1,78,41,3,218,4,
     104,97,115,104,114,67,0,0,0,114,35,0,0,0,41,1,
     114,71,0,0,0,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,218,8,95,95,104,97,115,104,95,95,65,6,
+    5,0,0,0,218,8,95,95,104,97,115,104,95,95,66,6,
     0,0,115,2,0,0,0,0,1,122,19,70,105,108,101,76,
     111,97,100,101,114,46,95,95,104,97,115,104,95,95,99,2,
     0,0,0,0,0,0,0,2,0,0,0,3,0,0,0,3,
@@ -2767,7 +2767,7 @@
     32,32,32,32,32,32,41,3,218,5,115,117,112,101,114,114,
     48,1,0,0,114,4,1,0,0,41,2,114,71,0,0,0,
     114,158,0,0,0,41,1,114,224,0,0,0,114,4,0,0,
-    0,114,5,0,0,0,114,4,1,0,0,68,6,0,0,115,
+    0,114,5,0,0,0,114,4,1,0,0,69,6,0,0,115,
     2,0,0,0,0,10,122,22,70,105,108,101,76,111,97,100,
     101,114,46,108,111,97,100,95,109,111,100,117,108,101,99,2,
     0,0,0,0,0,0,0,2,0,0,0,1,0,0,0,67,
@@ -2778,7 +2778,7 @@
     121,32,116,104,101,32,102,105,110,100,101,114,46,41,1,114,
     35,0,0,0,41,2,114,71,0,0,0,114,158,0,0,0,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,
-    238,0,0,0,80,6,0,0,115,2,0,0,0,0,3,122,
+    238,0,0,0,81,6,0,0,115,2,0,0,0,0,3,122,
     23,70,105,108,101,76,111,97,100,101,114,46,103,101,116,95,
     102,105,108,101,110,97,109,101,99,2,0,0,0,0,0,0,
     0,3,0,0,0,8,0,0,0,67,0,0,0,115,41,0,
@@ -2791,14 +2791,14 @@
     0,114,50,0,0,0,90,4,114,101,97,100,41,3,114,71,
     0,0,0,114,35,0,0,0,114,54,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,39,1,0,
-    0,85,6,0,0,115,4,0,0,0,0,2,21,1,122,19,
+    0,86,6,0,0,115,4,0,0,0,0,2,21,1,122,19,
     70,105,108,101,76,111,97,100,101,114,46,103,101,116,95,100,
     97,116,97,41,11,114,57,0,0,0,114,56,0,0,0,114,
     58,0,0,0,114,59,0,0,0,114,72,0,0,0,114,229,
     0,0,0,114,50,1,0,0,114,156,0,0,0,114,4,1,
     0,0,114,238,0,0,0,114,39,1,0,0,114,4,0,0,
     0,114,4,0,0,0,41,1,114,224,0,0,0,114,5,0,
-    0,0,114,48,1,0,0,50,6,0,0,115,14,0,0,0,
+    0,0,114,48,1,0,0,51,6,0,0,115,14,0,0,0,
     12,3,6,2,12,6,12,4,12,3,24,12,18,5,114,48,
     1,0,0,99,0,0,0,0,0,0,0,0,0,0,0,0,
     4,0,0,0,64,0,0,0,115,64,0,0,0,101,0,0,
@@ -2821,7 +2821,7 @@
     109,101,90,7,115,116,95,115,105,122,101,41,3,114,71,0,
     0,0,114,35,0,0,0,114,46,1,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,36,1,0,0,
-    95,6,0,0,115,4,0,0,0,0,2,12,1,122,27,83,
+    96,6,0,0,115,4,0,0,0,0,2,12,1,122,27,83,
     111,117,114,99,101,70,105,108,101,76,111,97,100,101,114,46,
     112,97,116,104,95,115,116,97,116,115,99,4,0,0,0,0,
     0,0,0,5,0,0,0,5,0,0,0,67,0,0,0,115,
@@ -2831,7 +2831,7 @@
     41,2,114,144,0,0,0,114,37,1,0,0,41,5,114,71,
     0,0,0,114,141,0,0,0,114,140,0,0,0,114,53,0,
     0,0,114,42,0,0,0,114,4,0,0,0,114,4,0,0,
-    0,114,5,0,0,0,114,38,1,0,0,100,6,0,0,115,
+    0,114,5,0,0,0,114,38,1,0,0,101,6,0,0,115,
     4,0,0,0,0,2,12,1,122,32,83,111,117,114,99,101,
     70,105,108,101,76,111,97,100,101,114,46,95,99,97,99,104,
     101,95,98,121,116,101,99,111,100,101,114,54,1,0,0,105,
@@ -2869,7 +2869,7 @@
     0,114,53,0,0,0,114,54,1,0,0,114,233,0,0,0,
     114,131,0,0,0,114,27,0,0,0,114,23,0,0,0,114,
     40,1,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,37,1,0,0,105,6,0,0,115,38,0,0,
+    0,0,0,114,37,1,0,0,106,6,0,0,115,38,0,0,
     0,0,2,18,1,6,2,22,1,18,1,17,2,19,1,15,
     1,3,1,17,1,13,2,7,1,18,3,16,1,27,1,3,
     1,16,1,17,1,18,2,122,25,83,111,117,114,99,101,70,
@@ -2878,7 +2878,7 @@
     58,0,0,0,114,59,0,0,0,114,36,1,0,0,114,38,
     1,0,0,114,37,1,0,0,114,4,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,8,1,0,
-    0,91,6,0,0,115,8,0,0,0,12,2,6,2,12,5,
+    0,92,6,0,0,115,8,0,0,0,12,2,6,2,12,5,
     12,5,114,8,1,0,0,99,0,0,0,0,0,0,0,0,
     0,0,0,0,2,0,0,0,64,0,0,0,115,46,0,0,
     0,101,0,0,90,1,0,100,0,0,90,2,0,100,1,0,
@@ -2899,7 +2899,7 @@
     0,0,0,41,5,114,71,0,0,0,114,158,0,0,0,114,
     35,0,0,0,114,53,0,0,0,114,47,1,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,114,15,1,
-    0,0,138,6,0,0,115,8,0,0,0,0,1,15,1,15,
+    0,0,139,6,0,0,115,8,0,0,0,0,1,15,1,15,
     1,24,1,122,29,83,111,117,114,99,101,108,101,115,115,70,
     105,108,101,76,111,97,100,101,114,46,103,101,116,95,99,111,
     100,101,99,2,0,0,0,0,0,0,0,2,0,0,0,1,
@@ -2909,13 +2909,13 @@
     111,117,114,99,101,32,99,111,100,101,46,78,114,4,0,0,
     0,41,2,114,71,0,0,0,114,158,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,16,1,0,
-    0,144,6,0,0,115,2,0,0,0,0,2,122,31,83,111,
+    0,145,6,0,0,115,2,0,0,0,0,2,122,31,83,111,
     117,114,99,101,108,101,115,115,70,105,108,101,76,111,97,100,
     101,114,46,103,101,116,95,115,111,117,114,99,101,78,41,6,
     114,57,0,0,0,114,56,0,0,0,114,58,0,0,0,114,
     59,0,0,0,114,15,1,0,0,114,16,1,0,0,114,4,
     0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,
-    0,0,114,7,1,0,0,134,6,0,0,115,6,0,0,0,
+    0,0,114,7,1,0,0,135,6,0,0,115,6,0,0,0,
     12,2,6,2,12,6,114,7,1,0,0,99,0,0,0,0,
     0,0,0,0,0,0,0,0,3,0,0,0,64,0,0,0,
     115,130,0,0,0,101,0,0,90,1,0,100,0,0,90,2,
@@ -2940,7 +2940,7 @@
     41,2,114,67,0,0,0,114,35,0,0,0,41,3,114,71,
     0,0,0,114,67,0,0,0,114,35,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,72,0,0,
-    0,161,6,0,0,115,4,0,0,0,0,1,9,1,122,28,
+    0,162,6,0,0,115,4,0,0,0,0,1,9,1,122,28,
     69,120,116,101,110,115,105,111,110,70,105,108,101,76,111,97,
     100,101,114,46,95,95,105,110,105,116,95,95,99,2,0,0,
     0,0,0,0,0,2,0,0,0,3,0,0,0,67,0,0,
@@ -2949,7 +2949,7 @@
     0,106,1,0,107,2,0,83,41,1,78,41,2,114,224,0,
     0,0,114,63,0,0,0,41,2,114,71,0,0,0,114,227,
     0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,
-    0,0,114,229,0,0,0,165,6,0,0,115,4,0,0,0,
+    0,0,114,229,0,0,0,166,6,0,0,115,4,0,0,0,
     0,1,18,1,122,26,69,120,116,101,110,115,105,111,110,70,
     105,108,101,76,111,97,100,101,114,46,95,95,101,113,95,95,
     99,1,0,0,0,0,0,0,0,1,0,0,0,3,0,0,
@@ -2958,7 +2958,7 @@
     1,0,65,83,41,1,78,41,3,114,49,1,0,0,114,67,
     0,0,0,114,35,0,0,0,41,1,114,71,0,0,0,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,50,
-    1,0,0,169,6,0,0,115,2,0,0,0,0,1,122,28,
+    1,0,0,170,6,0,0,115,2,0,0,0,0,1,122,28,
     69,120,116,101,110,115,105,111,110,70,105,108,101,76,111,97,
     100,101,114,46,95,95,104,97,115,104,95,95,99,2,0,0,
     0,0,0,0,0,4,0,0,0,11,0,0,0,67,0,0,
@@ -2986,7 +2986,7 @@
     57,0,0,0,114,250,0,0,0,114,32,0,0,0,41,4,
     114,71,0,0,0,114,158,0,0,0,114,179,0,0,0,114,
     219,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,4,1,0,0,172,6,0,0,115,24,0,0,
+    0,0,0,114,4,1,0,0,173,6,0,0,115,24,0,0,
     0,0,5,13,1,9,1,21,1,16,1,15,1,22,1,28,
     1,9,1,12,1,6,1,28,1,122,31,69,120,116,101,110,
     115,105,111,110,70,105,108,101,76,111,97,100,101,114,46,108,
@@ -3005,7 +3005,7 @@
     113,3,0,100,1,0,83,41,2,114,72,0,0,0,78,114,
     4,0,0,0,41,2,114,22,0,0,0,218,6,115,117,102,
     102,105,120,41,1,218,9,102,105,108,101,95,110,97,109,101,
-    114,4,0,0,0,114,5,0,0,0,114,77,0,0,0,193,
+    114,4,0,0,0,114,5,0,0,0,114,77,0,0,0,194,
     6,0,0,115,2,0,0,0,6,1,122,49,69,120,116,101,
     110,115,105,111,110,70,105,108,101,76,111,97,100,101,114,46,
     105,115,95,112,97,99,107,97,103,101,46,60,108,111,99,97,
@@ -3014,7 +3014,7 @@
     69,88,84,69,78,83,73,79,78,95,83,85,70,70,73,88,
     69,83,41,2,114,71,0,0,0,114,158,0,0,0,114,4,
     0,0,0,41,1,114,58,1,0,0,114,5,0,0,0,114,
-    219,0,0,0,190,6,0,0,115,6,0,0,0,0,2,19,
+    219,0,0,0,191,6,0,0,115,6,0,0,0,0,2,19,
     1,18,1,122,30,69,120,116,101,110,115,105,111,110,70,105,
     108,101,76,111,97,100,101,114,46,105,115,95,112,97,99,107,
     97,103,101,99,2,0,0,0,0,0,0,0,2,0,0,0,
@@ -3025,7 +3025,7 @@
     114,101,97,116,101,32,97,32,99,111,100,101,32,111,98,106,
     101,99,116,46,78,114,4,0,0,0,41,2,114,71,0,0,
     0,114,158,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,5,0,0,0,114,15,1,0,0,196,6,0,0,115,2,
+    114,5,0,0,0,114,15,1,0,0,197,6,0,0,115,2,
     0,0,0,0,2,122,28,69,120,116,101,110,115,105,111,110,
     70,105,108,101,76,111,97,100,101,114,46,103,101,116,95,99,
     111,100,101,99,2,0,0,0,0,0,0,0,2,0,0,0,
@@ -3036,7 +3036,7 @@
     117,114,99,101,32,99,111,100,101,46,78,114,4,0,0,0,
     41,2,114,71,0,0,0,114,158,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,16,1,0,0,
-    200,6,0,0,115,2,0,0,0,0,2,122,30,69,120,116,
+    201,6,0,0,115,2,0,0,0,0,2,122,30,69,120,116,
     101,110,115,105,111,110,70,105,108,101,76,111,97,100,101,114,
     46,103,101,116,95,115,111,117,114,99,101,99,2,0,0,0,
     0,0,0,0,2,0,0,0,1,0,0,0,67,0,0,0,
@@ -3047,7 +3047,7 @@
     104,101,32,102,105,110,100,101,114,46,41,1,114,35,0,0,
     0,41,2,114,71,0,0,0,114,158,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,238,0,0,
-    0,204,6,0,0,115,2,0,0,0,0,3,122,32,69,120,
+    0,205,6,0,0,115,2,0,0,0,0,3,122,32,69,120,
     116,101,110,115,105,111,110,70,105,108,101,76,111,97,100,101,
     114,46,103,101,116,95,102,105,108,101,110,97,109,101,78,41,
     13,114,57,0,0,0,114,56,0,0,0,114,58,0,0,0,
@@ -3055,7 +3055,7 @@
     50,1,0,0,114,156,0,0,0,114,4,1,0,0,114,219,
     0,0,0,114,15,1,0,0,114,16,1,0,0,114,238,0,
     0,0,114,4,0,0,0,114,4,0,0,0,114,4,0,0,
-    0,114,5,0,0,0,114,56,1,0,0,153,6,0,0,115,
+    0,114,5,0,0,0,114,56,1,0,0,154,6,0,0,115,
     18,0,0,0,12,6,6,2,12,4,12,4,12,3,18,18,
     12,6,12,4,12,4,114,56,1,0,0,99,0,0,0,0,
     0,0,0,0,0,0,0,0,2,0,0,0,64,0,0,0,
@@ -3099,7 +3099,7 @@
     116,104,95,102,105,110,100,101,114,41,4,114,71,0,0,0,
     114,67,0,0,0,114,35,0,0,0,218,11,112,97,116,104,
     95,102,105,110,100,101,114,114,4,0,0,0,114,4,0,0,
-    0,114,5,0,0,0,114,72,0,0,0,217,6,0,0,115,
+    0,114,5,0,0,0,114,72,0,0,0,218,6,0,0,115,
     8,0,0,0,0,1,9,1,9,1,21,1,122,23,95,78,
     97,109,101,115,112,97,99,101,80,97,116,104,46,95,95,105,
     110,105,116,95,95,99,1,0,0,0,0,0,0,0,4,0,
@@ -3118,7 +3118,7 @@
     233,0,0,0,218,3,100,111,116,114,94,0,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,218,23,95,
     102,105,110,100,95,112,97,114,101,110,116,95,112,97,116,104,
-    95,110,97,109,101,115,223,6,0,0,115,8,0,0,0,0,
+    95,110,97,109,101,115,224,6,0,0,115,8,0,0,0,0,
     2,27,1,12,2,4,3,122,38,95,78,97,109,101,115,112,
     97,99,101,80,97,116,104,46,95,102,105,110,100,95,112,97,
     114,101,110,116,95,112,97,116,104,95,110,97,109,101,115,99,
@@ -3131,7 +3131,7 @@
     18,112,97,114,101,110,116,95,109,111,100,117,108,101,95,110,
     97,109,101,90,14,112,97,116,104,95,97,116,116,114,95,110,
     97,109,101,114,4,0,0,0,114,4,0,0,0,114,5,0,
-    0,0,114,61,1,0,0,233,6,0,0,115,4,0,0,0,
+    0,0,114,61,1,0,0,234,6,0,0,115,4,0,0,0,
     0,1,18,1,122,31,95,78,97,109,101,115,112,97,99,101,
     80,97,116,104,46,95,103,101,116,95,112,97,114,101,110,116,
     95,112,97,116,104,99,1,0,0,0,0,0,0,0,3,0,
@@ -3150,7 +3150,7 @@
     0,0,90,11,112,97,114,101,110,116,95,112,97,116,104,114,
     177,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
     0,0,0,218,12,95,114,101,99,97,108,99,117,108,97,116,
-    101,237,6,0,0,115,16,0,0,0,0,2,18,1,15,1,
+    101,238,6,0,0,115,16,0,0,0,0,2,18,1,15,1,
     21,3,27,1,9,1,18,1,12,1,122,27,95,78,97,109,
     101,115,112,97,99,101,80,97,116,104,46,95,114,101,99,97,
     108,99,117,108,97,116,101,99,1,0,0,0,0,0,0,0,
@@ -3159,14 +3159,14 @@
     83,41,1,78,41,2,218,4,105,116,101,114,114,67,1,0,
     0,41,1,114,71,0,0,0,114,4,0,0,0,114,4,0,
     0,0,114,5,0,0,0,218,8,95,95,105,116,101,114,95,
-    95,250,6,0,0,115,2,0,0,0,0,1,122,23,95,78,
+    95,251,6,0,0,115,2,0,0,0,0,1,122,23,95,78,
     97,109,101,115,112,97,99,101,80,97,116,104,46,95,95,105,
     116,101,114,95,95,99,1,0,0,0,0,0,0,0,1,0,
     0,0,2,0,0,0,67,0,0,0,115,16,0,0,0,116,
     0,0,124,0,0,106,1,0,131,0,0,131,1,0,83,41,
     1,78,41,2,114,31,0,0,0,114,67,1,0,0,41,1,
     114,71,0,0,0,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,218,7,95,95,108,101,110,95,95,253,6,0,
+    5,0,0,0,218,7,95,95,108,101,110,95,95,254,6,0,
     0,115,2,0,0,0,0,1,122,22,95,78,97,109,101,115,
     112,97,99,101,80,97,116,104,46,95,95,108,101,110,95,95,
     99,1,0,0,0,0,0,0,0,1,0,0,0,2,0,0,
@@ -3175,7 +3175,7 @@
     78,97,109,101,115,112,97,99,101,80,97,116,104,40,123,33,
     114,125,41,41,2,114,47,0,0,0,114,253,0,0,0,41,
     1,114,71,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,5,0,0,0,114,101,0,0,0,0,7,0,0,115,2,
+    114,5,0,0,0,114,101,0,0,0,1,7,0,0,115,2,
     0,0,0,0,1,122,23,95,78,97,109,101,115,112,97,99,
     101,80,97,116,104,46,95,95,114,101,112,114,95,95,99,2,
     0,0,0,0,0,0,0,2,0,0,0,2,0,0,0,67,
@@ -3183,7 +3183,7 @@
     0,131,0,0,107,6,0,83,41,1,78,41,1,114,67,1,
     0,0,41,2,114,71,0,0,0,218,4,105,116,101,109,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,218,12,
-    95,95,99,111,110,116,97,105,110,115,95,95,3,7,0,0,
+    95,95,99,111,110,116,97,105,110,115,95,95,4,7,0,0,
     115,2,0,0,0,0,1,122,27,95,78,97,109,101,115,112,
     97,99,101,80,97,116,104,46,95,95,99,111,110,116,97,105,
     110,115,95,95,99,2,0,0,0,0,0,0,0,2,0,0,
@@ -3192,7 +3192,7 @@
     0,83,41,1,78,41,2,114,253,0,0,0,114,223,0,0,
     0,41,2,114,71,0,0,0,114,71,1,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,223,0,0,
-    0,6,7,0,0,115,2,0,0,0,0,1,122,21,95,78,
+    0,7,7,0,0,115,2,0,0,0,0,1,122,21,95,78,
     97,109,101,115,112,97,99,101,80,97,116,104,46,97,112,112,
     101,110,100,78,41,13,114,57,0,0,0,114,56,0,0,0,
     114,58,0,0,0,114,59,0,0,0,114,72,0,0,0,114,
@@ -3200,7 +3200,7 @@
     1,0,0,114,70,1,0,0,114,101,0,0,0,114,72,1,
     0,0,114,223,0,0,0,114,4,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,60,1,0,0,
-    210,6,0,0,115,20,0,0,0,12,5,6,2,12,6,12,
+    211,6,0,0,115,20,0,0,0,12,5,6,2,12,6,12,
     10,12,4,12,13,12,3,12,3,12,3,12,3,114,60,1,
     0,0,99,0,0,0,0,0,0,0,0,0,0,0,0,3,
     0,0,0,64,0,0,0,115,106,0,0,0,101,0,0,90,
@@ -3217,7 +3217,7 @@
     41,1,78,41,2,114,60,1,0,0,114,253,0,0,0,41,
     4,114,71,0,0,0,114,67,0,0,0,114,35,0,0,0,
     114,64,1,0,0,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,114,72,0,0,0,12,7,0,0,115,2,0,
+    5,0,0,0,114,72,0,0,0,13,7,0,0,115,2,0,
     0,0,0,1,122,25,95,78,97,109,101,115,112,97,99,101,
     76,111,97,100,101,114,46,95,95,105,110,105,116,95,95,99,
     2,0,0,0,0,0,0,0,2,0,0,0,2,0,0,0,
@@ -3234,21 +3234,21 @@
     110,97,109,101,115,112,97,99,101,41,62,41,2,114,47,0,
     0,0,114,57,0,0,0,41,2,114,11,1,0,0,114,179,
     0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,
-    0,0,114,205,0,0,0,15,7,0,0,115,2,0,0,0,
+    0,0,114,205,0,0,0,16,7,0,0,115,2,0,0,0,
     0,7,122,28,95,78,97,109,101,115,112,97,99,101,76,111,
     97,100,101,114,46,109,111,100,117,108,101,95,114,101,112,114,
     99,2,0,0,0,0,0,0,0,2,0,0,0,1,0,0,
     0,67,0,0,0,115,4,0,0,0,100,1,0,83,41,2,
     78,84,114,4,0,0,0,41,2,114,71,0,0,0,114,158,
     0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,
-    0,0,114,219,0,0,0,24,7,0,0,115,2,0,0,0,
+    0,0,114,219,0,0,0,25,7,0,0,115,2,0,0,0,
     0,1,122,27,95,78,97,109,101,115,112,97,99,101,76,111,
     97,100,101,114,46,105,115,95,112,97,99,107,97,103,101,99,
     2,0,0,0,0,0,0,0,2,0,0,0,1,0,0,0,
     67,0,0,0,115,4,0,0,0,100,1,0,83,41,2,78,
     114,30,0,0,0,114,4,0,0,0,41,2,114,71,0,0,
     0,114,158,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,5,0,0,0,114,16,1,0,0,27,7,0,0,115,2,
+    114,5,0,0,0,114,16,1,0,0,28,7,0,0,115,2,
     0,0,0,0,1,122,27,95,78,97,109,101,115,112,97,99,
     101,76,111,97,100,101,114,46,103,101,116,95,115,111,117,114,
     99,101,99,2,0,0,0,0,0,0,0,2,0,0,0,6,
@@ -3258,14 +3258,14 @@
     105,110,103,62,114,175,0,0,0,114,42,1,0,0,84,41,
     1,114,43,1,0,0,41,2,114,71,0,0,0,114,158,0,
     0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,
-    0,114,15,1,0,0,30,7,0,0,115,2,0,0,0,0,
+    0,114,15,1,0,0,31,7,0,0,115,2,0,0,0,0,
     1,122,25,95,78,97,109,101,115,112,97,99,101,76,111,97,
     100,101,114,46,103,101,116,95,99,111,100,101,99,2,0,0,
     0,0,0,0,0,2,0,0,0,1,0,0,0,67,0,0,
     0,115,4,0,0,0,100,0,0,83,41,1,78,114,4,0,
     0,0,41,2,114,71,0,0,0,114,179,0,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,114,1,1,
-    0,0,33,7,0,0,115,2,0,0,0,0,1,122,28,95,
+    0,0,34,7,0,0,115,2,0,0,0,0,1,122,28,95,
     78,97,109,101,115,112,97,99,101,76,111,97,100,101,114,46,
     101,120,101,99,95,109,111,100,117,108,101,99,2,0,0,0,
     0,0,0,0,2,0,0,0,3,0,0,0,67,0,0,0,
@@ -3283,14 +3283,14 @@
     41,3,114,152,0,0,0,114,253,0,0,0,114,180,0,0,
     0,41,2,114,71,0,0,0,114,158,0,0,0,114,4,0,
     0,0,114,4,0,0,0,114,5,0,0,0,114,4,1,0,
-    0,36,7,0,0,115,4,0,0,0,0,7,16,1,122,28,
+    0,37,7,0,0,115,4,0,0,0,0,7,16,1,122,28,
     95,78,97,109,101,115,112,97,99,101,76,111,97,100,101,114,
     46,108,111,97,100,95,109,111,100,117,108,101,78,41,11,114,
     57,0,0,0,114,56,0,0,0,114,58,0,0,0,114,72,
     0,0,0,114,18,1,0,0,114,205,0,0,0,114,219,0,
     0,0,114,16,1,0,0,114,15,1,0,0,114,1,1,0,
     0,114,4,1,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,251,0,0,0,11,
+    114,4,0,0,0,114,5,0,0,0,114,251,0,0,0,12,
     7,0,0,115,14,0,0,0,12,1,12,3,18,9,12,3,
     12,3,12,3,12,3,114,251,0,0,0,99,0,0,0,0,
     0,0,0,0,0,0,0,0,5,0,0,0,64,0,0,0,
@@ -3328,7 +3328,7 @@
     101,114,95,99,97,99,104,101,218,6,118,97,108,117,101,115,
     114,60,0,0,0,114,74,1,0,0,41,2,114,11,1,0,
     0,218,6,102,105,110,100,101,114,114,4,0,0,0,114,4,
-    0,0,0,114,5,0,0,0,114,74,1,0,0,53,7,0,
+    0,0,0,114,5,0,0,0,114,74,1,0,0,54,7,0,
     0,115,6,0,0,0,0,4,22,1,15,1,122,28,80,97,
     116,104,70,105,110,100,101,114,46,105,110,118,97,108,105,100,
     97,116,101,95,99,97,99,104,101,115,99,2,0,0,0,0,
@@ -3353,7 +3353,7 @@
     0,0,0,114,153,0,0,0,41,3,114,11,1,0,0,114,
     35,0,0,0,90,4,104,111,111,107,114,4,0,0,0,114,
     4,0,0,0,114,5,0,0,0,218,11,95,112,97,116,104,
-    95,104,111,111,107,115,61,7,0,0,115,16,0,0,0,0,
+    95,104,111,111,107,115,62,7,0,0,115,16,0,0,0,0,
     7,9,1,19,1,16,1,3,1,14,1,13,1,12,2,122,
     22,80,97,116,104,70,105,110,100,101,114,46,95,112,97,116,
     104,95,104,111,111,107,115,99,2,0,0,0,0,0,0,0,
@@ -3383,7 +3383,7 @@
     11,1,0,0,114,35,0,0,0,114,77,1,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,218,20,95,
     112,97,116,104,95,105,109,112,111,114,116,101,114,95,99,97,
-    99,104,101,78,7,0,0,115,16,0,0,0,0,8,12,1,
+    99,104,101,79,7,0,0,115,16,0,0,0,0,8,12,1,
     15,1,3,1,17,1,13,1,15,1,18,1,122,31,80,97,
     116,104,70,105,110,100,101,114,46,95,112,97,116,104,95,105,
     109,112,111,114,116,101,114,95,99,97,99,104,101,99,3,0,
@@ -3402,7 +3402,7 @@
     0,114,169,0,0,0,114,170,0,0,0,114,177,0,0,0,
     114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,218,
     16,95,108,101,103,97,99,121,95,103,101,116,95,115,112,101,
-    99,95,7,0,0,115,18,0,0,0,0,4,15,1,24,2,
+    99,96,7,0,0,115,18,0,0,0,0,4,15,1,24,2,
     15,1,6,1,12,1,13,1,15,1,9,1,122,27,80,97,
     116,104,70,105,110,100,101,114,46,95,108,101,103,97,99,121,
     95,103,101,116,95,115,112,101,99,78,99,4,0,0,0,0,
@@ -3438,7 +3438,7 @@
     112,97,116,104,90,5,101,110,116,114,121,114,77,1,0,0,
     114,177,0,0,0,114,170,0,0,0,114,4,0,0,0,114,
     4,0,0,0,114,5,0,0,0,218,9,95,103,101,116,95,
-    115,112,101,99,110,7,0,0,115,40,0,0,0,0,5,6,
+    115,112,101,99,111,7,0,0,115,40,0,0,0,0,5,6,
     1,13,1,21,1,6,1,15,1,12,1,15,1,21,2,18,
     1,12,1,6,1,15,1,4,1,9,1,12,1,15,5,20,
     2,15,1,9,1,122,20,80,97,116,104,70,105,110,100,101,
@@ -3465,7 +3465,7 @@
     0,114,217,0,0,0,114,60,1,0,0,41,6,114,11,1,
     0,0,114,158,0,0,0,114,35,0,0,0,114,12,1,0,
     0,114,177,0,0,0,114,84,1,0,0,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,13,1,0,0,142,
+    114,4,0,0,0,114,5,0,0,0,114,13,1,0,0,143,
     7,0,0,115,26,0,0,0,0,4,12,1,12,1,21,1,
     12,1,4,1,15,1,9,1,6,3,9,1,24,1,4,2,
     7,2,122,20,80,97,116,104,70,105,110,100,101,114,46,102,
@@ -3488,7 +3488,7 @@
     0,114,169,0,0,0,41,4,114,11,1,0,0,114,158,0,
     0,0,114,35,0,0,0,114,177,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,14,1,0,0,
-    164,7,0,0,115,8,0,0,0,0,8,18,1,12,1,4,
+    165,7,0,0,115,8,0,0,0,0,8,18,1,12,1,4,
     1,122,22,80,97,116,104,70,105,110,100,101,114,46,102,105,
     110,100,95,109,111,100,117,108,101,41,12,114,57,0,0,0,
     114,56,0,0,0,114,58,0,0,0,114,59,0,0,0,114,
@@ -3496,7 +3496,7 @@
     1,0,0,114,81,1,0,0,114,85,1,0,0,114,13,1,
     0,0,114,14,1,0,0,114,4,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,73,1,0,0,
-    49,7,0,0,115,22,0,0,0,12,2,6,2,18,8,18,
+    50,7,0,0,115,22,0,0,0,12,2,6,2,18,8,18,
     17,18,17,18,15,3,1,18,31,3,1,21,21,3,1,114,
     73,1,0,0,99,0,0,0,0,0,0,0,0,0,0,0,
     0,3,0,0,0,64,0,0,0,115,133,0,0,0,101,0,
@@ -3545,7 +3545,7 @@
     3,0,100,0,0,83,41,1,78,114,4,0,0,0,41,2,
     114,22,0,0,0,114,57,1,0,0,41,1,114,169,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,77,0,0,0,
-    193,7,0,0,115,2,0,0,0,6,0,122,38,70,105,108,
+    194,7,0,0,115,2,0,0,0,6,0,122,38,70,105,108,
     101,70,105,110,100,101,114,46,95,95,105,110,105,116,95,95,
     46,60,108,111,99,97,108,115,62,46,60,103,101,110,101,120,
     112,114,62,114,116,0,0,0,114,29,0,0,0,78,114,138,
@@ -3557,7 +3557,7 @@
     71,0,0,0,114,35,0,0,0,218,14,108,111,97,100,101,
     114,95,100,101,116,97,105,108,115,90,7,108,111,97,100,101,
     114,115,114,127,0,0,0,114,4,0,0,0,41,1,114,169,
-    0,0,0,114,5,0,0,0,114,72,0,0,0,187,7,0,
+    0,0,0,114,5,0,0,0,114,72,0,0,0,188,7,0,
     0,115,16,0,0,0,0,4,6,1,19,1,36,1,9,2,
     15,1,9,1,12,1,122,19,70,105,108,101,70,105,110,100,
     101,114,46,95,95,105,110,105,116,95,95,99,1,0,0,0,
@@ -3568,7 +3568,7 @@
     116,105,109,101,46,114,29,0,0,0,78,114,138,0,0,0,
     41,1,114,88,1,0,0,41,1,114,71,0,0,0,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,114,74,1,
-    0,0,201,7,0,0,115,2,0,0,0,0,2,122,28,70,
+    0,0,202,7,0,0,115,2,0,0,0,0,2,122,28,70,
     105,108,101,70,105,110,100,101,114,46,105,110,118,97,108,105,
     100,97,116,101,95,99,97,99,104,101,115,99,2,0,0,0,
     0,0,0,0,3,0,0,0,3,0,0,0,67,0,0,0,
@@ -3592,7 +3592,7 @@
     0,114,169,0,0,0,114,220,0,0,0,41,3,114,71,0,
     0,0,114,158,0,0,0,114,177,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,165,0,0,0,
-    207,7,0,0,115,8,0,0,0,0,7,15,1,12,1,10,
+    208,7,0,0,115,8,0,0,0,0,7,15,1,12,1,10,
     1,122,22,70,105,108,101,70,105,110,100,101,114,46,102,105,
     110,100,95,108,111,97,100,101,114,99,6,0,0,0,0,0,
     0,0,7,0,0,0,7,0,0,0,67,0,0,0,115,40,
@@ -3603,7 +3603,7 @@
     7,114,71,0,0,0,114,243,0,0,0,114,158,0,0,0,
     114,35,0,0,0,114,228,0,0,0,114,12,1,0,0,114,
     169,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,85,1,0,0,219,7,0,0,115,6,0,0,
+    0,0,0,114,85,1,0,0,220,7,0,0,115,6,0,0,
     0,0,1,15,1,18,1,122,20,70,105,108,101,70,105,110,
     100,101,114,46,95,103,101,116,95,115,112,101,99,78,99,3,
     0,0,0,0,0,0,0,14,0,0,0,15,0,0,0,67,
@@ -3667,7 +3667,7 @@
     0,0,90,13,105,110,105,116,95,102,105,108,101,110,97,109,
     101,90,9,102,117,108,108,95,112,97,116,104,114,177,0,0,
     0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
-    114,13,1,0,0,224,7,0,0,115,68,0,0,0,0,3,
+    114,13,1,0,0,225,7,0,0,115,68,0,0,0,0,3,
     6,1,19,1,3,1,34,1,13,1,11,1,15,1,10,1,
     12,2,9,1,9,1,15,2,9,1,6,2,12,1,18,1,
     22,1,10,1,15,1,12,1,32,4,15,2,22,1,22,1,
@@ -3704,7 +3704,7 @@
     4,0,0,0,41,1,114,139,0,0,0,41,2,114,22,0,
     0,0,90,2,102,110,114,4,0,0,0,114,4,0,0,0,
     114,5,0,0,0,250,9,60,115,101,116,99,111,109,112,62,
-    42,8,0,0,115,2,0,0,0,9,0,122,41,70,105,108,
+    43,8,0,0,115,2,0,0,0,9,0,122,41,70,105,108,
     101,70,105,110,100,101,114,46,95,102,105,108,108,95,99,97,
     99,104,101,46,60,108,111,99,97,108,115,62,46,60,115,101,
     116,99,111,109,112,62,78,41,18,114,35,0,0,0,114,3,
@@ -3721,7 +3721,7 @@
     102,102,105,120,95,99,111,110,116,101,110,116,115,114,71,1,
     0,0,114,67,0,0,0,114,65,1,0,0,114,57,1,0,
     0,90,8,110,101,119,95,110,97,109,101,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,93,1,0,0,13,
+    114,4,0,0,0,114,5,0,0,0,114,93,1,0,0,14,
     8,0,0,115,34,0,0,0,0,2,9,1,3,1,31,1,
     22,3,11,3,18,1,18,7,9,1,13,1,24,1,6,1,
     27,2,6,1,17,1,9,1,18,1,122,22,70,105,108,101,
@@ -3761,14 +3761,14 @@
     0,41,2,114,11,1,0,0,114,92,1,0,0,114,4,0,
     0,0,114,5,0,0,0,218,24,112,97,116,104,95,104,111,
     111,107,95,102,111,114,95,70,105,108,101,70,105,110,100,101,
-    114,54,8,0,0,115,6,0,0,0,0,2,12,1,21,1,
+    114,55,8,0,0,115,6,0,0,0,0,2,12,1,21,1,
     122,54,70,105,108,101,70,105,110,100,101,114,46,112,97,116,
     104,95,104,111,111,107,46,60,108,111,99,97,108,115,62,46,
     112,97,116,104,95,104,111,111,107,95,102,111,114,95,70,105,
     108,101,70,105,110,100,101,114,114,4,0,0,0,41,3,114,
     11,1,0,0,114,92,1,0,0,114,99,1,0,0,114,4,
     0,0,0,41,2,114,11,1,0,0,114,92,1,0,0,114,
-    5,0,0,0,218,9,112,97,116,104,95,104,111,111,107,44,
+    5,0,0,0,218,9,112,97,116,104,95,104,111,111,107,45,
     8,0,0,115,4,0,0,0,0,10,21,6,122,20,70,105,
     108,101,70,105,110,100,101,114,46,112,97,116,104,95,104,111,
     111,107,99,1,0,0,0,0,0,0,0,1,0,0,0,2,
@@ -3777,7 +3777,7 @@
     16,70,105,108,101,70,105,110,100,101,114,40,123,33,114,125,
     41,41,2,114,47,0,0,0,114,35,0,0,0,41,1,114,
     71,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
-    0,0,0,114,101,0,0,0,62,8,0,0,115,2,0,0,
+    0,0,0,114,101,0,0,0,63,8,0,0,115,2,0,0,
     0,0,1,122,19,70,105,108,101,70,105,110,100,101,114,46,
     95,95,114,101,112,114,95,95,41,15,114,57,0,0,0,114,
     56,0,0,0,114,58,0,0,0,114,59,0,0,0,114,72,
@@ -3785,7 +3785,7 @@
     0,0,114,165,0,0,0,114,85,1,0,0,114,13,1,0,
     0,114,93,1,0,0,114,18,1,0,0,114,100,1,0,0,
     114,101,0,0,0,114,4,0,0,0,114,4,0,0,0,114,
-    4,0,0,0,114,5,0,0,0,114,86,1,0,0,178,7,
+    4,0,0,0,114,5,0,0,0,114,86,1,0,0,179,7,
     0,0,115,20,0,0,0,12,7,6,2,12,14,12,4,6,
     2,12,12,12,5,15,45,12,31,18,18,114,86,1,0,0,
     99,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,
@@ -3803,7 +3803,7 @@
     114,116,32,108,111,99,107,46,78,41,2,114,106,0,0,0,
     114,3,1,0,0,41,1,114,71,0,0,0,114,4,0,0,
     0,114,4,0,0,0,114,5,0,0,0,114,75,0,0,0,
-    72,8,0,0,115,2,0,0,0,0,2,122,28,95,73,109,
+    73,8,0,0,115,2,0,0,0,0,2,122,28,95,73,109,
     112,111,114,116,76,111,99,107,67,111,110,116,101,120,116,46,
     95,95,101,110,116,101,114,95,95,99,4,0,0,0,0,0,
     0,0,4,0,0,0,1,0,0,0,67,0,0,0,115,14,
@@ -3816,13 +3816,13 @@
     114,71,0,0,0,90,8,101,120,99,95,116,121,112,101,90,
     9,101,120,99,95,118,97,108,117,101,90,13,101,120,99,95,
     116,114,97,99,101,98,97,99,107,114,4,0,0,0,114,4,
-    0,0,0,114,5,0,0,0,114,81,0,0,0,76,8,0,
+    0,0,0,114,5,0,0,0,114,81,0,0,0,77,8,0,
     0,115,2,0,0,0,0,2,122,27,95,73,109,112,111,114,
     116,76,111,99,107,67,111,110,116,101,120,116,46,95,95,101,
     120,105,116,95,95,78,41,6,114,57,0,0,0,114,56,0,
     0,0,114,58,0,0,0,114,59,0,0,0,114,75,0,0,
     0,114,81,0,0,0,114,4,0,0,0,114,4,0,0,0,
-    114,4,0,0,0,114,5,0,0,0,114,101,1,0,0,68,
+    114,4,0,0,0,114,5,0,0,0,114,101,1,0,0,69,
     8,0,0,115,6,0,0,0,12,2,6,2,12,4,114,101,
     1,0,0,99,3,0,0,0,0,0,0,0,5,0,0,0,
     4,0,0,0,67,0,0,0,115,91,0,0,0,124,1,0,
@@ -3844,7 +3844,7 @@
     114,67,0,0,0,218,7,112,97,99,107,97,103,101,218,5,
     108,101,118,101,108,90,4,98,105,116,115,90,4,98,97,115,
     101,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
-    218,13,95,114,101,115,111,108,118,101,95,110,97,109,101,81,
+    218,13,95,114,101,115,111,108,118,101,95,110,97,109,101,82,
     8,0,0,115,10,0,0,0,0,2,22,1,18,1,15,1,
     10,1,114,104,1,0,0,99,3,0,0,0,0,0,0,0,
     4,0,0,0,3,0,0,0,67,0,0,0,115,47,0,0,
@@ -3855,7 +3855,7 @@
     4,114,77,1,0,0,114,67,0,0,0,114,35,0,0,0,
     114,169,0,0,0,114,4,0,0,0,114,4,0,0,0,114,
     5,0,0,0,218,17,95,102,105,110,100,95,115,112,101,99,
-    95,108,101,103,97,99,121,90,8,0,0,115,8,0,0,0,
+    95,108,101,103,97,99,121,91,8,0,0,115,8,0,0,0,
     0,3,18,1,12,1,4,1,114,105,1,0,0,99,3,0,
     0,0,0,0,0,0,9,0,0,0,27,0,0,0,67,0,
     0,0,115,34,1,0,0,116,0,0,106,1,0,115,28,0,
@@ -3888,7 +3888,7 @@
     115,95,114,101,108,111,97,100,114,77,1,0,0,114,13,1,
     0,0,114,177,0,0,0,114,179,0,0,0,114,208,0,0,
     0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
-    218,10,95,102,105,110,100,95,115,112,101,99,99,8,0,0,
+    218,10,95,102,105,110,100,95,115,112,101,99,100,8,0,0,
     115,48,0,0,0,0,2,9,1,19,4,15,1,16,1,10,
     1,3,1,13,1,13,1,18,1,12,1,11,2,24,1,12,
     2,22,1,13,1,3,1,13,1,13,4,9,2,12,1,4,
@@ -3926,7 +3926,7 @@
     114,41,4,114,67,0,0,0,114,102,1,0,0,114,103,1,
     0,0,114,171,0,0,0,114,4,0,0,0,114,4,0,0,
     0,114,5,0,0,0,218,13,95,115,97,110,105,116,121,95,
-    99,104,101,99,107,139,8,0,0,115,24,0,0,0,0,2,
+    99,104,101,99,107,140,8,0,0,115,24,0,0,0,0,2,
     15,1,30,1,12,1,15,1,6,1,15,1,15,1,15,1,
     6,2,27,1,19,1,114,110,1,0,0,122,16,78,111,32,
     109,111,100,117,108,101,32,110,97,109,101,100,32,122,4,123,
@@ -3964,7 +3964,7 @@
     108,101,114,171,0,0,0,114,177,0,0,0,114,179,0,0,
     0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
     218,23,95,102,105,110,100,95,97,110,100,95,108,111,97,100,
-    95,117,110,108,111,99,107,101,100,159,8,0,0,115,42,0,
+    95,117,110,108,111,99,107,101,100,160,8,0,0,115,42,0,
     0,0,0,1,6,1,19,1,6,1,15,1,16,2,15,1,
     11,1,13,1,3,1,13,1,13,1,22,1,26,1,15,1,
     12,1,30,2,18,1,6,2,13,1,32,1,114,113,1,0,
@@ -3979,7 +3979,7 @@
     0,114,113,1,0,0,41,2,114,67,0,0,0,114,112,1,
     0,0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,
     0,218,14,95,102,105,110,100,95,97,110,100,95,108,111,97,
-    100,186,8,0,0,115,4,0,0,0,0,2,13,1,114,114,
+    100,187,8,0,0,115,4,0,0,0,0,2,13,1,114,114,
     1,0,0,99,3,0,0,0,0,0,0,0,5,0,0,0,
     4,0,0,0,67,0,0,0,115,172,0,0,0,116,0,0,
     124,0,0,124,1,0,124,2,0,131,3,0,1,124,2,0,
@@ -4022,7 +4022,7 @@
     0,0,0,114,153,0,0,0,114,112,0,0,0,41,5,114,
     67,0,0,0,114,102,1,0,0,114,103,1,0,0,114,179,
     0,0,0,114,151,0,0,0,114,4,0,0,0,114,4,0,
-    0,0,114,5,0,0,0,114,115,1,0,0,192,8,0,0,
+    0,0,114,5,0,0,0,114,115,1,0,0,193,8,0,0,
     115,26,0,0,0,0,9,16,1,12,1,21,1,10,1,15,
     1,13,1,13,1,12,1,10,2,15,1,21,1,10,1,114,
     115,1,0,0,99,3,0,0,0,0,0,0,0,6,0,0,
@@ -4069,7 +4069,7 @@
     0,0,114,16,0,0,0,90,9,102,114,111,109,95,110,97,
     109,101,114,40,1,0,0,114,4,0,0,0,114,4,0,0,
     0,114,5,0,0,0,218,16,95,104,97,110,100,108,101,95,
-    102,114,111,109,108,105,115,116,216,8,0,0,115,34,0,0,
+    102,114,111,109,108,105,115,116,217,8,0,0,115,34,0,0,
     0,0,10,15,1,12,1,12,1,13,1,15,1,22,1,13,
     1,15,1,21,1,3,1,17,1,18,4,21,1,15,1,9,
     1,32,1,114,121,1,0,0,99,1,0,0,0,0,0,0,
@@ -4095,7 +4095,7 @@
     0,41,2,218,7,103,108,111,98,97,108,115,114,102,1,0,
     0,114,4,0,0,0,114,4,0,0,0,114,5,0,0,0,
     218,17,95,99,97,108,99,95,95,95,112,97,99,107,97,103,
-    101,95,95,248,8,0,0,115,12,0,0,0,0,7,15,1,
+    101,95,95,249,8,0,0,115,12,0,0,0,0,7,15,1,
     12,1,10,1,12,1,25,1,114,123,1,0,0,99,0,0,
     0,0,0,0,0,0,3,0,0,0,3,0,0,0,67,0,
     0,0,115,55,0,0,0,116,0,0,116,1,0,106,2,0,
@@ -4114,7 +4114,7 @@
     114,232,0,0,0,41,3,90,10,101,120,116,101,110,115,105,
     111,110,115,90,6,115,111,117,114,99,101,90,8,98,121,116,
     101,99,111,100,101,114,4,0,0,0,114,4,0,0,0,114,
-    5,0,0,0,114,240,0,0,0,7,9,0,0,115,8,0,
+    5,0,0,0,114,240,0,0,0,8,9,0,0,115,8,0,
     0,0,0,5,18,1,12,1,12,1,114,240,0,0,0,99,
     5,0,0,0,0,0,0,0,9,0,0,0,5,0,0,0,
     67,0,0,0,115,227,0,0,0,124,4,0,100,1,0,107,
@@ -4170,7 +4170,7 @@
     0,90,8,103,108,111,98,97,108,115,95,114,102,1,0,0,
     90,7,99,117,116,95,111,102,102,114,4,0,0,0,114,4,
     0,0,0,114,5,0,0,0,218,10,95,95,105,109,112,111,
-    114,116,95,95,18,9,0,0,115,26,0,0,0,0,11,12,
+    114,116,95,95,19,9,0,0,115,26,0,0,0,0,11,12,
     1,15,2,24,1,12,1,18,1,6,3,12,1,23,1,6,
     1,4,4,35,3,40,2,114,126,1,0,0,99,1,0,0,
     0,0,0,0,0,3,0,0,0,3,0,0,0,67,0,0,
@@ -4185,7 +4185,7 @@
     1,0,0,41,3,114,67,0,0,0,114,177,0,0,0,114,
     178,0,0,0,114,4,0,0,0,114,4,0,0,0,114,5,
     0,0,0,218,18,95,98,117,105,108,116,105,110,95,102,114,
-    111,109,95,110,97,109,101,53,9,0,0,115,10,0,0,0,
+    111,109,95,110,97,109,101,54,9,0,0,115,10,0,0,0,
     0,1,15,1,12,1,19,1,12,1,114,127,1,0,0,99,
     2,0,0,0,0,0,0,0,19,0,0,0,12,0,0,0,
     67,0,0,0,115,232,2,0,0,124,1,0,97,0,0,124,
@@ -4260,7 +4260,7 @@
     100,1,0,83,41,2,114,29,0,0,0,78,41,1,114,31,
     0,0,0,41,2,114,22,0,0,0,114,130,0,0,0,114,
     4,0,0,0,114,4,0,0,0,114,5,0,0,0,114,77,
-    0,0,0,105,9,0,0,115,2,0,0,0,6,0,122,25,
+    0,0,0,106,9,0,0,115,2,0,0,0,6,0,122,25,
     95,115,101,116,117,112,46,60,108,111,99,97,108,115,62,46,
     60,103,101,110,101,120,112,114,62,114,84,0,0,0,122,30,
     105,109,112,111,114,116,108,105,98,32,114,101,113,117,105,114,
@@ -4295,7 +4295,7 @@
     100,117,108,101,90,14,119,101,97,107,114,101,102,95,109,111,
     100,117,108,101,90,13,119,105,110,114,101,103,95,109,111,100,
     117,108,101,114,4,0,0,0,114,4,0,0,0,114,5,0,
-    0,0,218,6,95,115,101,116,117,112,61,9,0,0,115,108,
+    0,0,218,6,95,115,101,116,117,112,62,9,0,0,115,108,
     0,0,0,0,9,6,1,6,2,12,1,9,2,6,3,12,
     1,28,1,15,1,15,1,9,1,15,1,9,2,3,1,15,
     1,12,1,20,3,13,1,13,1,15,1,15,2,13,1,20,
@@ -4325,7 +4325,7 @@
     3,114,134,1,0,0,114,135,1,0,0,90,17,115,117,112,
     112,111,114,116,101,100,95,108,111,97,100,101,114,115,114,4,
     0,0,0,114,4,0,0,0,114,5,0,0,0,218,8,95,
-    105,110,115,116,97,108,108,148,9,0,0,115,16,0,0,0,
+    105,110,115,116,97,108,108,149,9,0,0,115,16,0,0,0,
     0,2,13,1,9,1,28,1,16,1,16,1,15,1,19,1,
     114,137,1,0,0,41,3,122,3,119,105,110,114,1,0,0,
     0,114,2,0,0,0,41,92,114,59,0,0,0,114,10,0,
@@ -4364,7 +4364,7 @@
     0,6,17,6,3,12,12,12,5,12,5,12,6,12,12,12,
     10,12,9,12,5,12,7,15,22,12,8,12,4,15,4,19,
     20,6,2,6,3,22,4,19,68,19,21,19,19,12,19,12,
-    20,12,114,22,1,18,2,6,2,9,2,9,1,9,2,15,
+    20,12,115,22,1,18,2,6,2,9,2,9,1,9,2,15,
     27,12,23,12,19,12,12,18,8,12,18,12,11,12,11,12,
     17,12,16,21,55,21,12,18,10,12,14,12,36,19,27,19,
     106,24,22,9,3,12,1,15,63,18,45,19,230,15,25,19,
diff --git a/Python/marshal.c b/Python/marshal.c
index dc5411c..ca64be3 100644
--- a/Python/marshal.c
+++ b/Python/marshal.c
@@ -13,8 +13,6 @@
 #include "code.h"
 #include "marshal.h"
 
-#define ABS(x) ((x) < 0 ? -(x) : (x))
-
 /* High water mark to determine when the marshalled object is dangerously deep
  * and risks coring the interpreter.  When the object stack gets this deep,
  * raise an exception instead of continuing.
@@ -192,7 +190,7 @@
     }
 
     /* set l to number of base PyLong_MARSHAL_BASE digits */
-    n = ABS(Py_SIZE(ob));
+    n = Py_ABS(Py_SIZE(ob));
     l = (n-1) * PyLong_MARSHAL_RATIO;
     d = ob->ob_digit[n-1];
     assert(d != 0); /* a PyLong is always normalized */
@@ -727,8 +725,8 @@
         return NULL;
     }
 
-    size = 1 + (ABS(n) - 1) / PyLong_MARSHAL_RATIO;
-    shorts_in_top_digit = 1 + (ABS(n) - 1) % PyLong_MARSHAL_RATIO;
+    size = 1 + (Py_ABS(n) - 1) / PyLong_MARSHAL_RATIO;
+    shorts_in_top_digit = 1 + (Py_ABS(n) - 1) % PyLong_MARSHAL_RATIO;
     ob = _PyLong_New(size);
     if (ob == NULL)
         return NULL;
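
The marshal.c hunks only swap the module-local ABS macro for the shared Py_ABS macro when counting the 15-bit marshal digits of a PyLong; the serialized format is unchanged. A minimal Python sketch (not part of the patch) that exercises the w_PyLong()/r_PyLong() paths touched here by round-tripping negative and multi-digit integers:

    import marshal

    # Size/sign handling in w_PyLong()/r_PyLong() -- now via Py_ABS -- is
    # what serializes these values.
    for n in (0, 1, -1, 2**100, -(2**100)):
        assert marshal.loads(marshal.dumps(n)) == n
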
diff --git a/Python/opcode_targets.h b/Python/opcode_targets.h
index f90a17a..1553a7a 100644
--- a/Python/opcode_targets.h
+++ b/Python/opcode_targets.h
@@ -15,8 +15,8 @@
     &&_unknown_opcode,
     &&_unknown_opcode,
     &&TARGET_UNARY_INVERT,
-    &&_unknown_opcode,
-    &&_unknown_opcode,
+    &&TARGET_BINARY_MATRIX_MULTIPLY,
+    &&TARGET_INPLACE_MATRIX_MULTIPLY,
     &&_unknown_opcode,
     &&TARGET_BINARY_POWER,
     &&TARGET_BINARY_MULTIPLY,
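
The two new computed-goto targets correspond to the matrix-multiplication opcodes behind the `@` operator (PEP 465). A small sketch, assuming a 3.5+ interpreter where the operator exists, showing the opcode in generated bytecode and a class that implements `__matmul__`:

    import dis

    class M:
        def __matmul__(self, other):
            return "matmul called"

    dis.dis(compile("a @ b", "<demo>", "eval"))   # shows BINARY_MATRIX_MULTIPLY
    print(M() @ M())                              # "matmul called"
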
diff --git a/Python/pythonrun.c b/Python/pythonrun.c
index 0327830..b2d5464 100644
--- a/Python/pythonrun.c
+++ b/Python/pythonrun.c
@@ -15,6 +15,7 @@
 #include "ast.h"
 #include "marshal.h"
 #include "osdefs.h"
+#include <locale.h>
 
 #ifdef HAVE_SIGNAL_H
 #include <signal.h>
@@ -25,7 +26,6 @@
 #endif
 
 #ifdef HAVE_LANGINFO_H
-#include <locale.h>
 #include <langinfo.h>
 #endif
 
@@ -1160,6 +1160,15 @@
     encoding = _Py_StandardStreamEncoding;
     errors = _Py_StandardStreamErrors;
     if (!encoding || !errors) {
+        if (!errors) {
+            /* When the LC_CTYPE locale is the POSIX locale ("C locale"),
+               stdin and stdout use the surrogateescape error handler by
+               default, instead of the strict error handler. */
+            char *loc = setlocale(LC_CTYPE, NULL);
+            if (loc != NULL && strcmp(loc, "C") == 0)
+                errors = "surrogateescape";
+        }
+
         pythonioencoding = Py_GETENV("PYTHONIOENCODING");
         if (pythonioencoding) {
             char *err;
@@ -1172,7 +1181,7 @@
             if (err) {
                 *err = '\0';
                 err++;
-                if (*err && !errors) {
+                if (*err && !_Py_StandardStreamErrors) {
                     errors = err;
                 }
             }
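
With this change, when the LC_CTYPE locale is the POSIX "C" locale and PYTHONIOENCODING does not force an error handler, stdin and stdout fall back to surrogateescape instead of strict. A hedged check from Python, assuming a POSIX environment where LC_ALL=C selects the C locale:

    import os, subprocess, sys

    env = dict(os.environ, LC_ALL="C")
    env.pop("PYTHONIOENCODING", None)
    out = subprocess.check_output(
        [sys.executable, "-c", "import sys; print(sys.stdin.errors, sys.stdout.errors)"],
        env=env)
    print(out)   # expected on a 3.5+ POSIX build: b'surrogateescape surrogateescape\n'
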
diff --git a/Python/random.c b/Python/random.c
index 2941ba1..a052b65 100644
--- a/Python/random.c
+++ b/Python/random.c
@@ -15,8 +15,6 @@
 #endif
 
 #ifdef MS_WINDOWS
-/* This handle is never explicitly released. Instead, the operating
-   system will release it when the process terminates. */
 static HCRYPTPROV hCryptProv = 0;
 
 static int
@@ -298,7 +296,12 @@
 void
 _PyRandom_Fini(void)
 {
-#ifndef MS_WINDOWS
+#ifdef MS_WINDOWS
+    if (hCryptProv) {
+        CryptReleaseContext(hCryptProv, 0);
+        hCryptProv = 0;
+    }
+#else
     dev_urandom_close();
 #endif
 }
diff --git a/Python/thread_foobar.h b/Python/thread_foobar.h
index d2b78c5..ea96f9c 100644
--- a/Python/thread_foobar.h
+++ b/Python/thread_foobar.h
@@ -1,4 +1,3 @@
-
 /*
  * Initialization.
  */
@@ -61,10 +60,18 @@
 int
 PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
 {
+    return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, 0);
+}
+
+PyLockStatus
+PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
+                            int intr_flag)
+{
     int success;
 
-    dprintf(("PyThread_acquire_lock(%p, %d) called\n", lock, waitflag));
-    dprintf(("PyThread_acquire_lock(%p, %d) -> %d\n", lock, waitflag, success));
+    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n", lock, microseconds, intr_flag));
+    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
+	     lock, microseconds, intr_flag, success));
     return success;
 }
 
@@ -73,3 +80,53 @@
 {
     dprintf(("PyThread_release_lock(%p) called\n", lock));
 }
+
+/* The following are only needed if native TLS support exists */
+#define Py_HAVE_NATIVE_TLS
+
+#ifdef Py_HAVE_NATIVE_TLS
+int
+PyThread_create_key(void)
+{
+    int result;
+    return result;
+}
+
+void
+PyThread_delete_key(int key)
+{
+
+}
+
+int
+PyThread_set_key_value(int key, void *value)
+{
+    int ok;
+
+    /* A failure in this case returns -1 */
+    if (!ok)
+        return -1;
+    return 0;
+}
+
+void *
+PyThread_get_key_value(int key)
+{
+    void *result;
+
+    return result;
+}
+
+void
+PyThread_delete_key_value(int key)
+{
+
+}
+
+void
+PyThread_ReInitTLS(void)
+{
+
+}
+
+#endif
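
thread_foobar.h is only the template for porting the low-level thread layer, so its bodies are deliberately stubs; the additions mirror the interface real ports provide: a timed lock acquisition and the native TLS hooks. On a platform with a real thread implementation, the timed acquisition is what backs lock timeouts at the Python level, as in this minimal sketch:

    import _thread

    lock = _thread.allocate_lock()
    lock.acquire()
    # A second acquisition with a timeout goes through the platform's
    # PyThread_acquire_lock_timed() instead of blocking forever.
    print(lock.acquire(timeout=0.1))   # False: timed out
    lock.release()
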
diff --git a/README b/README
index f39866c..1b6cd2c 100644
--- a/README
+++ b/README
@@ -1,5 +1,5 @@
-This is Python version 3.4.1
-============================
+This is Python version 3.5.0 alpha 1
+====================================
 
 Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
 2012, 2013, 2014 Python Software Foundation.  All rights reserved.
@@ -52,9 +52,9 @@
 ----------
 
 We try to have a comprehensive overview of the changes in the "What's New in
-Python 3.4" document, found at
+Python 3.5" document, found at
 
-    http://docs.python.org/3.4/whatsnew/3.4.html
+    http://docs.python.org/3.5/whatsnew/3.5.html
 
 For a more detailed change log, read Misc/NEWS (though this file, too, is
 incomplete, and also doesn't list anything merged in from the 2.7 release under
@@ -67,9 +67,9 @@
 Documentation
 -------------
 
-Documentation for Python 3.4 is online, updated daily:
+Documentation for Python 3.5 is online, updated daily:
 
-    http://docs.python.org/3.4/
+    http://docs.python.org/3.5/
 
 It can also be downloaded in many formats for faster access.  The documentation
 is downloadable in HTML, PDF, and reStructuredText formats; the latter version
@@ -94,7 +94,7 @@
 A source-to-source translation tool, "2to3", can take care of the mundane task
 of converting large amounts of source code.  It is not a complete solution but
 is complemented by the deprecation warnings in 2.6.  See
-http://docs.python.org/3.4/library/2to3.html for more information.
+http://docs.python.org/3.5/library/2to3.html for more information.
 
 
 Testing
@@ -132,7 +132,7 @@
 Install that version using "make install".  Install all other versions using
 "make altinstall".
 
-For example, if you want to install Python 2.6, 2.7 and 3.4 with 2.7 being the
+For example, if you want to install Python 2.6, 2.7 and 3.5 with 2.7 being the
 primary version, you would execute "make install" in your 2.7 build directory
 and "make altinstall" in the others.
 
diff --git a/Tools/buildbot/README.tcltk-AMD64 b/Tools/buildbot/README.tcltk-AMD64
deleted file mode 100644
index edc89eb..0000000
--- a/Tools/buildbot/README.tcltk-AMD64
+++ /dev/null
@@ -1,36 +0,0 @@
-Comments on building tcl/tk for AMD64 with the MS SDK compiler
-==============================================================
-
-I did have to build tcl/tk manually.
-
-First, I had to build the nmakehlp.exe helper utility manually by executing
-   cl nmakehlp.c /link bufferoverflowU.lib
-in both the tcl8.4.12\win and tk8.4.12\win directories.
-
-Second, the AMD64 compiler refuses to compile the file
-tcl8.4.12\generic\tclExecute.c because it insists on using intrinsics
-for the 'ceil' and 'floor' functions:
-
-  ..\generic\tclExecute.c(394) : error C2099: initializer is not a constant
-  ..\generic\tclExecute.c(398) : error C2099: initializer is not a constant
-
-I did comment out these lines; an alternative would have been to use
-the /Oi- compiler flag to disable the intrinsic functions.
-The commands then used were these:
-
-   svn export http://svn.python.org/projects/external/tcl8.4.12
-   cd tcl8.4.12\win
-   REM
-   echo patch the tcl8.4.12\generic\tclExecute.c file
-   pause 
-   REM
-   cl nmakehlp.c /link bufferoverflowU.lib
-   nmake -f makefile.vc MACHINE=AMD64
-   nmake -f makefile.vc INSTALLDIR=..\..\tcltk install
-   cd ..\..
-   svn export http://svn.python.org/projects/external/tk8.4.12
-   cd tk8.4.12\win
-   cl nmakehlp.c /link bufferoverflowU.lib
-   nmake -f makefile.vc TCLDIR=..\..\tcl8.4.12 MACHINE=AMD64
-   nmake -f makefile.vc TCLDIR=..\..\tcl8.4.12 INSTALLDIR=..\..\tcltk install
-   cd ..\..
diff --git a/Tools/buildbot/external-amd64.bat b/Tools/buildbot/external-amd64.bat
index 4c3b67b..f859770 100644
--- a/Tools/buildbot/external-amd64.bat
+++ b/Tools/buildbot/external-amd64.bat
@@ -2,20 +2,5 @@
 
 @rem Assume we start inside the Python source directory
 call "Tools\buildbot\external-common.bat"
-call "%VS100COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64
 
-if not exist tcltk64\bin\tcl86tg.dll (
-    cd tcl-8.6.1.0\win
-    nmake -f makefile.vc DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 clean all
-    nmake -f makefile.vc DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 install
-    cd ..\..
-)
-
-if not exist tcltk64\bin\tk86tg.dll (
-    cd tk-8.6.1.0\win    
-    nmake -f makefile.vc OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.6.1.0 clean
-    nmake -f makefile.vc OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.6.1.0 all
-    nmake -f makefile.vc OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.6.1.0 install
-    cd ..\..
-)
 
diff --git a/Tools/buildbot/external-common.bat b/Tools/buildbot/external-common.bat
index 53e3daf..48004a7 100644
--- a/Tools/buildbot/external-common.bat
+++ b/Tools/buildbot/external-common.bat
@@ -15,7 +15,7 @@
 @rem if exist tk-8.4.18.1 rd /s/q tk-8.4.18.1
 @rem if exist db-4.4.20 rd /s/q db-4.4.20
 @rem if exist openssl-1.0.1e rd /s/q openssl-1.0.1g
-@rem if exist sqlite-3.7.12 rd /s/q sqlite-3.7.12    
+@rem if exist sqlite-3.7.12 rd /s/q sqlite-3.7.12
 
 @rem bzip
 if not exist bzip2-1.0.6 (
@@ -29,12 +29,13 @@
     svn export http://svn.python.org/projects/external/openssl-1.0.1g
 )
 
-@rem tcl/tk
+@rem tcl/tk/tix
 if not exist tcl-8.6.1.0 (
-   rd /s/q tcltk tcltk64 tcl-8.5.11.0 tk-8.5.11.0
+   rd /s/q tcltk tcltk64 tcl-8.5.11.0 tk-8.5.11.0 tix-8.4.3.3
    svn export http://svn.python.org/projects/external/tcl-8.6.1.0
 )
 if not exist tk-8.6.1.0 svn export http://svn.python.org/projects/external/tk-8.6.1.0
+if not exist tix-8.4.3.4 svn export http://svn.python.org/projects/external/tix-8.4.3.4
 
 @rem sqlite3
 if not exist sqlite-3.8.3.1 (
diff --git a/Tools/buildbot/external.bat b/Tools/buildbot/external.bat
index c580a14..22dfdc5 100644
--- a/Tools/buildbot/external.bat
+++ b/Tools/buildbot/external.bat
@@ -2,20 +2,4 @@
 
 @rem Assume we start inside the Python source directory
 call "Tools\buildbot\external-common.bat"
-call "%VS100COMNTOOLS%\vsvars32.bat"
 
-if not exist tcltk\bin\tcl86tg.dll (
-    @rem all and install need to be separate invocations, otherwise nmakehlp is not found on install
-    cd tcl-8.6.1.0\win
-    nmake -f makefile.vc DEBUG=1 INSTALLDIR=..\..\tcltk clean all 
-    nmake -f makefile.vc DEBUG=1 INSTALLDIR=..\..\tcltk install
-    cd ..\..
-)
-
-if not exist tcltk\bin\tk86tg.dll (
-    cd tk-8.6.1.0\win
-    nmake -f makefile.vc OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.6.1.0 clean
-    nmake -f makefile.vc OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.6.1.0 all
-    nmake -f makefile.vc OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.6.1.0 install
-    cd ..\..
-)
diff --git a/Tools/buildbot/test-amd64.bat b/Tools/buildbot/test-amd64.bat
index de64f25..11bfcbb 100644
--- a/Tools/buildbot/test-amd64.bat
+++ b/Tools/buildbot/test-amd64.bat
@@ -1,3 +1,6 @@
 @rem Used by the buildbot "test" step.
+
+rem The following line should be removed before #20035 is closed
+set TCL_LIBRARY=%CD%\..\tcltk64\lib\tcl8.6
 cd PCbuild
 call rt.bat -d -q -x64 -uall -rwW -n --timeout=3600 %1 %2 %3 %4 %5 %6 %7 %8 %9
diff --git a/Tools/buildbot/test.bat b/Tools/buildbot/test.bat
index 4e4db10..6b98c79 100644
--- a/Tools/buildbot/test.bat
+++ b/Tools/buildbot/test.bat
@@ -1,3 +1,6 @@
 @rem Used by the buildbot "test" step.
+
+rem The following line should be removed before #20035 is closed
+set TCL_LIBRARY=%CD%\..\tcltk\lib\tcl8.6
 cd PCbuild
 call rt.bat -d -q -uall -rwW -n --timeout=3600 %1 %2 %3 %4 %5 %6 %7 %8 %9
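
The TCL_LIBRARY override in both test batch files is a stop-gap so the Tcl built from the renamed tcl-8.6 externals can locate its script library during the test run (to be dropped once #20035 is fixed). A hedged check, assuming a build where _tkinter is available, that Tcl resolves its library directory:

    import os, tkinter

    print(os.environ.get("TCL_LIBRARY"))        # what the buildbot scripts set
    print(tkinter.Tcl().eval("info library"))   # where Tcl actually found its scripts
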
diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py
index e399dde..1c627a7 100644
--- a/Tools/msi/msi.py
+++ b/Tools/msi/msi.py
@@ -925,8 +925,8 @@
     shutil.copyfileobj(open("crtlicense.txt"), out)
     for name, pat, file in (("bzip2","bzip2-*", "LICENSE"),
                       ("openssl", "openssl-*", "LICENSE"),
-                      ("Tcl", "tcl8*", "license.terms"),
-                      ("Tk", "tk8*", "license.terms"),
+                      ("Tcl", "tcl-8*", "license.terms"),
+                      ("Tk", "tk-8*", "license.terms"),
                       ("Tix", "tix-*", "license.terms")):
         out.write("\nThis copy of Python includes a copy of %s, which is licensed under the following terms:\n\n" % name)
         dirs = glob.glob(srcdir+"/../"+pat)
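
The license-collection step in the installer script now globs for the dash-separated directory names used by the current external checkouts (tcl-8.6.1.0, tk-8.6.1.0) rather than the old tcl8*/tk8* layout. A small illustration of the difference using fnmatch with the directory names checked out by the buildbot scripts above:

    import fnmatch

    externals = ["tcl-8.6.1.0", "tk-8.6.1.0", "tix-8.4.3.4"]
    print(fnmatch.filter(externals, "tcl8*"))    # [] -- old pattern no longer matches
    print(fnmatch.filter(externals, "tcl-8*"))   # ['tcl-8.6.1.0']
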
diff --git a/Tools/parser/unparse.py b/Tools/parser/unparse.py
index 837cd81..258c648 100644
--- a/Tools/parser/unparse.py
+++ b/Tools/parser/unparse.py
@@ -401,7 +401,7 @@
         self.dispatch(t.operand)
         self.write(")")
 
-    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+    binop = { "Add":"+", "Sub":"-", "Mult":"*", "MatMult":"@", "Div":"/", "Mod":"%",
                     "LShift":"<<", "RShift":">>", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
                     "FloorDiv":"//", "Pow": "**"}
     def _BinOp(self, t):
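
The unparser's binop table gains a MatMult entry so that ast.MatMult nodes round-trip back to the `@` operator. A minimal sketch, assuming a 3.5+ ast module that knows about MatMult:

    import ast

    tree = ast.parse("a @ b", mode="eval")
    op = tree.body.op
    print(type(op).__name__)    # 'MatMult' -- the key the binop table maps to "@"
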
diff --git a/Tools/scripts/generate_opcode_h.py b/Tools/scripts/generate_opcode_h.py
new file mode 100644
index 0000000..efa18a1
--- /dev/null
+++ b/Tools/scripts/generate_opcode_h.py
@@ -0,0 +1,54 @@
+# This script generates the opcode.h header file.
+
+from __future__ import with_statement
+
+import sys
+header = """/* Auto-generated by Tools/scripts/generate_opcode_h.py */
+#ifndef Py_OPCODE_H
+#define Py_OPCODE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+    /* Instruction opcodes for compiled code */
+"""
+
+footer = """
+/* EXCEPT_HANDLER is a special, implicit block type which is created when
+   entering an except handler. It is not an opcode but we define it here
+   as we want it to be available to both frameobject.c and ceval.c, while
+   remaining private.*/
+#define EXCEPT_HANDLER 257
+
+
+enum cmp_op {PyCmp_LT=Py_LT, PyCmp_LE=Py_LE, PyCmp_EQ=Py_EQ, PyCmp_NE=Py_NE,
+                PyCmp_GT=Py_GT, PyCmp_GE=Py_GE, PyCmp_IN, PyCmp_NOT_IN,
+                PyCmp_IS, PyCmp_IS_NOT, PyCmp_EXC_MATCH, PyCmp_BAD};
+
+#define HAS_ARG(op) ((op) >= HAVE_ARGUMENT)
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_OPCODE_H */
+"""
+
+
+def main(opcode_py, outfile='Include/opcode.h'):
+    opcode = {}
+    exec(open(opcode_py).read(), opcode)
+    opmap = opcode['opmap']
+    with open(outfile, 'w') as fobj:
+        fobj.write(header)
+        for name in opcode['opname']:
+            if name in opmap:
+                fobj.write("#define %-20s\t%-3s\n" % (name, opmap[name]))
+            if name == 'POP_EXCEPT': # Special entry for HAVE_ARGUMENT
+                fobj.write("#define %-20s\t%-3d\n" %
+                            ('HAVE_ARGUMENT', opcode['HAVE_ARGUMENT']))
+        fobj.write(footer)
+
+
+if __name__ == '__main__':
+    main(sys.argv[1], sys.argv[2])
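
The script reads Lib/opcode.py and writes Include/opcode.h; the build machinery invokes it through the new OPCODEHGEN substitution in configure. A hedged manual invocation, assuming the current directory is a CPython source checkout and writing to a scratch file rather than the real header:

    import subprocess, sys

    subprocess.check_call([sys.executable, "Tools/scripts/generate_opcode_h.py",
                           "Lib/opcode.py", "/tmp/opcode.h"])
    # First line should read: /* Auto-generated by Tools/scripts/generate_opcode_h.py */
    print(open("/tmp/opcode.h").readline())
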
diff --git a/Tools/scripts/run_tests.py b/Tools/scripts/run_tests.py
index a6c5da3..490a37e 100644
--- a/Tools/scripts/run_tests.py
+++ b/Tools/scripts/run_tests.py
@@ -33,8 +33,6 @@
     # Allow user-specified interpreter options to override our defaults.
     args.extend(test.support.args_from_interpreter_flags())
 
-    # Workaround for issue #20355
-    os.environ.pop("PYTHONWARNINGS", None)
     # Workaround for issue #20361
     args.extend(['-W', 'error::BytesWarning'])
 
diff --git a/configure b/configure
index 3757cc6..fbd4c24 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for python 3.4.
+# Generated by GNU Autoconf 2.69 for python 3.5.
 #
 # Report bugs to <http://bugs.python.org/>.
 #
@@ -580,8 +580,8 @@
 # Identity of this package.
 PACKAGE_NAME='python'
 PACKAGE_TARNAME='python'
-PACKAGE_VERSION='3.4'
-PACKAGE_STRING='python 3.4'
+PACKAGE_VERSION='3.5'
+PACKAGE_STRING='python 3.5'
 PACKAGE_BUGREPORT='http://bugs.python.org/'
 PACKAGE_URL=''
 
@@ -670,6 +670,7 @@
 INSTALL_DATA
 INSTALL_SCRIPT
 INSTALL_PROGRAM
+OPCODEHGEN
 PYTHON
 ASDLGEN
 ac_ct_READELF
@@ -795,6 +796,7 @@
 enable_profiling
 with_pydebug
 with_hash_algorithm
+with_address_sanitizer
 with_libs
 with_system_expat
 with_system_ffi
@@ -1368,7 +1370,7 @@
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures python 3.4 to adapt to many kinds of systems.
+\`configure' configures python 3.5 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1433,7 +1435,7 @@
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of python 3.4:";;
+     short | recursive ) echo "Configuration of python 3.5:";;
    esac
   cat <<\_ACEOF
 
@@ -1471,6 +1473,8 @@
   --with-pydebug          build with Py_DEBUG defined
   --with-hash-algorithm=[fnv|siphash24]
                           select hash algorithm
+  --with-address-sanitizer
+                          enable AddressSanitizer
   --with-libs='lib1 ...'  link against additional libs
   --with-system-expat     build pyexpat module using an installed expat
                           library
@@ -1580,7 +1584,7 @@
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-python configure 3.4
+python configure 3.5
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2419,7 +2423,7 @@
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by python $as_me 3.4, which was
+It was created by python $as_me 3.5, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -2989,7 +2993,7 @@
 mv confdefs.h.new confdefs.h
 
 
-VERSION=3.4
+VERSION=3.5
 
 # Version number of Python's own shared library file.
 
@@ -6048,6 +6052,57 @@
 fi
 
 
+for ac_prog in python$PACKAGE_VERSION python3 python
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_PYTHON+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$PYTHON"; then
+  ac_cv_prog_PYTHON="$PYTHON" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_PYTHON="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+PYTHON=$ac_cv_prog_PYTHON
+if test -n "$PYTHON"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5
+$as_echo "$PYTHON" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$PYTHON" && break
+done
+test -n "$PYTHON" || PYTHON="not-found"
+
+if test "$PYTHON" = not-found; then
+    OPCODEHGEN="@echo python: $PYTHON! cannot run Tools/scripts/generate_opcode_h.py"
+else
+    OPCODEHGEN="$PYTHON"
+fi
+
+
+
 case $MACHDEP in
 bsdos*|hp*|HP*)
 	# install -d does not work on BSDI or HP-UX
@@ -9102,6 +9157,23 @@
 fi
 
 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-address-sanitizer" >&5
+$as_echo_n "checking for --with-address-sanitizer... " >&6; }
+
+# Check whether --with-address_sanitizer was given.
+if test "${with_address_sanitizer+set}" = set; then :
+  withval=$with_address_sanitizer;
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $withval" >&5
+$as_echo "$withval" >&6; }
+BASECFLAGS="-fsanitize=address -fno-omit-frame-pointer $BASECFLAGS"
+LDFLAGS="-fsanitize=address $LDFLAGS"
+
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
 # Most SVR4 platforms (e.g. Solaris) need -lsocket and -lnsl.
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for t_open in -lnsl" >&5
 $as_echo_n "checking for t_open in -lnsl... " >&6; }
@@ -13169,6 +13241,38 @@
 
 fi
 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we can use gcc inline assembler to get and set mc68881 fpcr" >&5
+$as_echo_n "checking whether we can use gcc inline assembler to get and set mc68881 fpcr... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  unsigned int fpcr;
+  __asm__ __volatile__ ("fmove.l %%fpcr,%0" : "=g" (fpcr));
+  __asm__ __volatile__ ("fmove.l %0,%%fpcr" : : "g" (fpcr));
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  have_gcc_asm_for_mc68881=yes
+else
+  have_gcc_asm_for_mc68881=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_gcc_asm_for_mc68881" >&5
+$as_echo "$have_gcc_asm_for_mc68881" >&6; }
+if test "$have_gcc_asm_for_mc68881" = yes
+then
+
+$as_echo "#define HAVE_GCC_ASM_FOR_MC68881 1" >>confdefs.h
+
+fi
+
 # Detect whether system arithmetic is subject to x87-style double
 # rounding issues.  The result of this test has little meaning on non
 # IEEE 754 platforms.  On IEEE 754, test should return 1 if rounding
@@ -15912,7 +16016,7 @@
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by python $as_me 3.4, which was
+This file was extended by python $as_me 3.5, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -15974,7 +16078,7 @@
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-python config.status 3.4
+python config.status 3.5
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
diff --git a/configure.ac b/configure.ac
index 217fffd..a6f613a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,7 +3,7 @@
 dnl ***********************************************
 
 # Set VERSION so we only need to edit in one place (i.e., here)
-m4_define(PYTHON_VERSION, 3.4)
+m4_define(PYTHON_VERSION, 3.5)
 
 AC_PREREQ(2.65)
 
@@ -1036,6 +1036,15 @@
     ASDLGEN="$PYTHON"
 fi
 
+AC_SUBST(OPCODEHGEN)
+AC_CHECK_PROGS(PYTHON, python$PACKAGE_VERSION python3 python, not-found)
+if test "$PYTHON" = not-found; then
+    OPCODEHGEN="@echo python: $PYTHON! cannot run Tools/scripts/generate_opcode_h.py"
+else
+    OPCODEHGEN="$PYTHON"
+fi
+
+
 
 case $MACHDEP in
 bsdos*|hp*|HP*)
@@ -2305,6 +2314,17 @@
 ],
 [AC_MSG_RESULT(default)])
 
+AC_MSG_CHECKING(for --with-address-sanitizer)
+AC_ARG_WITH(address_sanitizer,
+            AS_HELP_STRING([--with-address-sanitizer],
+                           [enable AddressSanitizer]),
+[
+AC_MSG_RESULT($withval)
+BASECFLAGS="-fsanitize=address -fno-omit-frame-pointer $BASECFLAGS"
+LDFLAGS="-fsanitize=address $LDFLAGS"
+],
+[AC_MSG_RESULT(no)])
+
 # Most SVR4 platforms (e.g. Solaris) need -lsocket and -lnsl.
 AC_CHECK_LIB(nsl, t_open, [LIBS="-lnsl $LIBS"]) # SVR4
 AC_CHECK_LIB(socket, socket, [LIBS="-lsocket $LIBS"], [], $LIBS) # SVR4 sockets
@@ -3790,6 +3810,19 @@
     [Define if we can use gcc inline assembler to get and set x87 control word])
 fi
 
+AC_MSG_CHECKING(whether we can use gcc inline assembler to get and set mc68881 fpcr)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[
+  unsigned int fpcr;
+  __asm__ __volatile__ ("fmove.l %%fpcr,%0" : "=g" (fpcr));
+  __asm__ __volatile__ ("fmove.l %0,%%fpcr" : : "g" (fpcr));
+]])],[have_gcc_asm_for_mc68881=yes],[have_gcc_asm_for_mc68881=no])
+AC_MSG_RESULT($have_gcc_asm_for_mc68881)
+if test "$have_gcc_asm_for_mc68881" = yes
+then
+    AC_DEFINE(HAVE_GCC_ASM_FOR_MC68881, 1,
+    [Define if we can use gcc inline assembler to get and set mc68881 fpcr])
+fi
+
 # Detect whether system arithmetic is subject to x87-style double
 # rounding issues.  The result of this test has little meaning on non
 # IEEE 754 platforms.  On IEEE 754, test should return 1 if rounding
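
The new --with-address-sanitizer option simply prepends -fsanitize=address -fno-omit-frame-pointer to BASECFLAGS and -fsanitize=address to LDFLAGS. One hedged way to confirm from a finished build that the flags were picked up is via sysconfig:

    import sysconfig

    for var in ("BASECFLAGS", "LDFLAGS"):
        print(var, "=", sysconfig.get_config_var(var))
    # On a --with-address-sanitizer build both values should contain -fsanitize=address.
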
diff --git a/pyconfig.h.in b/pyconfig.h.in
index 3574f67..e469f6a 100644
--- a/pyconfig.h.in
+++ b/pyconfig.h.in
@@ -313,6 +313,9 @@
 /* Define to 1 if you have the `gamma' function. */
 #undef HAVE_GAMMA
 
+/* Define if we can use gcc inline assembler to get and set mc68881 fpcr */
+#undef HAVE_GCC_ASM_FOR_MC68881
+
 /* Define if we can use x64 gcc inline assembler */
 #undef HAVE_GCC_ASM_FOR_X64
 
diff --git a/setup.py b/setup.py
index c0bb513..e6b4d51 100644
--- a/setup.py
+++ b/setup.py
@@ -167,6 +167,7 @@
     def __init__(self, dist):
         build_ext.__init__(self, dist)
         self.failed = []
+        self.failed_on_import = []
 
     def build_extensions(self):
 
@@ -247,8 +248,9 @@
         build_ext.build_extensions(self)
 
         longest = max([len(e.name) for e in self.extensions])
-        if self.failed:
-            longest = max(longest, max([len(name) for name in self.failed]))
+        if self.failed or self.failed_on_import:
+            all_failed = self.failed + self.failed_on_import
+            longest = max(longest, max([len(name) for name in all_failed]))
 
         def print_three_column(lst):
             lst.sort(key=str.lower)
@@ -276,6 +278,14 @@
             print_three_column(failed)
             print()
 
+        if self.failed_on_import:
+            failed = self.failed_on_import[:]
+            print()
+            print("Following modules built successfully"
+                  " but were removed because they could not be imported:")
+            print_three_column(failed)
+            print()
+
     def build_extension(self, ext):
 
         if ext.name == '_ctypes':
@@ -334,7 +344,7 @@
         try:
             importlib._bootstrap._SpecMethods(spec).load()
         except ImportError as why:
-            self.failed.append(ext.name)
+            self.failed_on_import.append(ext.name)
             self.announce('*** WARNING: renaming "%s" since importing it'
                           ' failed: %s' % (ext.name, why), level=3)
             assert not self.inplace
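
setup.py now distinguishes extensions that failed to build from extensions that built but could not be imported, and reports the two lists separately at the end of the build. A minimal standalone sketch of that reporting pattern (not the actual distutils machinery; the module names are only placeholders):

    import importlib

    failed, failed_on_import = [], []
    for name in ("math", "no_such_extension"):
        try:
            importlib.import_module(name)
        except ImportError:
            failed_on_import.append(name)

    if failed_on_import:
        print("Following modules built successfully"
              " but were removed because they could not be imported:")
        for name in sorted(failed_on_import, key=str.lower):
            print("   ", name)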