merge heads
diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst
index b7e87ab..90a01d0 100644
--- a/Doc/library/subprocess.rst
+++ b/Doc/library/subprocess.rst
@@ -735,7 +735,7 @@
 to receive a SIGPIPE if p2 exits before p1.
 
 Alternatively, for trusted input, the shell's own pipeline support may still
-be used directly:
+be used directly::
 
    output=`dmesg | grep hda`
    # becomes
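
For reference, a minimal sketch (not part of this patch) of the subprocess
equivalent of that shell pipeline, following the example in the surrounding
docs; closing p1.stdout is what lets p1 receive the SIGPIPE mentioned above:

    from subprocess import Popen, PIPE

    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    p1.stdout.close()  # allow p1 to receive SIGPIPE if p2 exits first
    output = p2.communicate()[0]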
diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst
index 9b3affd..0738e22 100644
--- a/Doc/library/threading.rst
+++ b/Doc/library/threading.rst
@@ -430,7 +430,7 @@
    are blocked waiting for the lock to become unlocked, allow exactly one of them
    to proceed.
 
-   Do not call this method when the lock is unlocked.
+   When invoked on an unlocked lock, a :exc:`ThreadError` is raised.
 
    There is no return value.
 
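
For reference, a minimal sketch (not part of this patch) of the behaviour the
new sentence documents; threading.ThreadError is caught here, which also
covers interpreters where that name is an alias of RuntimeError:

    import threading

    lock = threading.Lock()
    lock.acquire()
    lock.release()          # fine: the lock was held
    try:
        lock.release()      # the lock is already unlocked
    except threading.ThreadError as exc:
        print("releasing an unlocked lock failed:", exc)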
diff --git a/Lib/idlelib/tabbedpages.py b/Lib/idlelib/tabbedpages.py
index f791702..2557732 100644
--- a/Lib/idlelib/tabbedpages.py
+++ b/Lib/idlelib/tabbedpages.py
@@ -78,7 +78,7 @@
     def remove_tab(self, tab_name):
         """Remove the tab named <tab_name>"""
         if not tab_name in self._tab_names:
-            raise KeyError("No such Tab: '%s" % page_name)
+            raise KeyError("No such Tab: '%s'" % tab_name)
 
         self._tab_names.remove(tab_name)
         self._arrange_tabs()
@@ -88,7 +88,7 @@
         if tab_name == self._selected_tab:
             return
         if tab_name is not None and tab_name not in self._tabs:
-            raise KeyError("No such Tab: '%s" % page_name)
+            raise KeyError("No such Tab: '%s'" % tab_name)
 
         # deselect the current selected tab
         if self._selected_tab is not None:
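
The bug fixed above is that the error path referenced an undefined variable,
so callers saw a NameError instead of the intended KeyError.  A minimal
illustration (hypothetical function, not IDLE code):

    def select_tab(tab_name, tabs):
        if tab_name not in tabs:
            # With an undefined name (e.g. page_name) in the message, a
            # NameError is raised while building the string, masking the
            # KeyError that was meant to be raised.
            raise KeyError("No such Tab: '%s'" % tab_name)
        return tabs[tab_name]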
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index fa6f7b3..8bb0a3b 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -101,6 +101,10 @@
     if sys.platform != 'win32' and family == 'AF_PIPE':
         raise ValueError('Family %s is not recognized.' % family)
 
+    if sys.platform == 'win32' and family == 'AF_UNIX':
+        # double check: only reject AF_UNIX if the socket module lacks it
+        if not hasattr(socket, family):
+            raise ValueError('Family %s is not recognized.' % family)
 
 def address_type(address):
     '''
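
For context, a sketch of how Listener addresses map to families (assumed
usage, mirroring the multiprocessing docs; the check added above only affects
the AF_UNIX case on Windows):

    from multiprocessing.connection import Listener

    # The family is normally deduced from the address format:
    #   ('host', port)    -> 'AF_INET'
    #   '/some/filename'  -> 'AF_UNIX'  (Unix only)
    #   r'\\.\pipe\name'  -> 'AF_PIPE'  (Windows only)
    listener = Listener(('localhost', 0))   # AF_INET on any free port
    print(listener.address)
    listener.close()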
diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py
index 8de7a8d..298faf7 100644
--- a/Lib/test/test_multiprocessing.py
+++ b/Lib/test/test_multiprocessing.py
@@ -2331,6 +2331,12 @@
         with self.assertRaises(ValueError):
             multiprocessing.connection.Listener(r'\\.\test')
 
+    @unittest.skipUnless(WIN32, "Windows-only test")
+    def test_invalid_family_win32(self):
+        with self.assertRaises(ValueError):
+            multiprocessing.connection.Listener('/var/test.pipe')
+
+
 testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
                    TestStdinBadfiledescriptor, TestInvalidFamily]
 
diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py
index 1682124..cfe13ac 100644
--- a/Lib/test/test_tools.py
+++ b/Lib/test/test_tools.py
@@ -5,8 +5,11 @@
 """
 
 import os
+import sys
+import imp
 import unittest
 import sysconfig
+import tempfile
 from test import support
 from test.script_helper import assert_python_ok
 
@@ -17,10 +20,11 @@
 
 srcdir = sysconfig.get_config_var('projectbase')
 basepath = os.path.join(os.getcwd(), srcdir, 'Tools')
+scriptsdir = os.path.join(basepath, 'scripts')
 
 
 class ReindentTests(unittest.TestCase):
-    script = os.path.join(basepath, 'scripts', 'reindent.py')
+    script = os.path.join(scriptsdir, 'reindent.py')
 
     def test_noargs(self):
         assert_python_ok(self.script)
@@ -31,8 +35,73 @@
         self.assertGreater(err, b'')
 
 
+class TestSundryScripts(unittest.TestCase):
+    # At least make sure the rest don't have syntax errors.  When tests are
+    # added for a script it should be added to the whitelist below.
+
+    # scripts that have independent tests.
+    whitelist = ['reindent.py']
+    # scripts that can't be imported without running
+    blacklist = ['make_ctype.py']
+    # scripts that use windows-only modules
+    windows_only = ['win_add2path.py']
+    # blacklisted for other reasons
+    other = ['analyze_dxp.py']
+
+    skiplist = blacklist + whitelist + windows_only + other
+
+    def setUp(self):
+        cm = support.DirsOnSysPath(scriptsdir)
+        cm.__enter__()
+        self.addCleanup(cm.__exit__)
+
+    def test_sundry(self):
+        for fn in os.listdir(scriptsdir):
+            if fn.endswith('.py') and fn not in self.skiplist:
+                __import__(fn[:-3])
+
+    @unittest.skipIf(sys.platform != "win32", "Windows-only test")
+    def test_sundry_windows(self):
+        for fn in self.windows_only:
+            __import__(fn[:-3])
+
+    @unittest.skipIf(not support.threading, "test requires _thread module")
+    def test_analyze_dxp_import(self):
+        if hasattr(sys, 'getdxp'):
+            import analyze_dxp
+        else:
+            with self.assertRaises(RuntimeError):
+                import analyze_dxp
+
+
+class PdepsTests(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        path = os.path.join(scriptsdir, 'pdeps.py')
+        cls.pdeps = imp.load_source('pdeps', path)
+
+    @classmethod
+    def tearDownClass(cls):
+        if 'pdeps' in sys.modules:
+            del sys.modules['pdeps']
+
+    def test_process_errors(self):
+        # Issue #14492: m_import.match(line) can be None.
+        with tempfile.TemporaryDirectory() as tmpdir:
+            fn = os.path.join(tmpdir, 'foo')
+            with open(fn, 'w') as stream:
+                stream.write("#!/this/will/fail")
+            self.pdeps.process(fn, {})
+
+    def test_inverse_attribute_error(self):
+        # Issue #14492: this used to fail with an AttributeError.
+        self.pdeps.inverse({'a': []})
+
+
 def test_main():
-    support.run_unittest(ReindentTests)
+    support.run_unittest(*[obj for obj in globals().values()
+                               if isinstance(obj, type)])
 
 
 if __name__ == '__main__':
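
A minimal sketch of the import-by-path mechanism PdepsTests relies on
(imp.load_source; the path below is hypothetical):

    import imp

    # Load a single-file script as a module object without putting its
    # directory on sys.path; TestSundryScripts instead uses
    # support.DirsOnSysPath(scriptsdir) plus a plain __import__.
    pdeps = imp.load_source('pdeps', '/path/to/Tools/scripts/pdeps.py')
    print(hasattr(pdeps, 'process'), hasattr(pdeps, 'inverse'))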
diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py
index 928e1de..5ae20a8 100644
--- a/Lib/tkinter/ttk.py
+++ b/Lib/tkinter/ttk.py
@@ -1253,7 +1253,7 @@
 
 
     def exists(self, item):
-        """Returns True if the specified item is present in the three,
+        """Returns True if the specified item is present in the tree,
         False otherwise."""
         return bool(self.tk.call(self._w, "exists", item))
 
diff --git a/Misc/NEWS b/Misc/NEWS
index 18a8d73..4af242a 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -39,6 +39,13 @@
 Library
 -------
 
+- Issue #14496: Fix a wrong variable name used in the error messages in
+  idlelib/tabbedpages.py.  Patch by Popa Claudiu.
+
+- Issue #14482: Raise a ValueError, not a NameError, when trying to create
+  a multiprocessing Client or Listener with an AF_UNIX type address under
+  Windows.  Patch by Popa Claudiu.
+
 - Issue #14151: Raise a ValueError, not a NameError, when trying to create
   a multiprocessing Client or Listener with an AF_PIPE type address under
   non-Windows platforms.  Patch by Popa Claudiu.
diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py
index 4d96c8b..ab0ba42 100755
--- a/Tools/scripts/abitype.py
+++ b/Tools/scripts/abitype.py
@@ -3,34 +3,6 @@
 # Usage: abitype.py < old_code > new_code
 import re, sys
 
-############ Simplistic C scanner ##################################
-tokenizer = re.compile(
-    r"(?P<preproc>#.*\n)"
-    r"|(?P<comment>/\*.*?\*/)"
-    r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
-    r"|(?P<ws>[ \t\n]+)"
-    r"|(?P<other>.)",
-    re.MULTILINE)
-
-tokens = []
-source = sys.stdin.read()
-pos = 0
-while pos != len(source):
-    m = tokenizer.match(source, pos)
-    tokens.append([m.lastgroup, m.group()])
-    pos += len(tokens[-1][1])
-    if tokens[-1][0] == 'preproc':
-        # continuation lines are considered
-        # only in preprocess statements
-        while tokens[-1][1].endswith('\\\n'):
-            nl = source.find('\n', pos)
-            if nl == -1:
-                line = source[pos:]
-            else:
-                line = source[pos:nl+1]
-            tokens[-1][1] += line
-            pos += len(line)
-
 ###### Replacement of PyTypeObject static instances ##############
 
 # classify each token, giving it a one-letter code:
@@ -79,7 +51,7 @@
     while tokens[pos][0] in ('ws', 'comment'):
         pos += 1
     if tokens[pos][1] != 'PyVarObject_HEAD_INIT':
-        raise Exception, '%s has no PyVarObject_HEAD_INIT' % name
+        raise Exception('%s has no PyVarObject_HEAD_INIT' % name)
     while tokens[pos][1] != ')':
         pos += 1
     pos += 1
@@ -183,18 +155,48 @@
     return '\n'.join(res)
 
 
-# Main loop: replace all static PyTypeObjects until
-# there are none left.
-while 1:
-    c = classify()
-    m = re.search('(SW)?TWIW?=W?{.*?};', c)
-    if not m:
-        break
-    start = m.start()
-    end = m.end()
-    name, fields = get_fields(start, m)
-    tokens[start:end] = [('',make_slots(name, fields))]
+if __name__ == '__main__':
 
-# Output result to stdout
-for t, v in tokens:
-    sys.stdout.write(v)
+    ############ Simplistic C scanner ##################################
+    tokenizer = re.compile(
+        r"(?P<preproc>#.*\n)"
+        r"|(?P<comment>/\*.*?\*/)"
+        r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
+        r"|(?P<ws>[ \t\n]+)"
+        r"|(?P<other>.)",
+        re.MULTILINE)
+
+    tokens = []
+    source = sys.stdin.read()
+    pos = 0
+    while pos != len(source):
+        m = tokenizer.match(source, pos)
+        tokens.append([m.lastgroup, m.group()])
+        pos += len(tokens[-1][1])
+        if tokens[-1][0] == 'preproc':
+            # continuation lines are considered
+            # only in preprocess statements
+            while tokens[-1][1].endswith('\\\n'):
+                nl = source.find('\n', pos)
+                if nl == -1:
+                    line = source[pos:]
+                else:
+                    line = source[pos:nl+1]
+                tokens[-1][1] += line
+                pos += len(line)
+
+    # Main loop: replace all static PyTypeObjects until
+    # there are none left.
+    while 1:
+        c = classify()
+        m = re.search('(SW)?TWIW?=W?{.*?};', c)
+        if not m:
+            break
+        start = m.start()
+        end = m.end()
+        name, fields = get_fields(start, m)
+        tokens[start:end] = [('', make_slots(name, fields))]
+
+    # Output result to stdout
+    for t, v in tokens:
+        sys.stdout.write(v)
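
The point of moving the script body under the __main__ guard is that
test_tools (above) imports every script in Tools/scripts just to check for
syntax errors; module-level code that reads sys.stdin would otherwise run at
import time.  A generic sketch of the pattern (not taken from abitype.py):

    import sys

    def run(stream):
        # the script's real work, factored out so it can also be tested
        return stream.read().upper()

    if __name__ == '__main__':
        # executed only when run as a script, never on a bare import
        sys.stdout.write(run(sys.stdin))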
diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py
index 443f052..7a86603 100755
--- a/Tools/scripts/find_recursionlimit.py
+++ b/Tools/scripts/find_recursionlimit.py
@@ -106,14 +106,16 @@
     else:
         print("Yikes!")
 
-limit = 1000
-while 1:
-    check_limit(limit, "test_recurse")
-    check_limit(limit, "test_add")
-    check_limit(limit, "test_repr")
-    check_limit(limit, "test_init")
-    check_limit(limit, "test_getattr")
-    check_limit(limit, "test_getitem")
-    check_limit(limit, "test_cpickle")
-    print("Limit of %d is fine" % limit)
-    limit = limit + 100
+if __name__ == '__main__':
+
+    limit = 1000
+    while 1:
+        check_limit(limit, "test_recurse")
+        check_limit(limit, "test_add")
+        check_limit(limit, "test_repr")
+        check_limit(limit, "test_init")
+        check_limit(limit, "test_getattr")
+        check_limit(limit, "test_getitem")
+        check_limit(limit, "test_cpickle")
+        print("Limit of %d is fine" % limit)
+        limit = limit + 100
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index 77607ce..a494a48 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -76,29 +76,31 @@
     -c: recognize Python source files trying to compile them
     -d: debug output""" % sys.argv[0]
 
-try:
-    opts, args = getopt.getopt(sys.argv[1:], 'cd')
-except getopt.error as msg:
-    print(msg, file=sys.stderr)
-    print(usage, file=sys.stderr)
-    sys.exit(1)
+if __name__ == '__main__':
 
-is_python = pysource.looks_like_python
-debug = False
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'cd')
+    except getopt.error as msg:
+        print(msg, file=sys.stderr)
+        print(usage, file=sys.stderr)
+        sys.exit(1)
 
-for o, a in opts:
-    if o == '-c':
-        is_python = pysource.can_be_compiled
-    elif o == '-d':
-        debug = True
+    is_python = pysource.looks_like_python
+    debug = False
 
-if not args:
-    print(usage, file=sys.stderr)
-    sys.exit(1)
+    for o, a in opts:
+        if o == '-c':
+            is_python = pysource.can_be_compiled
+        elif o == '-d':
+            debug = True
 
-for fullpath in pysource.walk_python_files(args, is_python):
-    if debug:
-        print("Testing for coding: %s" % fullpath)
-    result = needs_declaration(fullpath)
-    if result:
-        print(fullpath)
+    if not args:
+        print(usage, file=sys.stderr)
+        sys.exit(1)
+
+    for fullpath in pysource.walk_python_files(args, is_python):
+        if debug:
+            print("Testing for coding: %s" % fullpath)
+        result = needs_declaration(fullpath)
+        if result:
+            print(fullpath)
diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py
index 2d4cd1a..87e2a09 100755
--- a/Tools/scripts/fixcid.py
+++ b/Tools/scripts/fixcid.py
@@ -292,7 +292,7 @@
         if not words: continue
         if len(words) == 3 and words[0] == 'struct':
             words[:2] = [words[0] + ' ' + words[1]]
-        elif len(words) <> 2:
+        elif len(words) != 2:
             err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
             continue
         if Reverse:
diff --git a/Tools/scripts/md5sum.py b/Tools/scripts/md5sum.py
index 743da72..521960c 100755
--- a/Tools/scripts/md5sum.py
+++ b/Tools/scripts/md5sum.py
@@ -20,7 +20,7 @@
 import sys
 import os
 import getopt
-import md5
+from hashlib import md5
 
 def sum(*files):
     sts = 0
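
The md5 module no longer exists in Python 3; hashlib provides the equivalent
digest object.  A minimal sketch of the replacement API imported above:

    from hashlib import md5

    h = md5()
    h.update(b"some data")    # hashlib digests operate on bytes
    print(h.hexdigest())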
diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py
index 5b0f1c6..a042d1c 100755
--- a/Tools/scripts/parseentities.py
+++ b/Tools/scripts/parseentities.py
@@ -13,7 +13,6 @@
 
 """
 import re,sys
-import TextTools
 
 entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
 
@@ -45,7 +44,7 @@
                 charcode = repr(charcode)
         else:
             charcode = repr(charcode)
-        comment = TextTools.collapse(comment)
+        comment = ' '.join(comment.split())
         f.write("    '%s':\t%s,  \t# %s\n" % (name,charcode,comment))
     f.write('\n}\n')
 
diff --git a/Tools/scripts/pdeps.py b/Tools/scripts/pdeps.py
index 938f31c..f8218ac 100755
--- a/Tools/scripts/pdeps.py
+++ b/Tools/scripts/pdeps.py
@@ -76,10 +76,9 @@
             nextline = fp.readline()
             if not nextline: break
             line = line[:-1] + nextline
-        if m_import.match(line) >= 0:
-            (a, b), (a1, b1) = m_import.regs[:2]
-        elif m_from.match(line) >= 0:
-            (a, b), (a1, b1) = m_from.regs[:2]
+        m_found = m_import.match(line) or m_from.match(line)
+        if m_found:
+            (a, b), (a1, b1) = m_found.regs[:2]
         else: continue
         words = line[a1:b1].split(',')
         # print '#', line, words
@@ -87,6 +86,7 @@
             word = word.strip()
             if word not in list:
                 list.append(word)
+    fp.close()
 
 
 # Compute closure (this is in fact totally general)
@@ -123,7 +123,7 @@
 def inverse(table):
     inv = {}
     for key in table.keys():
-        if not inv.has_key(key):
+        if key not in inv:
             inv[key] = []
         for item in table[key]:
             store(inv, item, key)
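
The pdeps change above replaces the old idiom, apparently dating from the
regex module where a failed match() returned -1, with the re semantics where
a failed match() returns None (so "match(line) >= 0" raises TypeError on
Python 3).  A minimal illustration with hypothetical stand-ins for m_import
and m_from:

    import re

    m_import = re.compile(r'import\s+(\S+)')
    m_from = re.compile(r'from\s+(\S+)\s+import')

    line = 'from os import path'
    m_found = m_import.match(line) or m_from.match(line)
    if m_found:
        print(m_found.group(1))    # -> 'os'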