Marc-Andre's third try at this bulk patch seems to work (except that
his copy of test_contains.py seems to be broken -- the lines he
deleted were already absent).  Checkin messages:


New Unicode support for int(), float(), complex() and long().

- new APIs PyInt_FromUnicode() and PyLong_FromUnicode()
- added support for Unicode to PyFloat_FromString()
- new encoding API PyUnicode_EncodeDecimal() which converts
  Unicode to a decimal char* string (used in the above new
  APIs)
- shortcuts for calls like int(<int object>) and float(<float object>)
- tests for all of the above (see the sketch after this list)
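
For illustration, the new constructors accept Unicode objects just like
8-bit strings (a rough sketch of the intended behaviour, based on the
tests in the patch below; not part of the patch itself):

    assert int(u"10") == 10
    assert int(u"10", 16) == 16
    assert long(u"-3") == -3L
    assert float(u"  3.14  ") == 3.14
    assert complex(u"  3.14+J  ") == 3.14+1j
    # Non-ASCII decimal digits are first mapped to ASCII by the new
    # PyUnicode_EncodeDecimal() API:
    assert float(u"  \u0663.\u0661\u0664  ") == 3.14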

Unicode compares and contains checks:
- comparing Unicode and non-string types now works; TypeErrors
  are masked, while all other errors, such as ValueError during
  Unicode coercion, are passed through (note that PyUnicode_Compare
  does not implement the masking -- PyObject_Compare does this)
- contains now works for non-string types too; TypeErrors are
  masked and 0 is returned; all other errors are passed through
  (see the sketch after this list)
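
At the Python level the contains behaviour looks roughly like this
(examples taken from the updated test_unicode.py below):

    # Mixed-type containers: TypeErrors raised while coercing the
    # non-string elements are masked, so those elements are skipped.
    assert (u'a' in (1, None, 'a')) == 1
    assert ('a' in (1, None, u'a')) == 1
    assert ('a' in ('x', 1, None)) == 0
    # Membership between 8-bit and Unicode strings works both ways.
    assert (u'a' in 'bdba') == 1
    assert (u'a' in u'bdb') == 0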

Better testing support for the standard codecs.
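
The new codec tests use a simple round-trip check; roughly (a
trimmed-down version of the loop added to test_unicode.py below):

    s = ''.join(map(chr, range(128)))
    for encoding in ('cp437', 'latin_1', 'koi8_r'):
        try:
            assert unicode(s, encoding).encode(encoding) == s
        except AssertionError:
            print '*** codec "%s" failed round-trip' % encoding
        except ValueError, why:
            print '*** codec for "%s" failed: %s' % (encoding, why)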

Misc minor enhancements, such as an alias dbcs for the mbcs codec.
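
The alias is simply a new entry in the encodings alias table, e.g.:

    from encodings import aliases
    # 'dbcs' now resolves to the 'mbcs' codec module (the codec itself
    # is only available on Windows builds).
    assert aliases.aliases['dbcs'] == 'mbcs'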

Changes:
- PyLong_FromString() now applies the same error checks as
  PyInt_FromString() does: trailing garbage is reported as an
  error and is no longer silently ignored. The only characters
  allowed to trail the digits are 'L' and 'l' -- these are
  still silently ignored (see the sketch after this list).
- the string.ato?() functions now interface directly to int(),
  long() and float(). The error strings are slightly different,
  but the error type remains the same. These functions are now
  ready to be declared obsolete ;-)
- PyNumber_Int() now also checks for embedded NULL characters
  in the input string; PyNumber_Long() already did this (and
  still does).
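
At the Python level these changes amount to roughly the following
(sketch only; the exact error messages may differ):

    import string

    # Trailing garbage after the digits is now rejected by long()...
    try:
        long("10x")
        raise AssertionError, "trailing garbage not caught"
    except ValueError:
        pass
    # ...but a trailing 'L' or 'l' is still silently ignored.
    assert long("10L") == 10L

    # string.ato?() simply forwards to the builtins now.
    assert string.atoi("  10  ") == 10
    assert string.atoi("10", 16) == 16
    assert string.atof("3.14") == 3.14
    assert string.atol("1" + "0"*20) == 10L**20

    # Embedded NULL characters are rejected by int() as well as long().
    try:
        int("123" + chr(0) + "4")
        raise AssertionError, "embedded NULL not caught"
    except ValueError:
        pass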

Followed by:

Looks like I've gone a step too far there... (and test_contains.py
seems to have a bug too).

I've changed back to reporting all errors in PyUnicode_Contains()
and added a few more test cases to test_contains.py (plus corrected
the join() NameError).
diff --git a/Lib/encodings/__init__.py b/Lib/encodings/__init__.py
index cd5876e..fa3119a 100644
--- a/Lib/encodings/__init__.py
+++ b/Lib/encodings/__init__.py
@@ -4,8 +4,8 @@
     directory.
 
     Codec modules must have names corresponding to standard lower-case
-    encoding names. Hyphens are automatically converted to
-    underscores, e.g. 'utf-8' is looked up as module utf_8.
+    encoding names with hyphens mapped to underscores, e.g. 'utf-8' is
+    implemented by the module 'utf_8.py'.
 
     Each codec module must export the following interface:
 
@@ -40,7 +40,7 @@
         return entry
 
     # Import the module
-    modname = string.replace(encoding,'-','_')
+    modname = string.replace(encoding, '-', '_')
     modname = aliases.aliases.get(modname,modname)
     try:
         mod = __import__(modname,globals(),locals(),'*')
diff --git a/Lib/encodings/aliases.py b/Lib/encodings/aliases.py
index d99b38e..1214cf9 100644
--- a/Lib/encodings/aliases.py
+++ b/Lib/encodings/aliases.py
@@ -54,4 +54,7 @@
     'macroman': 'mac_roman',
     'macturkish': 'mac_turkish',
 
+    # MBCS
+    'dbcs': 'mbcs',
+
 }
diff --git a/Lib/string.py b/Lib/string.py
index 5eb5697..5f90723 100644
--- a/Lib/string.py
+++ b/Lib/string.py
@@ -196,14 +196,11 @@
     Return the floating point number represented by the string s.
 
     """
-    if type(s) == _StringType:
-        return _float(s)
-    else:
-        raise TypeError('argument 1: expected string, %s found' %
-                        type(s).__name__)
+    return _float(s)
+
 
 # Convert string to integer
-def atoi(*args):
+def atoi(s, base=10):
     """atoi(s [,base]) -> int
 
     Return the integer represented by the string s in the given
@@ -214,23 +211,11 @@
     accepted.
 
     """
-    try:
-        s = args[0]
-    except IndexError:
-        raise TypeError('function requires at least 1 argument: %d given' %
-                        len(args))
-    # Don't catch type error resulting from too many arguments to int().  The
-    # error message isn't compatible but the error type is, and this function
-    # is complicated enough already.
-    if type(s) == _StringType:
-        return _apply(_int, args)
-    else:
-        raise TypeError('argument 1: expected string, %s found' %
-                        type(s).__name__)
+    return _int(s, base)
 
 
 # Convert string to long integer
-def atol(*args):
+def atol(s, base=10):
     """atol(s [,base]) -> long
 
     Return the long integer represented by the string s in the
@@ -242,19 +227,7 @@
     unless base is 0.
 
     """
-    try:
-        s = args[0]
-    except IndexError:
-        raise TypeError('function requires at least 1 argument: %d given' %
-                        len(args))
-    # Don't catch type error resulting from too many arguments to long().  The
-    # error message isn't compatible but the error type is, and this function
-    # is complicated enough already.
-    if type(s) == _StringType:
-        return _apply(_long, args)
-    else:
-        raise TypeError('argument 1: expected string, %s found' %
-                        type(s).__name__)
+    return _long(s, base)
 
 
 # Left-justify a string
diff --git a/Lib/test/output/test_unicode b/Lib/test/output/test_unicode
index 88e8624..15ffdc1 100644
--- a/Lib/test/output/test_unicode
+++ b/Lib/test/output/test_unicode
@@ -3,3 +3,4 @@
 Testing Unicode contains method... done.
 Testing Unicode formatting strings... done.
 Testing builtin codecs... done.
+Testing standard mapping codecs... 0-127... 128-255... done.
diff --git a/Lib/test/test_b1.py b/Lib/test/test_b1.py
index d9c6320..6a89d22 100644
--- a/Lib/test/test_b1.py
+++ b/Lib/test/test_b1.py
@@ -95,6 +95,7 @@
 if complex(0j, 3.14) <> 3.14j: raise TestFailed, 'complex(0j, 3.14)'
 if complex(0.0, 3.14) <> 3.14j: raise TestFailed, 'complex(0.0, 3.14)'
 if complex("  3.14+J  ") <> 3.14+1j:  raise TestFailed, 'complex("  3.14+J  )"'
+if complex(u"  3.14+J  ") <> 3.14+1j:  raise TestFailed, 'complex(u"  3.14+J  )"'
 class Z:
     def __complex__(self): return 3.14j
 z = Z()
@@ -208,6 +209,9 @@
 if float(314) <> 314.0: raise TestFailed, 'float(314)'
 if float(314L) <> 314.0: raise TestFailed, 'float(314L)'
 if float("  3.14  ") <> 3.14:  raise TestFailed, 'float("  3.14  ")'
+if float(u"  3.14  ") <> 3.14:  raise TestFailed, 'float(u"  3.14  ")'
+if float(u"  \u0663.\u0661\u0664  ") <> 3.14:
+    raise TestFailed, 'float(u"  \u0663.\u0661\u0664  ")'
 
 print 'getattr'
 import sys
@@ -254,6 +258,9 @@
 if int(-3.9) <> -3: raise TestFailed, 'int(-3.9)'
 if int(3.5) <> 3: raise TestFailed, 'int(3.5)'
 if int(-3.5) <> -3: raise TestFailed, 'int(-3.5)'
+# Different base:
+if int("10",16) <> 16L: raise TestFailed, 'int("10",16)'
+if int(u"10",16) <> 16L: raise TestFailed, 'int(u"10",16)'
 # Test conversion fron strings and various anomalies
 L = [
         ('0', 0),
@@ -267,9 +274,28 @@
         ('314 ', 314),
         ('  \t\t  314  \t\t  ', 314),
         (`sys.maxint`, sys.maxint),
+        ('  1x', ValueError),
+        ('  1  ', 1),
+        ('  1\02  ', ValueError),
         ('', ValueError),
         (' ', ValueError),
         ('  \t\t  ', ValueError),
+        (u'0', 0),
+        (u'1', 1),
+        (u'9', 9),
+        (u'10', 10),
+        (u'99', 99),
+        (u'100', 100),
+        (u'314', 314),
+        (u' 314', 314),
+        (u'\u0663\u0661\u0664 ', 314),
+        (u'  \t\t  314  \t\t  ', 314),
+        (u'  1x', ValueError),
+        (u'  1  ', 1),
+        (u'  1\02  ', ValueError),
+        (u'', ValueError),
+        (u' ', ValueError),
+        (u'  \t\t  ', ValueError),
 ]
 for s, v in L:
     for sign in "", "+", "-":
@@ -349,10 +375,17 @@
 if long(-3.9) <> -3L: raise TestFailed, 'long(-3.9)'
 if long(3.5) <> 3L: raise TestFailed, 'long(3.5)'
 if long(-3.5) <> -3L: raise TestFailed, 'long(-3.5)'
+if long("-3") <> -3L: raise TestFailed, 'long("-3")'
+if long(u"-3") <> -3L: raise TestFailed, 'long(u"-3")'
+# Different base:
+if long("10",16) <> 16L: raise TestFailed, 'long("10",16)'
+if long(u"10",16) <> 16L: raise TestFailed, 'long(u"10",16)'
 # Check conversions from string (same test set as for int(), and then some)
 LL = [
         ('1' + '0'*20, 10L**20),
         ('1' + '0'*100, 10L**100),
+        (u'1' + u'0'*20, 10L**20),
+        (u'1' + u'0'*100, 10L**100),
 ]
 for s, v in L + LL:
     for sign in "", "+", "-":
@@ -363,11 +396,11 @@
                 vv = -v
             try:
                 if long(ss) != long(vv):
-                    raise TestFailed, "int(%s)" % `ss`
+                    raise TestFailed, "long(%s)" % `ss`
             except v:
                 pass
             except ValueError, e:
-                raise TestFailed, "int(%s) raised ValueError: %s" % (`ss`, e)
+                raise TestFailed, "long(%s) raised ValueError: %s" % (`ss`, e)
 
 print 'map'
 if map(None, 'hello world') <> ['h','e','l','l','o',' ','w','o','r','l','d']:
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index f90887a..5c0a063 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -221,15 +221,23 @@
 
 # Contains:
 print 'Testing Unicode contains method...',
-assert ('a' in 'abdb') == 1
-assert ('a' in 'bdab') == 1
-assert ('a' in 'bdaba') == 1
-assert ('a' in 'bdba') == 1
+assert ('a' in u'abdb') == 1
+assert ('a' in u'bdab') == 1
+assert ('a' in u'bdaba') == 1
+assert ('a' in u'bdba') == 1
 assert ('a' in u'bdba') == 1
 assert (u'a' in u'bdba') == 1
 assert (u'a' in u'bdb') == 0
 assert (u'a' in 'bdb') == 0
 assert (u'a' in 'bdba') == 1
+assert (u'a' in ('a',1,None)) == 1
+assert (u'a' in (1,None,'a')) == 1
+assert (u'a' in (1,None,u'a')) == 1
+assert ('a' in ('a',1,None)) == 1
+assert ('a' in (1,None,'a')) == 1
+assert ('a' in (1,None,u'a')) == 1
+assert ('a' in ('x',1,u'y')) == 0
+assert ('a' in ('x',1,None)) == 0
 print 'done.'
 
 # Formatting:
@@ -270,11 +278,88 @@
     assert unicode(u.encode(encoding),encoding) == u
 
 u = u''.join(map(unichr, range(256)))
-for encoding in ('latin-1',):
-    assert unicode(u.encode(encoding),encoding) == u
+for encoding in (
+    'latin-1',
+    ):
+    try:
+        assert unicode(u.encode(encoding),encoding) == u
+    except AssertionError:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
 
 u = u''.join(map(unichr, range(128)))
-for encoding in ('ascii',):
-    assert unicode(u.encode(encoding),encoding) == u
+for encoding in (
+    'ascii',
+    ):
+    try:
+        assert unicode(u.encode(encoding),encoding) == u
+    except AssertionError:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
+
+print 'done.'
+
+print 'Testing standard mapping codecs...',
+
+print '0-127...',
+s = ''.join(map(chr, range(128)))
+for encoding in (
+    'cp037', 'cp1026',
+    'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
+    'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
+    'cp863', 'cp865', 'cp866', 
+    'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
+    'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
+    'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
+    'mac_cyrillic', 'mac_latin2',
+
+    'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
+    'cp1256', 'cp1257', 'cp1258',
+    'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
+
+    'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
+    'cp1006', 'cp875', 'iso8859_8',
+    
+    ### These have undefined mappings:
+    #'cp424',
+    
+    ):
+    try:
+        assert unicode(s,encoding).encode(encoding) == s
+    except AssertionError:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
+
+print '128-255...',
+s = ''.join(map(chr, range(128,256)))
+for encoding in (
+    'cp037', 'cp1026',
+    'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
+    'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
+    'cp863', 'cp865', 'cp866', 
+    'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
+    'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
+    'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
+    'mac_cyrillic', 'mac_latin2',
+    
+    ### These have undefined mappings:
+    #'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
+    #'cp1256', 'cp1257', 'cp1258',
+    #'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
+    #'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
+    
+    ### These fail the round-trip:
+    #'cp1006', 'cp875', 'iso8859_8',
+    
+    ):
+    try:
+        assert unicode(s,encoding).encode(encoding) == s
+    except AssertionError:
+        print '*** codec "%s" failed round-trip' % encoding
+    except ValueError,why:
+        print '*** codec for "%s" failed: %s' % (encoding, why)
 
 print 'done.'