Rename 'unicode' to 'str' in the unicode type object's tp_name field. Rename 'str' to 'str8'.
Change all occurrences of unichr to chr.
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py
index b7fba5c..9233871 100644
--- a/Lib/test/test_builtin.py
+++ b/Lib/test/test_builtin.py
@@ -90,7 +90,7 @@
         (str(''), ValueError),
         (str(' '), ValueError),
         (str('  \t\t  '), ValueError),
-        (unichr(0x200), ValueError),
+        (chr(0x200), ValueError),
 ]
 
 class TestFailingBool:
@@ -221,7 +221,7 @@
                           mode='eval', source='0', filename='tmp')
         if have_unicode:
             compile(str(b'print(u"\xc3\xa5")\n', 'utf8'), '', 'exec')
-            self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec')
+            self.assertRaises(TypeError, compile, chr(0), 'f', 'exec')
             self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
 
 
@@ -557,7 +557,7 @@
 
             class shiftunicode(str):
                 def __getitem__(self, index):
-                    return unichr(ord(str.__getitem__(self, index))+1)
+                    return chr(ord(str.__getitem__(self, index))+1)
             self.assertEqual(
                 filter(lambda x: x>=str("3"), shiftunicode("1234")),
                 str("345")
@@ -676,7 +676,7 @@
         self.assertRaises(TypeError, getattr, sys, 1, "foo")
         self.assertRaises(TypeError, getattr)
         if have_unicode:
-            self.assertRaises(UnicodeError, getattr, sys, unichr(sys.maxunicode))
+            self.assertRaises(UnicodeError, getattr, sys, chr(sys.maxunicode))
 
     def test_hasattr(self):
         import sys
@@ -684,7 +684,7 @@
         self.assertRaises(TypeError, hasattr, sys, 1)
         self.assertRaises(TypeError, hasattr)
         if have_unicode:
-            self.assertRaises(UnicodeError, hasattr, sys, unichr(sys.maxunicode))
+            self.assertRaises(UnicodeError, hasattr, sys, chr(sys.maxunicode))
 
     def test_hash(self):
         hash(None)
@@ -789,7 +789,7 @@
         self.assert_(isinstance(x, int))
 
         if have_unicode:
-            x = int(unichr(0x661) * 600)
+            x = int(chr(0x661) * 600)
             self.assert_(isinstance(x, int))
 
         self.assertRaises(TypeError, int, 1, 12)
@@ -1387,7 +1387,7 @@
         self.assertEqual(ord('A'), 65)
         self.assertEqual(ord('a'), 97)
         if have_unicode:
-            self.assertEqual(ord(unichr(sys.maxunicode)), sys.maxunicode)
+            self.assertEqual(ord(chr(sys.maxunicode)), sys.maxunicode)
         self.assertRaises(TypeError, ord, 42)
         if have_unicode:
             self.assertRaises(TypeError, ord, str("12"))
@@ -1668,15 +1668,15 @@
 
     def test_unichr(self):
         if have_unicode:
-            self.assertEqual(unichr(32), str(' '))
-            self.assertEqual(unichr(65), str('A'))
-            self.assertEqual(unichr(97), str('a'))
+            self.assertEqual(chr(32), str(' '))
+            self.assertEqual(chr(65), str('A'))
+            self.assertEqual(chr(97), str('a'))
             self.assertEqual(
-                unichr(sys.maxunicode),
+                chr(sys.maxunicode),
                 str(('\\U%08x' % (sys.maxunicode)).encode("ascii"), 'unicode-escape')
             )
-            self.assertRaises(ValueError, unichr, sys.maxunicode+1)
-            self.assertRaises(TypeError, unichr)
+            self.assertRaises(ValueError, chr, sys.maxunicode+1)
+            self.assertRaises(TypeError, chr)
 
     # We don't want self in vars(), so these are static methods
 
diff --git a/Lib/test/test_codeccallbacks.py b/Lib/test/test_codeccallbacks.py
index b934073..5215b87 100644
--- a/Lib/test/test_codeccallbacks.py
+++ b/Lib/test/test_codeccallbacks.py
@@ -137,7 +137,7 @@
         # base encodings.
         sin = "a\xac\u1234\u20ac\u8000"
         if sys.maxunicode > 0xffff:
-            sin += unichr(sys.maxunicode)
+            sin += chr(sys.maxunicode)
         sout = "a\\xac\\u1234\\u20ac\\u8000"
         if sys.maxunicode > 0xffff:
             sout += "\\U%08x" % sys.maxunicode
@@ -509,7 +509,7 @@
         )
         # Use the correct exception
         cs = (0, 1, 9, 10, 99, 100, 999, 1000, 9999, 10000, 0x3042)
-        s = "".join(unichr(c) for c in cs)
+        s = "".join(chr(c) for c in cs)
         self.assertEquals(
             codecs.xmlcharrefreplace_errors(
                 UnicodeEncodeError("ascii", s, 0, len(s), "ouch")
@@ -650,7 +650,7 @@
         v = (1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000)
         if sys.maxunicode>=100000:
             v += (100000, 500000, 1000000)
-        s = "".join([unichr(x) for x in v])
+        s = "".join([chr(x) for x in v])
         codecs.register_error("test.xmlcharrefreplace", codecs.xmlcharrefreplace_errors)
         for enc in ("ascii", "iso-8859-15"):
             for err in ("xmlcharrefreplace", "test.xmlcharrefreplace"):
diff --git a/Lib/test/test_codecmaps_jp.py b/Lib/test/test_codecmaps_jp.py
index 58abb30..31b80eb 100644
--- a/Lib/test/test_codecmaps_jp.py
+++ b/Lib/test/test_codecmaps_jp.py
@@ -21,7 +21,7 @@
         ('\xff', '\uf8f3'),
     ]
     for i in range(0xa1, 0xe0):
-        supmaps.append((chr(i), unichr(i+0xfec0)))
+        supmaps.append((chr(i), chr(i+0xfec0)))
 
 
 class TestEUCJPCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index 2132b8d..4ae8f60 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -1122,7 +1122,7 @@
         # this used to leak references
         try:
             class C(object):
-                __slots__ = [unichr(128)]
+                __slots__ = [chr(128)]
         except (TypeError, UnicodeEncodeError):
             pass
         else:
diff --git a/Lib/test/test_multibytecodec.py b/Lib/test/test_multibytecodec.py
index 8ea4bf9..0b2021a 100644
--- a/Lib/test/test_multibytecodec.py
+++ b/Lib/test/test_multibytecodec.py
@@ -210,9 +210,9 @@
 
     def test_bug1572832(self):
         if sys.maxunicode >= 0x10000:
-            myunichr = unichr
+            myunichr = chr
         else:
-            myunichr = lambda x: unichr(0xD7C0+(x>>10)) + unichr(0xDC00+(x&0x3FF))
+            myunichr = lambda x: chr(0xD7C0+(x>>10)) + chr(0xDC00+(x&0x3FF))
 
         for x in xrange(0x10000, 0x110000):
             # Any ISO 2022 codec will cause the segfault
diff --git a/Lib/test/test_multibytecodec_support.py b/Lib/test/test_multibytecodec_support.py
index 6abcdd6..acfb2f1 100644
--- a/Lib/test/test_multibytecodec_support.py
+++ b/Lib/test/test_multibytecodec_support.py
@@ -244,8 +244,8 @@
                 self.assertEqual(ostream.getvalue(), self.tstring[0])
 
 if len('\U00012345') == 2: # ucs2 build
-    _unichr = unichr
-    def unichr(v):
+    _unichr = chr
+    def chr(v):
         if v >= 0x10000:
             return _unichr(0xd800 + ((v - 0x10000) >> 10)) + \
                    _unichr(0xdc00 + ((v - 0x10000) & 0x3ff))
@@ -272,7 +272,7 @@
         return test_support.open_urlresource(self.mapfileurl)
 
     def test_mapping_file(self):
-        unichrs = lambda s: ''.join(map(unichr, map(eval, s.split('+'))))
+        unichrs = lambda s: ''.join(map(chr, map(eval, s.split('+'))))
         urt_wa = {}
 
         for line in self.open_mapping_file():
diff --git a/Lib/test/test_normalization.py b/Lib/test/test_normalization.py
index a48af4d..1331e51 100644
--- a/Lib/test/test_normalization.py
+++ b/Lib/test/test_normalization.py
@@ -28,7 +28,7 @@
     for x in data:
         if x > sys.maxunicode:
             raise RangeError
-    return "".join([unichr(x) for x in data])
+    return "".join([chr(x) for x in data])
 
 class NormalizationTest(unittest.TestCase):
     def test_main(self):
@@ -77,7 +77,7 @@
 
         # Perform tests for all other data
         for c in range(sys.maxunicode+1):
-            X = unichr(c)
+            X = chr(c)
             if X in part1_data:
                 continue
             self.failUnless(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
diff --git a/Lib/test/test_ucn.py b/Lib/test/test_ucn.py
index 4472e90..485e124 100644
--- a/Lib/test/test_ucn.py
+++ b/Lib/test/test_ucn.py
@@ -96,7 +96,7 @@
         import unicodedata
         count = 0
         for code in xrange(0x10000):
-            char = unichr(code)
+            char = chr(code)
             name = unicodedata.name(char, None)
             if name is not None:
                 self.assertEqual(unicodedata.lookup(name), char)
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index 125fd56..3dd92ae 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -90,7 +90,7 @@
                 "\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
                 "\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
                 "\\xfe\\xff'")
-            testrepr = repr(''.join(map(unichr, xrange(256))))
+            testrepr = repr(''.join(map(chr, xrange(256))))
             self.assertEqual(testrepr, latin1repr)
             # Test repr works on wide unicode escapes without overflow.
             self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
@@ -632,7 +632,7 @@
 
         # Roundtrip safety for BMP (just the first 1024 chars)
         for c in xrange(1024):
-            u = unichr(c)
+            u = chr(c)
             for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
                              'utf-16-be', 'raw_unicode_escape',
                              'unicode_escape', 'unicode_internal'):
@@ -640,13 +640,13 @@
 
         # Roundtrip safety for BMP (just the first 256 chars)
         for c in xrange(256):
-            u = unichr(c)
+            u = chr(c)
             for encoding in ('latin-1',):
                 self.assertEqual(str(u.encode(encoding),encoding), u)
 
         # Roundtrip safety for BMP (just the first 128 chars)
         for c in xrange(128):
-            u = unichr(c)
+            u = chr(c)
             for encoding in ('ascii',):
                 self.assertEqual(str(u.encode(encoding),encoding), u)
 
@@ -661,7 +661,7 @@
         # This excludes surrogates: in the full range, there would be
         # a surrogate pair (\udbff\udc00), which gets converted back
         # to a non-BMP character (\U0010fc00)
-        u = ''.join(map(unichr, range(0,0xd800)+range(0xe000,0x10000)))
+        u = ''.join(map(chr, range(0,0xd800)+range(0xe000,0x10000)))
         for encoding in ('utf-8',):
             self.assertEqual(str(u.encode(encoding),encoding), u)
 
diff --git a/Lib/test/test_unicodedata.py b/Lib/test/test_unicodedata.py
index 227aa5a..dc4be19 100644
--- a/Lib/test/test_unicodedata.py
+++ b/Lib/test/test_unicodedata.py
@@ -21,7 +21,7 @@
     def test_method_checksum(self):
         h = hashlib.sha1()
         for i in range(65536):
-            char = unichr(i)
+            char = chr(i)
             data = [
                 # Predicates (single char)
                 "01"[char.isalnum()],
@@ -82,7 +82,7 @@
         h = hashlib.sha1()
 
         for i in range(0x10000):
-            char = unichr(i)
+            char = chr(i)
             data = [
                 # Properties
                 str(self.db.digit(char, -1)),
@@ -194,7 +194,7 @@
         # its numeric value should be the same.
         count = 0
         for i in xrange(0x10000):
-            c = unichr(i)
+            c = chr(i)
             dec = self.db.decimal(c, -1)
             if dec != -1:
                 self.assertEqual(dec, self.db.numeric(c))
@@ -207,7 +207,7 @@
         # its numeric value should be the same.
         count = 0
         for i in xrange(0x10000):
-            c = unichr(i)
+            c = chr(i)
             dec = self.db.digit(c, -1)
             if dec != -1:
                 self.assertEqual(dec, self.db.numeric(c))