Rip out all the u"..." literals and calls to unicode().
diff --git a/Lib/test/bad_coding2.py b/Lib/test/bad_coding2.py
index ea901b5..bb2bb7e 100644
--- a/Lib/test/bad_coding2.py
+++ b/Lib/test/bad_coding2.py
@@ -1,2 +1,2 @@
 #coding: utf8
-print '我'
+print('我')
diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py
index 4691e13..4cd9073 100644
--- a/Lib/test/pickletester.py
+++ b/Lib/test/pickletester.py
@@ -484,8 +484,8 @@
 
     if have_unicode:
         def test_unicode(self):
-            endcases = [unicode(''), unicode('<\\u>'), unicode('<\\\u1234>'),
-                        unicode('<\n>'),  unicode('<\\>')]
+            endcases = [str(''), str('<\\u>'), str('<\\\u1234>'),
+                        str('<\n>'),  str('<\\>')]
             for proto in protocols:
                 for u in endcases:
                     p = self.dumps(u, proto)
@@ -908,8 +908,8 @@
 class MyStr(str):
     sample = "hello"
 
-class MyUnicode(unicode):
-    sample = u"hello \u1234"
+class MyUnicode(str):
+    sample = "hello \u1234"
 
 class MyTuple(tuple):
     sample = (1, 2, 3)
diff --git a/Lib/test/string_tests.py b/Lib/test/string_tests.py
index 2431262..116bcf0 100644
--- a/Lib/test/string_tests.py
+++ b/Lib/test/string_tests.py
@@ -589,7 +589,7 @@
         self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
 
         # mixed use of str and unicode
-        self.checkequal([u'a', u'b', u'c d'], 'a b c d', 'split', u' ', 2)
+        self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', ' ', 2)
 
     def test_additional_rsplit(self):
         self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
@@ -622,7 +622,7 @@
         self.checkequal([' a  a'] + ['a']*18, aaa, 'rsplit', None, 18)
 
         # mixed use of str and unicode
-        self.checkequal([u'a b', u'c', u'd'], 'a b c d', 'rsplit', u' ', 2)
+        self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', ' ', 2)
 
     def test_strip(self):
         self.checkequal('hello', '   hello   ', 'strip')
@@ -644,14 +644,14 @@
 
         # strip/lstrip/rstrip with unicode arg
         if test_support.have_unicode:
-            self.checkequal(unicode('hello', 'ascii'), 'xyzzyhelloxyzzy',
-                 'strip', unicode('xyz', 'ascii'))
-            self.checkequal(unicode('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
-                 'lstrip', unicode('xyz', 'ascii'))
-            self.checkequal(unicode('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
-                 'rstrip', unicode('xyz', 'ascii'))
-            self.checkequal(unicode('hello', 'ascii'), 'hello',
-                 'strip', unicode('xyz', 'ascii'))
+            self.checkequal(str('hello', 'ascii'), 'xyzzyhelloxyzzy',
+                 'strip', str('xyz', 'ascii'))
+            self.checkequal(str('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
+                 'lstrip', str('xyz', 'ascii'))
+            self.checkequal(str('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
+                 'rstrip', str('xyz', 'ascii'))
+            self.checkequal(str('hello', 'ascii'), 'hello',
+                 'strip', str('xyz', 'ascii'))
 
         self.checkraises(TypeError, 'hello', 'strip', 42, 42)
         self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
@@ -908,13 +908,13 @@
         self.checkequal(False, '', '__contains__', 'asdf')    # vereq('asdf' in '', False)
 
     def test_subscript(self):
-        self.checkequal(u'a', 'abc', '__getitem__', 0)
-        self.checkequal(u'c', 'abc', '__getitem__', -1)
-        self.checkequal(u'a', 'abc', '__getitem__', 0)
-        self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 3))
-        self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 1000))
-        self.checkequal(u'a', 'abc', '__getitem__', slice(0, 1))
-        self.checkequal(u'', 'abc', '__getitem__', slice(0, 0))
+        self.checkequal('a', 'abc', '__getitem__', 0)
+        self.checkequal('c', 'abc', '__getitem__', -1)
+        self.checkequal('a', 'abc', '__getitem__', 0)
+        self.checkequal('abc', 'abc', '__getitem__', slice(0, 3))
+        self.checkequal('abc', 'abc', '__getitem__', slice(0, 1000))
+        self.checkequal('a', 'abc', '__getitem__', slice(0, 1))
+        self.checkequal('', 'abc', '__getitem__', slice(0, 0))
         # FIXME What about negative indices? This is handled differently by [] and __getitem__(slice)
 
         self.checkraises(TypeError, 'abc', '__getitem__', 'def')
@@ -957,11 +957,11 @@
         self.checkequal('abc', 'a', 'join', ('abc',))
         self.checkequal('z', 'a', 'join', UserList(['z']))
         if test_support.have_unicode:
-            self.checkequal(unicode('a.b.c'), unicode('.'), 'join', ['a', 'b', 'c'])
-            self.checkequal(unicode('a.b.c'), '.', 'join', [unicode('a'), 'b', 'c'])
-            self.checkequal(unicode('a.b.c'), '.', 'join', ['a', unicode('b'), 'c'])
-            self.checkequal(unicode('a.b.c'), '.', 'join', ['a', 'b', unicode('c')])
-            self.checkraises(TypeError, '.', 'join', ['a', unicode('b'), 3])
+            self.checkequal(str('a.b.c'), str('.'), 'join', ['a', 'b', 'c'])
+            self.checkequal(str('a.b.c'), '.', 'join', [str('a'), 'b', 'c'])
+            self.checkequal(str('a.b.c'), '.', 'join', ['a', str('b'), 'c'])
+            self.checkequal(str('a.b.c'), '.', 'join', ['a', 'b', str('c')])
+            self.checkraises(TypeError, '.', 'join', ['a', str('b'), 3])
         for i in [5, 25, 125]:
             self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                  ['a' * i] * i)
@@ -1159,7 +1159,7 @@
         self.assert_(s1 is s2)
 
         # Should also test mixed-type join.
-        if t is unicode:
+        if t is str:
             s1 = subclass("abcd")
             s2 = "".join([s1])
             self.assert_(s1 is not s2)
@@ -1171,14 +1171,14 @@
 
         elif t is str:
             s1 = subclass("abcd")
-            s2 = u"".join([s1])
+            s2 = "".join([s1])
             self.assert_(s1 is not s2)
-            self.assert_(type(s2) is unicode) # promotes!
+            self.assert_(type(s2) is str) # promotes!
 
             s1 = t("abcd")
-            s2 = u"".join([s1])
+            s2 = "".join([s1])
             self.assert_(s1 is not s2)
-            self.assert_(type(s2) is unicode) # promotes!
+            self.assert_(type(s2) is str) # promotes!
 
         else:
             self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
diff --git a/Lib/test/test_StringIO.py b/Lib/test/test_StringIO.py
index 83cd76c..9c3a83f 100644
--- a/Lib/test/test_StringIO.py
+++ b/Lib/test/test_StringIO.py
@@ -112,10 +112,10 @@
         f = self.MODULE.StringIO()
         f.write(self._line[:6])
         f.seek(3)
-        f.write(unicode(self._line[20:26]))
-        f.write(unicode(self._line[52]))
+        f.write(str(self._line[20:26]))
+        f.write(str(self._line[52]))
         s = f.getvalue()
-        self.assertEqual(s, unicode('abcuvwxyz!'))
+        self.assertEqual(s, str('abcuvwxyz!'))
         self.assertEqual(type(s), types.UnicodeType)
 
 class TestcStringIO(TestGenericStringIO):
@@ -130,18 +130,18 @@
         # Check that this works.
 
         f = self.MODULE.StringIO()
-        f.write(unicode(self._line[:5]))
+        f.write(str(self._line[:5]))
         s = f.getvalue()
         self.assertEqual(s, 'abcde')
         self.assertEqual(type(s), types.StringType)
 
-        f = self.MODULE.StringIO(unicode(self._line[:5]))
+        f = self.MODULE.StringIO(str(self._line[:5]))
         s = f.getvalue()
         self.assertEqual(s, 'abcde')
         self.assertEqual(type(s), types.StringType)
 
         self.assertRaises(UnicodeEncodeError, self.MODULE.StringIO,
-                          unicode('\xf4', 'latin-1'))
+                          str('\xf4', 'latin-1'))
 
 import sys
 if sys.platform.startswith('java'):
diff --git a/Lib/test/test_array.py b/Lib/test/test_array.py
index 34b573f..b845570 100755
--- a/Lib/test/test_array.py
+++ b/Lib/test/test_array.py
@@ -747,7 +747,7 @@
 
     def test_nounicode(self):
         a = array.array(self.typecode, self.example)
-        self.assertRaises(ValueError, a.fromunicode, unicode(''))
+        self.assertRaises(ValueError, a.fromunicode, str(''))
         self.assertRaises(ValueError, a.tounicode)
 
 tests.append(CharacterTest)
@@ -755,27 +755,27 @@
 if test_support.have_unicode:
     class UnicodeTest(StringTest):
         typecode = 'u'
-        example = unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')
-        smallerexample = unicode(r'\x01\u263a\x00\ufefe', 'unicode-escape')
-        biggerexample = unicode(r'\x01\u263a\x01\ufeff', 'unicode-escape')
-        outside = unicode('\x33')
+        example = str(r'\x01\u263a\x00\ufeff', 'unicode-escape')
+        smallerexample = str(r'\x01\u263a\x00\ufefe', 'unicode-escape')
+        biggerexample = str(r'\x01\u263a\x01\ufeff', 'unicode-escape')
+        outside = str('\x33')
         minitemsize = 2
 
         def test_unicode(self):
-            self.assertRaises(TypeError, array.array, 'b', unicode('foo', 'ascii'))
+            self.assertRaises(TypeError, array.array, 'b', str('foo', 'ascii'))
 
-            a = array.array('u', unicode(r'\xa0\xc2\u1234', 'unicode-escape'))
-            a.fromunicode(unicode(' ', 'ascii'))
-            a.fromunicode(unicode('', 'ascii'))
-            a.fromunicode(unicode('', 'ascii'))
-            a.fromunicode(unicode(r'\x11abc\xff\u1234', 'unicode-escape'))
+            a = array.array('u', str(r'\xa0\xc2\u1234', 'unicode-escape'))
+            a.fromunicode(str(' ', 'ascii'))
+            a.fromunicode(str('', 'ascii'))
+            a.fromunicode(str('', 'ascii'))
+            a.fromunicode(str(r'\x11abc\xff\u1234', 'unicode-escape'))
             s = a.tounicode()
             self.assertEqual(
                 s,
-                unicode(r'\xa0\xc2\u1234 \x11abc\xff\u1234', 'unicode-escape')
+                str(r'\xa0\xc2\u1234 \x11abc\xff\u1234', 'unicode-escape')
             )
 
-            s = unicode(r'\x00="\'a\\b\x80\xff\u0000\u0001\u1234', 'unicode-escape')
+            s = str(r'\x00="\'a\\b\x80\xff\u0000\u0001\u1234', 'unicode-escape')
             a = array.array('u', s)
             self.assertEqual(
                 repr(a),
diff --git a/Lib/test/test_bigmem.py b/Lib/test/test_bigmem.py
index 5d2d844..0e30b1b 100644
--- a/Lib/test/test_bigmem.py
+++ b/Lib/test/test_bigmem.py
@@ -562,11 +562,11 @@
 
     @bigmemtest(minsize=_2G + 2, memuse=16)
     def test_compare(self, size):
-        t1 = (u'',) * size
-        t2 = (u'',) * size
+        t1 = ('',) * size
+        t2 = ('',) * size
         self.failUnless(t1 == t2)
         del t2
-        t2 = (u'',) * (size + 1)
+        t2 = ('',) * (size + 1)
         self.failIf(t1 == t2)
         del t2
         t2 = (1,) * size
@@ -667,11 +667,11 @@
 
     @bigmemtest(minsize=_2G + 2, memuse=16)
     def test_compare(self, size):
-        l1 = [u''] * size
-        l2 = [u''] * size
+        l1 = [''] * size
+        l2 = [''] * size
         self.failUnless(l1 == l2)
         del l2
-        l2 = [u''] * (size + 1)
+        l2 = [''] * (size + 1)
         self.failIf(l1 == l2)
         del l2
         l2 = [2] * size
@@ -896,27 +896,27 @@
 
     @bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5)
     def test_pop(self, size):
-        l = [u"a", u"b", u"c", u"d", u"e"] * size
+        l = ["a", "b", "c", "d", "e"] * size
         size *= 5
         self.assertEquals(len(l), size)
 
         item = l.pop()
         size -= 1
         self.assertEquals(len(l), size)
-        self.assertEquals(item, u"e")
-        self.assertEquals(l[-2:], [u"c", u"d"])
+        self.assertEquals(item, "e")
+        self.assertEquals(l[-2:], ["c", "d"])
 
         item = l.pop(0)
         size -= 1
         self.assertEquals(len(l), size)
-        self.assertEquals(item, u"a")
-        self.assertEquals(l[:2], [u"b", u"c"])
+        self.assertEquals(item, "a")
+        self.assertEquals(l[:2], ["b", "c"])
 
         item = l.pop(size - 2)
         size -= 1
         self.assertEquals(len(l), size)
-        self.assertEquals(item, u"c")
-        self.assertEquals(l[-2:], [u"b", u"d"])
+        self.assertEquals(item, "c")
+        self.assertEquals(l[-2:], ["b", "d"])
 
     @bigmemtest(minsize=_2G + 10, memuse=8)
     def test_remove(self, size):
diff --git a/Lib/test/test_binascii.py b/Lib/test/test_binascii.py
index ea8be31..f16258e 100755
--- a/Lib/test/test_binascii.py
+++ b/Lib/test/test_binascii.py
@@ -124,7 +124,7 @@
 
         # Verify the treatment of Unicode strings
         if test_support.have_unicode:
-            self.assertEqual(binascii.hexlify(unicode('a', 'ascii')), '61')
+            self.assertEqual(binascii.hexlify(str('a', 'ascii')), '61')
 
     def test_qp(self):
         # A test for SF bug 534347 (segfaults without the proper fix)
diff --git a/Lib/test/test_bool.py b/Lib/test/test_bool.py
index dd04b27..14709f7 100644
--- a/Lib/test/test_bool.py
+++ b/Lib/test/test_bool.py
@@ -208,28 +208,28 @@
         self.assertIs("xyz".startswith("z"), False)
 
         if test_support.have_unicode:
-            self.assertIs(unicode("xyz", 'ascii').endswith(unicode("z", 'ascii')), True)
-            self.assertIs(unicode("xyz", 'ascii').endswith(unicode("x", 'ascii')), False)
-            self.assertIs(unicode("xyz0123", 'ascii').isalnum(), True)
-            self.assertIs(unicode("@#$%", 'ascii').isalnum(), False)
-            self.assertIs(unicode("xyz", 'ascii').isalpha(), True)
-            self.assertIs(unicode("@#$%", 'ascii').isalpha(), False)
-            self.assertIs(unicode("0123", 'ascii').isdecimal(), True)
-            self.assertIs(unicode("xyz", 'ascii').isdecimal(), False)
-            self.assertIs(unicode("0123", 'ascii').isdigit(), True)
-            self.assertIs(unicode("xyz", 'ascii').isdigit(), False)
-            self.assertIs(unicode("xyz", 'ascii').islower(), True)
-            self.assertIs(unicode("XYZ", 'ascii').islower(), False)
-            self.assertIs(unicode("0123", 'ascii').isnumeric(), True)
-            self.assertIs(unicode("xyz", 'ascii').isnumeric(), False)
-            self.assertIs(unicode(" ", 'ascii').isspace(), True)
-            self.assertIs(unicode("XYZ", 'ascii').isspace(), False)
-            self.assertIs(unicode("X", 'ascii').istitle(), True)
-            self.assertIs(unicode("x", 'ascii').istitle(), False)
-            self.assertIs(unicode("XYZ", 'ascii').isupper(), True)
-            self.assertIs(unicode("xyz", 'ascii').isupper(), False)
-            self.assertIs(unicode("xyz", 'ascii').startswith(unicode("x", 'ascii')), True)
-            self.assertIs(unicode("xyz", 'ascii').startswith(unicode("z", 'ascii')), False)
+            self.assertIs(str("xyz", 'ascii').endswith(str("z", 'ascii')), True)
+            self.assertIs(str("xyz", 'ascii').endswith(str("x", 'ascii')), False)
+            self.assertIs(str("xyz0123", 'ascii').isalnum(), True)
+            self.assertIs(str("@#$%", 'ascii').isalnum(), False)
+            self.assertIs(str("xyz", 'ascii').isalpha(), True)
+            self.assertIs(str("@#$%", 'ascii').isalpha(), False)
+            self.assertIs(str("0123", 'ascii').isdecimal(), True)
+            self.assertIs(str("xyz", 'ascii').isdecimal(), False)
+            self.assertIs(str("0123", 'ascii').isdigit(), True)
+            self.assertIs(str("xyz", 'ascii').isdigit(), False)
+            self.assertIs(str("xyz", 'ascii').islower(), True)
+            self.assertIs(str("XYZ", 'ascii').islower(), False)
+            self.assertIs(str("0123", 'ascii').isnumeric(), True)
+            self.assertIs(str("xyz", 'ascii').isnumeric(), False)
+            self.assertIs(str(" ", 'ascii').isspace(), True)
+            self.assertIs(str("XYZ", 'ascii').isspace(), False)
+            self.assertIs(str("X", 'ascii').istitle(), True)
+            self.assertIs(str("x", 'ascii').istitle(), False)
+            self.assertIs(str("XYZ", 'ascii').isupper(), True)
+            self.assertIs(str("xyz", 'ascii').isupper(), False)
+            self.assertIs(str("xyz", 'ascii').startswith(str("x", 'ascii')), True)
+            self.assertIs(str("xyz", 'ascii').startswith(str("z", 'ascii')), False)
 
     def test_boolean(self):
         self.assertEqual(True & 1, 1)
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py
index acb0d45..b7fba5c 100644
--- a/Lib/test/test_builtin.py
+++ b/Lib/test/test_builtin.py
@@ -74,22 +74,22 @@
 ]
 if have_unicode:
     L += [
-        (unicode('0'), 0),
-        (unicode('1'), 1),
-        (unicode('9'), 9),
-        (unicode('10'), 10),
-        (unicode('99'), 99),
-        (unicode('100'), 100),
-        (unicode('314'), 314),
-        (unicode(' 314'), 314),
-        (unicode(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
-        (unicode('  \t\t  314  \t\t  '), 314),
-        (unicode('  1x'), ValueError),
-        (unicode('  1  '), 1),
-        (unicode('  1\02  '), ValueError),
-        (unicode(''), ValueError),
-        (unicode(' '), ValueError),
-        (unicode('  \t\t  '), ValueError),
+        (str('0'), 0),
+        (str('1'), 1),
+        (str('9'), 9),
+        (str('10'), 10),
+        (str('99'), 99),
+        (str('100'), 100),
+        (str('314'), 314),
+        (str(' 314'), 314),
+        (str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
+        (str('  \t\t  314  \t\t  '), 314),
+        (str('  1x'), ValueError),
+        (str('  1  '), 1),
+        (str('  1\02  '), ValueError),
+        (str(''), ValueError),
+        (str(' '), ValueError),
+        (str('  \t\t  '), ValueError),
         (unichr(0x200), ValueError),
 ]
 
@@ -220,9 +220,9 @@
         self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
                           mode='eval', source='0', filename='tmp')
         if have_unicode:
-            compile(unicode(b'print(u"\xc3\xa5")\n', 'utf8'), '', 'exec')
+            compile(str(b'print(u"\xc3\xa5")\n', 'utf8'), '', 'exec')
             self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec')
-            self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad')
+            self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
 
 
     def test_delattr(self):
@@ -329,19 +329,19 @@
         self.assertEqual(eval('b', globals, locals), 200)
         self.assertEqual(eval('c', globals, locals), 300)
         if have_unicode:
-            self.assertEqual(eval(unicode('1+1')), 2)
-            self.assertEqual(eval(unicode(' 1+1\n')), 2)
+            self.assertEqual(eval(str('1+1')), 2)
+            self.assertEqual(eval(str(' 1+1\n')), 2)
         globals = {'a': 1, 'b': 2}
         locals = {'b': 200, 'c': 300}
         if have_unicode:
-            self.assertEqual(eval(unicode('a'), globals), 1)
-            self.assertEqual(eval(unicode('a'), globals, locals), 1)
-            self.assertEqual(eval(unicode('b'), globals, locals), 200)
-            self.assertEqual(eval(unicode('c'), globals, locals), 300)
+            self.assertEqual(eval(str('a'), globals), 1)
+            self.assertEqual(eval(str('a'), globals, locals), 1)
+            self.assertEqual(eval(str('b'), globals, locals), 200)
+            self.assertEqual(eval(str('c'), globals, locals), 300)
             bom = '\xef\xbb\xbf'
             self.assertEqual(eval((bom + 'a').encode("latin-1"), globals, locals), 1)
-            self.assertEqual(eval(unicode(b'u"\xc3\xa5"', 'utf8'), globals),
-                             unicode(b'\xc3\xa5', 'utf8'))
+            self.assertEqual(eval(str(b'u"\xc3\xa5"', 'utf8'), globals),
+                             str(b'\xc3\xa5', 'utf8'))
         self.assertRaises(TypeError, eval)
         self.assertRaises(TypeError, eval, ())
 
@@ -472,7 +472,7 @@
             del g['__builtins__']
         self.assertEqual(g, {'z': 1})
 
-        exec(u'z = 1+1', g)
+        exec('z = 1+1', g)
         if '__builtins__' in g:
             del g['__builtins__']
         self.assertEqual(g, {'z': 2})
@@ -539,28 +539,28 @@
 
         if have_unicode:
             # test bltinmodule.c::filterunicode()
-            self.assertEqual(filter(None, unicode("12")), unicode("12"))
-            self.assertEqual(filter(lambda x: x>="3", unicode("1234")), unicode("34"))
-            self.assertRaises(TypeError, filter, 42, unicode("12"))
-            self.assertRaises(ValueError, filter, lambda x: x >="3", badstr(unicode("1234")))
+            self.assertEqual(filter(None, str("12")), str("12"))
+            self.assertEqual(filter(lambda x: x>="3", str("1234")), str("34"))
+            self.assertRaises(TypeError, filter, 42, str("12"))
+            self.assertRaises(ValueError, filter, lambda x: x >="3", badstr(str("1234")))
 
-            class badunicode(unicode):
+            class badunicode(str):
                 def __getitem__(self, index):
                     return 42
             self.assertRaises(TypeError, filter, lambda x: x >=42, badunicode("1234"))
 
-            class weirdunicode(unicode):
+            class weirdunicode(str):
                 def __getitem__(self, index):
-                    return weirdunicode(2*unicode.__getitem__(self, index))
+                    return weirdunicode(2*str.__getitem__(self, index))
             self.assertEqual(
-                filter(lambda x: x>=unicode("33"), weirdunicode("1234")), unicode("3344"))
+                filter(lambda x: x>=str("33"), weirdunicode("1234")), str("3344"))
 
-            class shiftunicode(unicode):
+            class shiftunicode(str):
                 def __getitem__(self, index):
-                    return unichr(ord(unicode.__getitem__(self, index))+1)
+                    return unichr(ord(str.__getitem__(self, index))+1)
             self.assertEqual(
-                filter(lambda x: x>=unicode("3"), shiftunicode("1234")),
-                unicode("345")
+                filter(lambda x: x>=str("3"), shiftunicode("1234")),
+                str("345")
             )
 
     def test_filter_subclasses(self):
@@ -578,12 +578,12 @@
             str2:   {"": "", "123": "112233"}
         }
         if have_unicode:
-            class unicode2(unicode):
+            class unicode2(str):
                 def __getitem__(self, index):
-                    return 2*unicode.__getitem__(self, index)
+                    return 2*str.__getitem__(self, index)
             inputs[unicode2] = {
-                unicode(): unicode(),
-                unicode("123"): unicode("112233")
+                str(): str(),
+                str("123"): str("112233")
             }
 
         for (cls, inps) in inputs.items():
@@ -607,10 +607,10 @@
         self.assertRaises(ValueError, float, "  0x3.1  ")
         self.assertRaises(ValueError, float, "  -0x3.p-1  ")
         if have_unicode:
-            self.assertEqual(float(unicode("  3.14  ")), 3.14)
-            self.assertEqual(float(unicode(b"  \u0663.\u0661\u0664  ",'raw-unicode-escape')), 3.14)
+            self.assertEqual(float(str("  3.14  ")), 3.14)
+            self.assertEqual(float(str(b"  \u0663.\u0661\u0664  ",'raw-unicode-escape')), 3.14)
             # Implementation limitation in PyFloat_FromString()
-            self.assertRaises(ValueError, float, unicode("1"*10000))
+            self.assertRaises(ValueError, float, str("1"*10000))
 
     @run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
     def test_float_with_comma(self):
@@ -692,7 +692,7 @@
         self.assertEqual(hash(1), hash(1.0))
         hash('spam')
         if have_unicode:
-            self.assertEqual(hash('spam'), hash(unicode('spam')))
+            self.assertEqual(hash('spam'), hash(str('spam')))
         hash((0,1,2,3))
         def f(): pass
         self.assertRaises(TypeError, hash, [])
@@ -743,7 +743,7 @@
         # Different base:
         self.assertEqual(int("10",16), 16)
         if have_unicode:
-            self.assertEqual(int(unicode("10"),16), 16)
+            self.assertEqual(int(str("10"),16), 16)
         # Test conversion from strings and various anomalies
         for s, v in L:
             for sign in "", "+", "-":
@@ -913,7 +913,7 @@
         self.assertRaises(TypeError, iter, 42, 42)
         lists = [("1", "2"), ["1", "2"], "12"]
         if have_unicode:
-            lists.append(unicode("12"))
+            lists.append(str("12"))
         for l in lists:
             i = iter(l)
             self.assertEqual(next(i), '1')
@@ -1012,11 +1012,11 @@
         self.assertEqual(int(-3.5), -3)
         self.assertEqual(int("-3"), -3)
         if have_unicode:
-            self.assertEqual(int(unicode("-3")), -3)
+            self.assertEqual(int(str("-3")), -3)
         # Different base:
         self.assertEqual(int("10",16), 16)
         if have_unicode:
-            self.assertEqual(int(unicode("10"),16), 16)
+            self.assertEqual(int(str("10"),16), 16)
         # Check conversions from string (same test set as for int(), and then some)
         LL = [
                 ('1' + '0'*20, 10**20),
@@ -1025,8 +1025,8 @@
         L2 = L[:]
         if have_unicode:
             L2 += [
-                (unicode('1') + unicode('0')*20, 10**20),
-                (unicode('1') + unicode('0')*100, 10**100),
+                (str('1') + str('0')*20, 10**20),
+                (str('1') + str('0')*100, 10**100),
         ]
         for s, v in L2 + LL:
             for sign in "", "+", "-":
@@ -1390,7 +1390,7 @@
             self.assertEqual(ord(unichr(sys.maxunicode)), sys.maxunicode)
         self.assertRaises(TypeError, ord, 42)
         if have_unicode:
-            self.assertRaises(TypeError, ord, unicode("12"))
+            self.assertRaises(TypeError, ord, str("12"))
 
     def test_pow(self):
         self.assertEqual(pow(0,0), 1)
@@ -1668,12 +1668,12 @@
 
     def test_unichr(self):
         if have_unicode:
-            self.assertEqual(unichr(32), unicode(' '))
-            self.assertEqual(unichr(65), unicode('A'))
-            self.assertEqual(unichr(97), unicode('a'))
+            self.assertEqual(unichr(32), str(' '))
+            self.assertEqual(unichr(65), str('A'))
+            self.assertEqual(unichr(97), str('a'))
             self.assertEqual(
                 unichr(sys.maxunicode),
-                unicode(('\\U%08x' % (sys.maxunicode)).encode("ascii"), 'unicode-escape')
+                str(('\\U%08x' % (sys.maxunicode)).encode("ascii"), 'unicode-escape')
             )
             self.assertRaises(ValueError, unichr, sys.maxunicode+1)
             self.assertRaises(TypeError, unichr)
@@ -1767,14 +1767,14 @@
         s = 'abracadabra'
         types = [list, tuple]
         if have_unicode:
-            types.insert(0, unicode)
+            types.insert(0, str)
         for T in types:
             self.assertEqual(sorted(s), sorted(T(s)))
 
         s = ''.join(dict.fromkeys(s).keys())  # unique letters only
         types = [set, frozenset, list, tuple, dict.fromkeys]
         if have_unicode:
-            types.insert(0, unicode)
+            types.insert(0, str)
         for T in types:
             self.assertEqual(sorted(s), sorted(T(s)))
 
diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py
index e0bb21e..7178c06 100644
--- a/Lib/test/test_bytes.py
+++ b/Lib/test/test_bytes.py
@@ -132,10 +132,10 @@
 
         # But they should never compare equal to Unicode!
         # Test this for all expected byte orders and Unicode character sizes
-        self.assertEqual(b"\0a\0b\0c" == u"abc", False)
-        self.assertEqual(b"\0\0\0a\0\0\0b\0\0\0c" == u"abc", False)
-        self.assertEqual(b"a\0b\0c\0" == u"abc", False)
-        self.assertEqual(b"a\0\0\0b\0\0\0c\0\0\0" == u"abc", False)
+        self.assertEqual(b"\0a\0b\0c" == "abc", False)
+        self.assertEqual(b"\0\0\0a\0\0\0b\0\0\0c" == "abc", False)
+        self.assertEqual(b"a\0b\0c\0" == "abc", False)
+        self.assertEqual(b"a\0\0\0b\0\0\0c\0\0\0" == "abc", False)
 
     def test_nohash(self):
         self.assertRaises(TypeError, hash, bytes())
@@ -323,7 +323,7 @@
         self.assertEqual(b, bytes(list(range(8)) + list(range(256))))
 
     def test_encoding(self):
-        sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
+        sample = "Hello world\n\u1234\u5678\u9abc\udef0"
         for enc in ("utf8", "utf16"):
             b = bytes(sample, enc)
             self.assertEqual(b, bytes(map(ord, sample.encode(enc))))
@@ -332,11 +332,11 @@
         self.assertEqual(b, bytes(sample[:-4]))
 
     def test_decode(self):
-        sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
+        sample = "Hello world\n\u1234\u5678\u9abc\def0\def0"
         for enc in ("utf8", "utf16"):
             b = bytes(sample, enc)
             self.assertEqual(b.decode(enc), sample)
-        sample = u"Hello world\n\x80\x81\xfe\xff"
+        sample = "Hello world\n\x80\x81\xfe\xff"
         b = bytes(sample, "latin1")
         self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
         self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
@@ -366,8 +366,8 @@
         self.assertEqual(b1 + b2, bytes("abcdef"))
         self.assertEqual(b1 + "def", bytes("abcdef"))
         self.assertEqual("def" + b1, bytes("defabc"))
-        self.assertRaises(TypeError, lambda: b1 + u"def")
-        self.assertRaises(TypeError, lambda: u"abc" + b2)
+        self.assertRaises(TypeError, lambda: b1 + "def")
+        self.assertRaises(TypeError, lambda: "abc" + b2)
 
     def test_repeat(self):
         b = bytes("abc")
@@ -391,7 +391,7 @@
         b += "xyz"
         self.assertEqual(b, b"abcdefxyz")
         try:
-            b += u""
+            b += ""
         except TypeError:
             pass
         else:
@@ -476,10 +476,10 @@
 
     def test_literal(self):
         tests =  [
-            (b"Wonderful spam", u"Wonderful spam"),
-            (br"Wonderful spam too", u"Wonderful spam too"),
-            (b"\xaa\x00\000\200", u"\xaa\x00\000\200"),
-            (br"\xaa\x00\000\200", ur"\xaa\x00\000\200"),
+            (b"Wonderful spam", "Wonderful spam"),
+            (br"Wonderful spam too", "Wonderful spam too"),
+            (b"\xaa\x00\000\200", "\xaa\x00\000\200"),
+            (br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
         ]
         for b, s in tests:
             self.assertEqual(b, bytes(s, 'latin-1'))
diff --git a/Lib/test/test_cfgparser.py b/Lib/test/test_cfgparser.py
index 85dfa32..360998e 100644
--- a/Lib/test/test_cfgparser.py
+++ b/Lib/test/test_cfgparser.py
@@ -248,12 +248,12 @@
         cf.set("sect", "option2", "splat")
         cf.set("sect", "option2", mystr("splat"))
         try:
-            unicode
+            str
         except NameError:
             pass
         else:
-            cf.set("sect", "option1", unicode("splat"))
-            cf.set("sect", "option2", unicode("splat"))
+            cf.set("sect", "option1", str("splat"))
+            cf.set("sect", "option2", str("splat"))
 
     def test_read_returns_file_list(self):
         file1 = test_support.findfile("cfgparser.1")
diff --git a/Lib/test/test_charmapcodec.py b/Lib/test/test_charmapcodec.py
index 2866984..d5981be 100644
--- a/Lib/test/test_charmapcodec.py
+++ b/Lib/test/test_charmapcodec.py
@@ -27,27 +27,27 @@
 
 class CharmapCodecTest(unittest.TestCase):
     def test_constructorx(self):
-        self.assertEquals(unicode('abc', codecname), u'abc')
-        self.assertEquals(unicode('xdef', codecname), u'abcdef')
-        self.assertEquals(unicode('defx', codecname), u'defabc')
-        self.assertEquals(unicode('dxf', codecname), u'dabcf')
-        self.assertEquals(unicode('dxfx', codecname), u'dabcfabc')
+        self.assertEquals(str('abc', codecname), 'abc')
+        self.assertEquals(str('xdef', codecname), 'abcdef')
+        self.assertEquals(str('defx', codecname), 'defabc')
+        self.assertEquals(str('dxf', codecname), 'dabcf')
+        self.assertEquals(str('dxfx', codecname), 'dabcfabc')
 
     def test_encodex(self):
-        self.assertEquals(u'abc'.encode(codecname), 'abc')
-        self.assertEquals(u'xdef'.encode(codecname), 'abcdef')
-        self.assertEquals(u'defx'.encode(codecname), 'defabc')
-        self.assertEquals(u'dxf'.encode(codecname), 'dabcf')
-        self.assertEquals(u'dxfx'.encode(codecname), 'dabcfabc')
+        self.assertEquals('abc'.encode(codecname), 'abc')
+        self.assertEquals('xdef'.encode(codecname), 'abcdef')
+        self.assertEquals('defx'.encode(codecname), 'defabc')
+        self.assertEquals('dxf'.encode(codecname), 'dabcf')
+        self.assertEquals('dxfx'.encode(codecname), 'dabcfabc')
 
     def test_constructory(self):
-        self.assertEquals(unicode('ydef', codecname), u'def')
-        self.assertEquals(unicode('defy', codecname), u'def')
-        self.assertEquals(unicode('dyf', codecname), u'df')
-        self.assertEquals(unicode('dyfy', codecname), u'df')
+        self.assertEquals(str('ydef', codecname), 'def')
+        self.assertEquals(str('defy', codecname), 'def')
+        self.assertEquals(str('dyf', codecname), 'df')
+        self.assertEquals(str('dyfy', codecname), 'df')
 
     def test_maptoundefined(self):
-        self.assertRaises(UnicodeError, unicode, 'abc\001', codecname)
+        self.assertRaises(UnicodeError, str, 'abc\001', codecname)
 
 def test_main():
     test.test_support.run_unittest(CharmapCodecTest)
diff --git a/Lib/test/test_codeccallbacks.py b/Lib/test/test_codeccallbacks.py
index 159c86d..b934073 100644
--- a/Lib/test/test_codeccallbacks.py
+++ b/Lib/test/test_codeccallbacks.py
@@ -16,18 +16,18 @@
         # otherwise we'd get an endless loop
         if realpos <= exc.start:
             self.pos = len(exc.object)
-        return (u"<?>", oldpos)
+        return ("<?>", oldpos)
 
 # A UnicodeEncodeError object with a bad start attribute
 class BadStartUnicodeEncodeError(UnicodeEncodeError):
     def __init__(self):
-        UnicodeEncodeError.__init__(self, "ascii", u"", 0, 1, "bad")
+        UnicodeEncodeError.__init__(self, "ascii", "", 0, 1, "bad")
         self.start = []
 
 # A UnicodeEncodeError object with a bad object attribute
 class BadObjectUnicodeEncodeError(UnicodeEncodeError):
     def __init__(self):
-        UnicodeEncodeError.__init__(self, "ascii", u"", 0, 1, "bad")
+        UnicodeEncodeError.__init__(self, "ascii", "", 0, 1, "bad")
         self.object = []
 
 # A UnicodeDecodeError object without an end attribute
@@ -45,19 +45,19 @@
 # A UnicodeTranslateError object without a start attribute
 class NoStartUnicodeTranslateError(UnicodeTranslateError):
     def __init__(self):
-        UnicodeTranslateError.__init__(self, u"", 0, 1, "bad")
+        UnicodeTranslateError.__init__(self, "", 0, 1, "bad")
         del self.start
 
 # A UnicodeTranslateError object without an end attribute
 class NoEndUnicodeTranslateError(UnicodeTranslateError):
     def __init__(self):
-        UnicodeTranslateError.__init__(self,  u"", 0, 1, "bad")
+        UnicodeTranslateError.__init__(self,  "", 0, 1, "bad")
         del self.end
 
 # A UnicodeTranslateError object without an object attribute
 class NoObjectUnicodeTranslateError(UnicodeTranslateError):
     def __init__(self):
-        UnicodeTranslateError.__init__(self, u"", 0, 1, "bad")
+        UnicodeTranslateError.__init__(self, "", 0, 1, "bad")
         del self.object
 
 class CodecCallbackTest(unittest.TestCase):
@@ -66,7 +66,7 @@
         # replace unencodable characters which numeric character entities.
         # For ascii, latin-1 and charmaps this is completely implemented
         # in C and should be reasonably fast.
-        s = u"\u30b9\u30d1\u30e2 \xe4nd eggs"
+        s = "\u30b9\u30d1\u30e2 \xe4nd eggs"
         self.assertEqual(
             s.encode("ascii", "xmlcharrefreplace"),
             "&#12473;&#12497;&#12514; &#228;nd eggs"
@@ -86,15 +86,15 @@
             l = []
             for c in exc.object[exc.start:exc.end]:
                 try:
-                    l.append(u"&%s;" % htmlentitydefs.codepoint2name[ord(c)])
+                    l.append("&%s;" % htmlentitydefs.codepoint2name[ord(c)])
                 except KeyError:
-                    l.append(u"&#%d;" % ord(c))
-            return (u"".join(l), exc.end)
+                    l.append("&#%d;" % ord(c))
+            return ("".join(l), exc.end)
 
         codecs.register_error(
             "test.xmlcharnamereplace", xmlcharnamereplace)
 
-        sin = u"\xab\u211c\xbb = \u2329\u1234\u20ac\u232a"
+        sin = "\xab\u211c\xbb = \u2329\u1234\u20ac\u232a"
         sout = "&laquo;&real;&raquo; = &lang;&#4660;&euro;&rang;"
         self.assertEqual(sin.encode("ascii", "test.xmlcharnamereplace"), sout)
         sout = "\xab&real;\xbb = &lang;&#4660;&euro;&rang;"
@@ -116,13 +116,13 @@
                 raise TypeError("don't know how to handle %r" % exc)
             l = []
             for c in exc.object[exc.start:exc.end]:
-                l.append(unicodedata.name(c, u"0x%x" % ord(c)))
-            return (u"\033[1m%s\033[0m" % u", ".join(l), exc.end)
+                l.append(unicodedata.name(c, "0x%x" % ord(c)))
+            return ("\033[1m%s\033[0m" % ", ".join(l), exc.end)
 
         codecs.register_error(
             "test.uninamereplace", uninamereplace)
 
-        sin = u"\xac\u1234\u20ac\u8000"
+        sin = "\xac\u1234\u20ac\u8000"
         sout = "\033[1mNOT SIGN, ETHIOPIC SYLLABLE SEE, EURO SIGN, CJK UNIFIED IDEOGRAPH-8000\033[0m"
         self.assertEqual(sin.encode("ascii", "test.uninamereplace"), sout)
 
@@ -135,7 +135,7 @@
     def test_backslashescape(self):
         # Does the same as the "unicode-escape" encoding, but with different
         # base encodings.
-        sin = u"a\xac\u1234\u20ac\u8000"
+        sin = "a\xac\u1234\u20ac\u8000"
         if sys.maxunicode > 0xffff:
             sin += unichr(sys.maxunicode)
         sout = "a\\xac\\u1234\\u20ac\\u8000"
@@ -163,7 +163,7 @@
             if not isinstance(exc, UnicodeDecodeError):
                 raise TypeError("don't know how to handle %r" % exc)
             if exc.object[exc.start:exc.end].startswith("\xc0\x80"):
-                return (u"\x00", exc.start+2) # retry after two bytes
+                return ("\x00", exc.start+2) # retry after two bytes
             else:
                 raise exc
 
@@ -171,7 +171,7 @@
             "test.relaxedutf8", relaxedutf8)
 
         sin = "a\x00b\xc0\x80c\xc3\xbc\xc0\x80\xc0\x80"
-        sout = u"a\x00b\x00c\xfc\x00\x00"
+        sout = "a\x00b\x00c\xfc\x00\x00"
         self.assertEqual(sin.decode("utf-8", "test.relaxedutf8"), sout)
         sin = "\xc0\x80\xc0\x81"
         self.assertRaises(UnicodeError, sin.decode, "utf-8", "test.relaxedutf8")
@@ -182,22 +182,22 @@
         # to be able to use e.g. the "replace" handler, the
         # charmap has to have a mapping for "?".
         charmap = dict([ (ord(c), 2*c.upper()) for c in "abcdefgh"])
-        sin = u"abc"
+        sin = "abc"
         sout = "AABBCC"
         self.assertEquals(codecs.charmap_encode(sin, "strict", charmap)[0], sout)
 
-        sin = u"abcA"
+        sin = "abcA"
         self.assertRaises(UnicodeError, codecs.charmap_encode, sin, "strict", charmap)
 
         charmap[ord("?")] = "XYZ"
-        sin = u"abcDEF"
+        sin = "abcDEF"
         sout = "AABBCCXYZXYZXYZ"
         self.assertEquals(codecs.charmap_encode(sin, "replace", charmap)[0], sout)
 
-        charmap[ord("?")] = u"XYZ"
+        charmap[ord("?")] = "XYZ"
         self.assertRaises(TypeError, codecs.charmap_encode, sin, "replace", charmap)
 
-        charmap[ord("?")] = u"XYZ"
+        charmap[ord("?")] = "XYZ"
         self.assertRaises(TypeError, codecs.charmap_encode, sin, "replace", charmap)
 
     def test_decodeunicodeinternal(self):
@@ -210,23 +210,23 @@
             def handler_unicodeinternal(exc):
                 if not isinstance(exc, UnicodeDecodeError):
                     raise TypeError("don't know how to handle %r" % exc)
-                return (u"\x01", 1)
+                return ("\x01", 1)
 
             self.assertEqual(
                 "\x00\x00\x00\x00\x00".decode("unicode-internal", "ignore"),
-                u"\u0000"
+                "\u0000"
             )
 
             self.assertEqual(
                 "\x00\x00\x00\x00\x00".decode("unicode-internal", "replace"),
-                u"\u0000\ufffd"
+                "\u0000\ufffd"
             )
 
             codecs.register_error("test.hui", handler_unicodeinternal)
 
             self.assertEqual(
                 "\x00\x00\x00\x00\x00".decode("unicode-internal", "test.hui"),
-                u"\u0000\u0001\u0000"
+                "\u0000\u0001\u0000"
             )
 
     def test_callbacks(self):
@@ -234,16 +234,16 @@
             if not isinstance(exc, UnicodeEncodeError) \
                and not isinstance(exc, UnicodeDecodeError):
                 raise TypeError("don't know how to handle %r" % exc)
-            l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)]
-            return (u"[%s]" % u"".join(l), exc.end)
+            l = ["<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)]
+            return ("[%s]" % "".join(l), exc.end)
 
         codecs.register_error("test.handler1", handler1)
 
         def handler2(exc):
             if not isinstance(exc, UnicodeDecodeError):
                 raise TypeError("don't know how to handle %r" % exc)
-            l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)]
-            return (u"[%s]" % u"".join(l), exc.end+1) # skip one character
+            l = ["<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)]
+            return ("[%s]" % "".join(l), exc.end+1) # skip one character
 
         codecs.register_error("test.handler2", handler2)
 
@@ -251,36 +251,36 @@
 
         self.assertEqual(
             s.decode("ascii", "test.handler1"),
-            u"\x00[<129>]\x7f[<128>][<255>]"
+            "\x00[<129>]\x7f[<128>][<255>]"
         )
         self.assertEqual(
             s.decode("ascii", "test.handler2"),
-            u"\x00[<129>][<128>]"
+            "\x00[<129>][<128>]"
         )
 
         self.assertEqual(
             "\\u3042\u3xxx".decode("unicode-escape", "test.handler1"),
-            u"\u3042[<92><117><51><120>]xx"
+            "\u3042[<92><117><51><120>]xx"
         )
 
         self.assertEqual(
             "\\u3042\u3xx".decode("unicode-escape", "test.handler1"),
-            u"\u3042[<92><117><51><120><120>]"
+            "\u3042[<92><117><51><120><120>]"
         )
 
         self.assertEqual(
-            codecs.charmap_decode("abc", "test.handler1", {ord("a"): u"z"})[0],
-            u"z[<98>][<99>]"
+            codecs.charmap_decode("abc", "test.handler1", {ord("a"): "z"})[0],
+            "z[<98>][<99>]"
         )
 
         self.assertEqual(
-            u"g\xfc\xdfrk".encode("ascii", "test.handler1"),
-            u"g[<252><223>]rk"
+            "g\xfc\xdfrk".encode("ascii", "test.handler1"),
+            "g[<252><223>]rk"
         )
 
         self.assertEqual(
-            u"g\xfc\xdf".encode("ascii", "test.handler1"),
-            u"g[<252><223>]"
+            "g\xfc\xdf".encode("ascii", "test.handler1"),
+            "g[<252><223>]"
         )
 
     def test_longstrings(self):
@@ -292,7 +292,7 @@
             codecs.register_error("test." + err, codecs.lookup_error(err))
         l = 1000
         errors += [ "test." + err for err in errors ]
-        for uni in [ s*l for s in (u"x", u"\u3042", u"a\xe4") ]:
+        for uni in [ s*l for s in ("x", "\u3042", "a\xe4") ]:
             for enc in ("ascii", "latin-1", "iso-8859-1", "iso-8859-15", "utf-8", "utf-7", "utf-16"):
                 for err in errors:
                     try:
@@ -307,7 +307,7 @@
         # check with one argument too much
         self.assertRaises(TypeError, exctype, *(args + ["too much"]))
         # check with one argument of the wrong type
-        wrongargs = [ "spam", u"eggs", 42, 1.0, None ]
+        wrongargs = [ "spam", "eggs", 42, 1.0, None ]
         for i in xrange(len(args)):
             for wrongarg in wrongargs:
                 if type(wrongarg) is type(args[i]):
@@ -328,33 +328,33 @@
     def test_unicodeencodeerror(self):
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
-            ["ascii", u"g\xfcrk", 1, 2, "ouch"],
+            ["ascii", "g\xfcrk", 1, 2, "ouch"],
             "'ascii' codec can't encode character u'\\xfc' in position 1: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
-            ["ascii", u"g\xfcrk", 1, 4, "ouch"],
+            ["ascii", "g\xfcrk", 1, 4, "ouch"],
             "'ascii' codec can't encode characters in position 1-3: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
-            ["ascii", u"\xfcx", 0, 1, "ouch"],
+            ["ascii", "\xfcx", 0, 1, "ouch"],
             "'ascii' codec can't encode character u'\\xfc' in position 0: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
-            ["ascii", u"\u0100x", 0, 1, "ouch"],
+            ["ascii", "\u0100x", 0, 1, "ouch"],
             "'ascii' codec can't encode character u'\\u0100' in position 0: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
-            ["ascii", u"\uffffx", 0, 1, "ouch"],
+            ["ascii", "\uffffx", 0, 1, "ouch"],
             "'ascii' codec can't encode character u'\\uffff' in position 0: ouch"
         )
         if sys.maxunicode > 0xffff:
             self.check_exceptionobjectargs(
                 UnicodeEncodeError,
-                ["ascii", u"\U00010000x", 0, 1, "ouch"],
+                ["ascii", "\U00010000x", 0, 1, "ouch"],
                 "'ascii' codec can't encode character u'\\U00010000' in position 0: ouch"
             )
 
@@ -373,28 +373,28 @@
     def test_unicodetranslateerror(self):
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
-            [u"g\xfcrk", 1, 2, "ouch"],
+            ["g\xfcrk", 1, 2, "ouch"],
             "can't translate character u'\\xfc' in position 1: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
-            [u"g\u0100rk", 1, 2, "ouch"],
+            ["g\u0100rk", 1, 2, "ouch"],
             "can't translate character u'\\u0100' in position 1: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
-            [u"g\uffffrk", 1, 2, "ouch"],
+            ["g\uffffrk", 1, 2, "ouch"],
             "can't translate character u'\\uffff' in position 1: ouch"
         )
         if sys.maxunicode > 0xffff:
             self.check_exceptionobjectargs(
                 UnicodeTranslateError,
-                [u"g\U00010000rk", 1, 2, "ouch"],
+                ["g\U00010000rk", 1, 2, "ouch"],
                 "can't translate character u'\\U00010000' in position 1: ouch"
             )
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
-            [u"g\xfcrk", 1, 3, "ouch"],
+            ["g\xfcrk", 1, 3, "ouch"],
             "can't translate characters in position 1-2: ouch"
         )
 
@@ -416,7 +416,7 @@
         self.assertRaises(
             UnicodeEncodeError,
             codecs.strict_errors,
-            UnicodeEncodeError("ascii", u"\u3042", 0, 1, "ouch")
+            UnicodeEncodeError("ascii", "\u3042", 0, 1, "ouch")
         )
 
     def test_badandgoodignoreexceptions(self):
@@ -434,16 +434,16 @@
         )
         # If the correct exception is passed in, "ignore" returns an empty replacement
         self.assertEquals(
-            codecs.ignore_errors(UnicodeEncodeError("ascii", u"\u3042", 0, 1, "ouch")),
-            (u"", 1)
+            codecs.ignore_errors(UnicodeEncodeError("ascii", "\u3042", 0, 1, "ouch")),
+            ("", 1)
         )
         self.assertEquals(
             codecs.ignore_errors(UnicodeDecodeError("ascii", "\xff", 0, 1, "ouch")),
-            (u"", 1)
+            ("", 1)
         )
         self.assertEquals(
-            codecs.ignore_errors(UnicodeTranslateError(u"\u3042", 0, 1, "ouch")),
-            (u"", 1)
+            codecs.ignore_errors(UnicodeTranslateError("\u3042", 0, 1, "ouch")),
+            ("", 1)
         )
 
     def test_badandgoodreplaceexceptions(self):
@@ -471,16 +471,16 @@
         )
         # With the correct exception, "replace" returns an "?" or u"\ufffd" replacement
         self.assertEquals(
-            codecs.replace_errors(UnicodeEncodeError("ascii", u"\u3042", 0, 1, "ouch")),
-            (u"?", 1)
+            codecs.replace_errors(UnicodeEncodeError("ascii", "\u3042", 0, 1, "ouch")),
+            ("?", 1)
         )
         self.assertEquals(
             codecs.replace_errors(UnicodeDecodeError("ascii", "\xff", 0, 1, "ouch")),
-            (u"\ufffd", 1)
+            ("\ufffd", 1)
         )
         self.assertEquals(
-            codecs.replace_errors(UnicodeTranslateError(u"\u3042", 0, 1, "ouch")),
-            (u"\ufffd", 1)
+            codecs.replace_errors(UnicodeTranslateError("\u3042", 0, 1, "ouch")),
+            ("\ufffd", 1)
         )
 
     def test_badandgoodxmlcharrefreplaceexceptions(self):
@@ -505,7 +505,7 @@
         self.assertRaises(
             TypeError,
             codecs.xmlcharrefreplace_errors,
-            UnicodeTranslateError(u"\u3042", 0, 1, "ouch")
+            UnicodeTranslateError("\u3042", 0, 1, "ouch")
         )
         # Use the correct exception
         cs = (0, 1, 9, 10, 99, 100, 999, 1000, 9999, 10000, 0x3042)
@@ -514,7 +514,7 @@
             codecs.xmlcharrefreplace_errors(
                 UnicodeEncodeError("ascii", s, 0, len(s), "ouch")
             ),
-            (u"".join(u"&#%d;" % ord(c) for c in s), len(s))
+            ("".join("&#%d;" % ord(c) for c in s), len(s))
         )
 
     def test_badandgoodbackslashreplaceexceptions(self):
@@ -539,41 +539,41 @@
         self.assertRaises(
             TypeError,
             codecs.backslashreplace_errors,
-            UnicodeTranslateError(u"\u3042", 0, 1, "ouch")
+            UnicodeTranslateError("\u3042", 0, 1, "ouch")
         )
         # Use the correct exception
         self.assertEquals(
-            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\u3042", 0, 1, "ouch")),
-            (u"\\u3042", 1)
+            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", "\u3042", 0, 1, "ouch")),
+            ("\\u3042", 1)
         )
         self.assertEquals(
-            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\x00", 0, 1, "ouch")),
-            (u"\\x00", 1)
+            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", "\x00", 0, 1, "ouch")),
+            ("\\x00", 1)
         )
         self.assertEquals(
-            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\xff", 0, 1, "ouch")),
-            (u"\\xff", 1)
+            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", "\xff", 0, 1, "ouch")),
+            ("\\xff", 1)
         )
         self.assertEquals(
-            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\u0100", 0, 1, "ouch")),
-            (u"\\u0100", 1)
+            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", "\u0100", 0, 1, "ouch")),
+            ("\\u0100", 1)
         )
         self.assertEquals(
-            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\uffff", 0, 1, "ouch")),
-            (u"\\uffff", 1)
+            codecs.backslashreplace_errors(UnicodeEncodeError("ascii", "\uffff", 0, 1, "ouch")),
+            ("\\uffff", 1)
         )
         if sys.maxunicode>0xffff:
             self.assertEquals(
-                codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\U00010000", 0, 1, "ouch")),
-                (u"\\U00010000", 1)
+                codecs.backslashreplace_errors(UnicodeEncodeError("ascii", "\U00010000", 0, 1, "ouch")),
+                ("\\U00010000", 1)
             )
             self.assertEquals(
-                codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\U0010ffff", 0, 1, "ouch")),
-                (u"\\U0010ffff", 1)
+                codecs.backslashreplace_errors(UnicodeEncodeError("ascii", "\U0010ffff", 0, 1, "ouch")),
+                ("\\U0010ffff", 1)
             )
 
     def test_badhandlerresults(self):
-        results = ( 42, u"foo", (1,2,3), (u"foo", 1, 3), (u"foo", None), (u"foo",), ("foo", 1, 3), ("foo", None), ("foo",) )
+        results = ( 42, "foo", (1,2,3), ("foo", 1, 3), ("foo", None), ("foo",), ("foo", 1, 3), ("foo", None), ("foo",) )
         encs = ("ascii", "latin-1", "iso-8859-1", "iso-8859-15")
 
         for res in results:
@@ -581,7 +581,7 @@
             for enc in encs:
                 self.assertRaises(
                     TypeError,
-                    u"\u3042".encode,
+                    "\u3042".encode,
                     enc,
                     "test.badhandler"
                 )
@@ -614,14 +614,14 @@
     def test_unencodablereplacement(self):
         def unencrepl(exc):
             if isinstance(exc, UnicodeEncodeError):
-                return (u"\u4242", exc.end)
+                return ("\u4242", exc.end)
             else:
                 raise TypeError("don't know how to handle %r" % exc)
         codecs.register_error("test.unencreplhandler", unencrepl)
         for enc in ("ascii", "iso-8859-1", "iso-8859-15"):
             self.assertRaises(
                 UnicodeEncodeError,
-                u"\u4242".encode,
+                "\u4242".encode,
                 enc,
                 "test.unencreplhandler"
             )
@@ -650,7 +650,7 @@
         v = (1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000)
         if sys.maxunicode>=100000:
             v += (100000, 500000, 1000000)
-        s = u"".join([unichr(x) for x in v])
+        s = "".join([unichr(x) for x in v])
         codecs.register_error("test.xmlcharrefreplace", codecs.xmlcharrefreplace_errors)
         for enc in ("ascii", "iso-8859-15"):
             for err in ("xmlcharrefreplace", "test.xmlcharrefreplace"):
@@ -673,7 +673,7 @@
         self.assertRaises(TypeError, "\\uyyyy".decode, "raw-unicode-escape", "test.baddecodereturn1")
 
         def baddecodereturn2(exc):
-            return (u"?", None)
+            return ("?", None)
         codecs.register_error("test.baddecodereturn2", baddecodereturn2)
         self.assertRaises(TypeError, "\xff".decode, "ascii", "test.baddecodereturn2")
 
@@ -682,11 +682,11 @@
 
         # Valid negative position
         handler.pos = -1
-        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"<?>0")
+        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), "<?>0")
 
         # Valid negative position
         handler.pos = -2
-        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"<?><?>")
+        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), "<?><?>")
 
         # Negative position out of bounds
         handler.pos = -3
@@ -694,11 +694,11 @@
 
         # Valid positive position
         handler.pos = 1
-        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"<?>0")
+        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), "<?>0")
 
         # Largest valid positive position (one beyond end of input)
         handler.pos = 2
-        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"<?>")
+        self.assertEquals("\xff0".decode("ascii", "test.posreturn"), "<?>")
 
         # Invalid positive position
         handler.pos = 3
@@ -706,7 +706,7 @@
 
         # Restart at the "0"
         handler.pos = 6
-        self.assertEquals("\\uyyyy0".decode("raw-unicode-escape", "test.posreturn"), u"<?>0")
+        self.assertEquals("\\uyyyy0".decode("raw-unicode-escape", "test.posreturn"), "<?>0")
 
         class D(dict):
             def __getitem__(self, key):
@@ -719,44 +719,44 @@
         # enhance coverage of:
         # Objects/unicodeobject.c::unicode_encode_call_errorhandler()
         # and callers
-        self.assertRaises(LookupError, u"\xff".encode, "ascii", "test.unknown")
+        self.assertRaises(LookupError, "\xff".encode, "ascii", "test.unknown")
 
         def badencodereturn1(exc):
             return 42
         codecs.register_error("test.badencodereturn1", badencodereturn1)
-        self.assertRaises(TypeError, u"\xff".encode, "ascii", "test.badencodereturn1")
+        self.assertRaises(TypeError, "\xff".encode, "ascii", "test.badencodereturn1")
 
         def badencodereturn2(exc):
-            return (u"?", None)
+            return ("?", None)
         codecs.register_error("test.badencodereturn2", badencodereturn2)
-        self.assertRaises(TypeError, u"\xff".encode, "ascii", "test.badencodereturn2")
+        self.assertRaises(TypeError, "\xff".encode, "ascii", "test.badencodereturn2")
 
         handler = PosReturn()
         codecs.register_error("test.posreturn", handler.handle)
 
         # Valid negative position
         handler.pos = -1
-        self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "<?>0")
+        self.assertEquals("\xff0".encode("ascii", "test.posreturn"), "<?>0")
 
         # Valid negative position
         handler.pos = -2
-        self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "<?><?>")
+        self.assertEquals("\xff0".encode("ascii", "test.posreturn"), "<?><?>")
 
         # Negative position out of bounds
         handler.pos = -3
-        self.assertRaises(IndexError, u"\xff0".encode, "ascii", "test.posreturn")
+        self.assertRaises(IndexError, "\xff0".encode, "ascii", "test.posreturn")
 
         # Valid positive position
         handler.pos = 1
-        self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "<?>0")
+        self.assertEquals("\xff0".encode("ascii", "test.posreturn"), "<?>0")
 
         # Largest valid positive position (one beyond end of input
         handler.pos = 2
-        self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "<?>")
+        self.assertEquals("\xff0".encode("ascii", "test.posreturn"), "<?>")
 
         # Invalid positive position
         handler.pos = 3
-        self.assertRaises(IndexError, u"\xff0".encode, "ascii", "test.posreturn")
+        self.assertRaises(IndexError, "\xff0".encode, "ascii", "test.posreturn")
 
         handler.pos = 0
 
@@ -764,9 +764,9 @@
             def __getitem__(self, key):
                 raise ValueError
         for err in ("strict", "replace", "xmlcharrefreplace", "backslashreplace", "test.posreturn"):
-            self.assertRaises(UnicodeError, codecs.charmap_encode, u"\xff", err, {0xff: None})
-            self.assertRaises(ValueError, codecs.charmap_encode, u"\xff", err, D())
-            self.assertRaises(TypeError, codecs.charmap_encode, u"\xff", err, {0xff: 300})
+            self.assertRaises(UnicodeError, codecs.charmap_encode, "\xff", err, {0xff: None})
+            self.assertRaises(ValueError, codecs.charmap_encode, "\xff", err, D())
+            self.assertRaises(TypeError, codecs.charmap_encode, "\xff", err, {0xff: 300})
 
     def test_translatehelper(self):
         # enhance coverage of:
@@ -777,20 +777,20 @@
         class D(dict):
             def __getitem__(self, key):
                 raise ValueError
-        self.assertRaises(ValueError, u"\xff".translate, D())
-        self.assertRaises(TypeError, u"\xff".translate, {0xff: sys.maxunicode+1})
-        self.assertRaises(TypeError, u"\xff".translate, {0xff: ()})
+        self.assertRaises(ValueError, "\xff".translate, D())
+        self.assertRaises(TypeError, "\xff".translate, {0xff: sys.maxunicode+1})
+        self.assertRaises(TypeError, "\xff".translate, {0xff: ()})
 
     def test_bug828737(self):
         charmap = {
-            ord("&"): u"&amp;",
-            ord("<"): u"&lt;",
-            ord(">"): u"&gt;",
-            ord('"'): u"&quot;",
+            ord("&"): "&amp;",
+            ord("<"): "&lt;",
+            ord(">"): "&gt;",
+            ord('"'): "&quot;",
         }
 
         for n in (1, 10, 100, 1000):
-            text = u'abc<def>ghi'*n
+            text = 'abc<def>ghi'*n
             text.translate(charmap)
 
 def test_main():
diff --git a/Lib/test/test_codecencodings_cn.py b/Lib/test/test_codecencodings_cn.py
index 96b0d77..e0e44d8 100644
--- a/Lib/test/test_codecencodings_cn.py
+++ b/Lib/test/test_codecencodings_cn.py
@@ -15,9 +15,9 @@
         # invalid bytes
         ("abc\x81\x81\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x81\x81\xc1\xc4", "replace", u"abc\ufffd\u804a"),
-        ("abc\x81\x81\xc1\xc4\xc8", "replace", u"abc\ufffd\u804a\ufffd"),
-        ("abc\x81\x81\xc1\xc4", "ignore",  u"abc\u804a"),
+        ("abc\x81\x81\xc1\xc4", "replace", "abc\ufffd\u804a"),
+        ("abc\x81\x81\xc1\xc4\xc8", "replace", "abc\ufffd\u804a\ufffd"),
+        ("abc\x81\x81\xc1\xc4", "ignore",  "abc\u804a"),
         ("\xc1\x64", "strict", None),
     )
 
@@ -28,11 +28,11 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u804a"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u804a\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\u804a"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u804a"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u804a\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\u804a"),
         ("\x83\x34\x83\x31", "strict", None),
-        (u"\u30fb", "strict", None),
+        ("\u30fb", "strict", None),
     )
 
 class Test_GB18030(test_multibytecodec_support.TestBase, unittest.TestCase):
@@ -42,11 +42,11 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u804a"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u804a\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\u804a"),
-        ("abc\x84\x39\x84\x39\xc1\xc4", "replace", u"abc\ufffd\u804a"),
-        (u"\u30fb", "strict", "\x819\xa79"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u804a"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u804a\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\u804a"),
+        ("abc\x84\x39\x84\x39\xc1\xc4", "replace", "abc\ufffd\u804a"),
+        ("\u30fb", "strict", "\x819\xa79"),
     )
     has_iso10646 = True
 
diff --git a/Lib/test/test_codecencodings_hk.py b/Lib/test/test_codecencodings_hk.py
index b1c2606..32ade71 100644
--- a/Lib/test/test_codecencodings_hk.py
+++ b/Lib/test/test_codecencodings_hk.py
@@ -15,9 +15,9 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u8b10"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u8b10\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\u8b10"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u8b10"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u8b10\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\u8b10"),
     )
 
 def test_main():
diff --git a/Lib/test/test_codecencodings_jp.py b/Lib/test/test_codecencodings_jp.py
index 5f81f41..3da093a 100644
--- a/Lib/test/test_codecencodings_jp.py
+++ b/Lib/test/test_codecencodings_jp.py
@@ -15,12 +15,12 @@
         # invalid bytes
         ("abc\x81\x00\x81\x00\x82\x84", "strict",  None),
         ("abc\xf8", "strict",  None),
-        ("abc\x81\x00\x82\x84", "replace", u"abc\ufffd\uff44"),
-        ("abc\x81\x00\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
-        ("abc\x81\x00\x82\x84", "ignore",  u"abc\uff44"),
+        ("abc\x81\x00\x82\x84", "replace", "abc\ufffd\uff44"),
+        ("abc\x81\x00\x82\x84\x88", "replace", "abc\ufffd\uff44\ufffd"),
+        ("abc\x81\x00\x82\x84", "ignore",  "abc\uff44"),
         # sjis vs cp932
-        ("\\\x7e", "replace", u"\\\x7e"),
-        ("\x81\x5f\x81\x61\x81\x7c", "replace", u"\uff3c\u2225\uff0d"),
+        ("\\\x7e", "replace", "\\\x7e"),
+        ("\x81\x5f\x81\x61\x81\x7c", "replace", "\uff3c\u2225\uff0d"),
     )
 
 class Test_EUC_JISX0213(test_multibytecodec_support.TestBase,
@@ -31,25 +31,25 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\u7956"),
-        ("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u7956"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u7956\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\u7956"),
+        ("abc\x8f\x83\x83", "replace", "abc\ufffd"),
         ("\xc1\x64", "strict", None),
-        ("\xa1\xc0", "strict", u"\uff3c"),
+        ("\xa1\xc0", "strict", "\uff3c"),
     )
     xmlcharnametest = (
-        u"\xab\u211c\xbb = \u2329\u1234\u232a",
+        "\xab\u211c\xbb = \u2329\u1234\u232a",
         "\xa9\xa8&real;\xa9\xb2 = &lang;&#4660;&rang;"
     )
 
 eucjp_commontests = (
     ("abc\x80\x80\xc1\xc4", "strict",  None),
     ("abc\xc8", "strict",  None),
-    ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
-    ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
-    ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\u7956"),
-    ("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
+    ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u7956"),
+    ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u7956\ufffd"),
+    ("abc\x80\x80\xc1\xc4", "ignore",  "abc\u7956"),
+    ("abc\x8f\x83\x83", "replace", "abc\ufffd"),
     ("\xc1\x64", "strict", None),
 )
 
@@ -58,25 +58,25 @@
     encoding = 'euc_jp'
     tstring = test_multibytecodec_support.load_teststring('euc_jp')
     codectests = eucjp_commontests + (
-        ("\xa1\xc0\\", "strict", u"\uff3c\\"),
-        (u"\xa5", "strict", "\x5c"),
-        (u"\u203e", "strict", "\x7e"),
+        ("\xa1\xc0\\", "strict", "\uff3c\\"),
+        ("\xa5", "strict", "\x5c"),
+        ("\u203e", "strict", "\x7e"),
     )
 
 shiftjis_commonenctests = (
     ("abc\x80\x80\x82\x84", "strict",  None),
     ("abc\xf8", "strict",  None),
-    ("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
-    ("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
-    ("abc\x80\x80\x82\x84def", "ignore",  u"abc\uff44def"),
+    ("abc\x80\x80\x82\x84", "replace", "abc\ufffd\uff44"),
+    ("abc\x80\x80\x82\x84\x88", "replace", "abc\ufffd\uff44\ufffd"),
+    ("abc\x80\x80\x82\x84def", "ignore",  "abc\uff44def"),
 )
 
 class Test_SJIS_COMPAT(test_multibytecodec_support.TestBase, unittest.TestCase):
     encoding = 'shift_jis'
     tstring = test_multibytecodec_support.load_teststring('shift_jis')
     codectests = shiftjis_commonenctests + (
-        ("\\\x7e", "strict", u"\\\x7e"),
-        ("\x81\x5f\x81\x61\x81\x7c", "strict", u"\uff3c\u2016\u2212"),
+        ("\\\x7e", "strict", "\\\x7e"),
+        ("\x81\x5f\x81\x61\x81\x7c", "strict", "\uff3c\u2016\u2212"),
     )
 
 class Test_SJISX0213(test_multibytecodec_support.TestBase, unittest.TestCase):
@@ -86,15 +86,15 @@
         # invalid bytes
         ("abc\x80\x80\x82\x84", "strict",  None),
         ("abc\xf8", "strict",  None),
-        ("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
-        ("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
-        ("abc\x80\x80\x82\x84def", "ignore",  u"abc\uff44def"),
+        ("abc\x80\x80\x82\x84", "replace", "abc\ufffd\uff44"),
+        ("abc\x80\x80\x82\x84\x88", "replace", "abc\ufffd\uff44\ufffd"),
+        ("abc\x80\x80\x82\x84def", "ignore",  "abc\uff44def"),
         # sjis vs cp932
-        ("\\\x7e", "replace", u"\xa5\u203e"),
-        ("\x81\x5f\x81\x61\x81\x7c", "replace", u"\x5c\u2016\u2212"),
+        ("\\\x7e", "replace", "\xa5\u203e"),
+        ("\x81\x5f\x81\x61\x81\x7c", "replace", "\x5c\u2016\u2212"),
     )
     xmlcharnametest = (
-        u"\xab\u211c\xbb = \u2329\u1234\u232a",
+        "\xab\u211c\xbb = \u2329\u1234\u232a",
         "\x85G&real;\x85Q = &lang;&#4660;&rang;"
     )
 
diff --git a/Lib/test/test_codecencodings_kr.py b/Lib/test/test_codecencodings_kr.py
index a30eaf9..92c6a80 100644
--- a/Lib/test/test_codecencodings_kr.py
+++ b/Lib/test/test_codecencodings_kr.py
@@ -15,9 +15,9 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\uc894"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\uc894\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\uc894"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\uc894"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\uc894\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\uc894"),
     )
 
 class Test_EUCKR(test_multibytecodec_support.TestBase, unittest.TestCase):
@@ -27,9 +27,9 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\uc894"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\uc894\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\uc894"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\uc894"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\uc894\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\uc894"),
     )
 
 class Test_JOHAB(test_multibytecodec_support.TestBase, unittest.TestCase):
@@ -39,9 +39,9 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\ucd27"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\ucd27\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\ucd27"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ucd27"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ucd27\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\ucd27"),
     )
 
 def test_main():
diff --git a/Lib/test/test_codecencodings_tw.py b/Lib/test/test_codecencodings_tw.py
index 983d06f..054ace0 100644
--- a/Lib/test/test_codecencodings_tw.py
+++ b/Lib/test/test_codecencodings_tw.py
@@ -15,9 +15,9 @@
         # invalid bytes
         ("abc\x80\x80\xc1\xc4", "strict",  None),
         ("abc\xc8", "strict",  None),
-        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u8b10"),
-        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u8b10\ufffd"),
-        ("abc\x80\x80\xc1\xc4", "ignore",  u"abc\u8b10"),
+        ("abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u8b10"),
+        ("abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u8b10\ufffd"),
+        ("abc\x80\x80\xc1\xc4", "ignore",  "abc\u8b10"),
     )
 
 def test_main():
diff --git a/Lib/test/test_codecmaps_jp.py b/Lib/test/test_codecmaps_jp.py
index 5466a98..58abb30 100644
--- a/Lib/test/test_codecmaps_jp.py
+++ b/Lib/test/test_codecmaps_jp.py
@@ -14,11 +14,11 @@
     mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \
                  'WINDOWS/CP932.TXT'
     supmaps = [
-        ('\x80', u'\u0080'),
-        ('\xa0', u'\uf8f0'),
-        ('\xfd', u'\uf8f1'),
-        ('\xfe', u'\uf8f2'),
-        ('\xff', u'\uf8f3'),
+        ('\x80', '\u0080'),
+        ('\xa0', '\uf8f0'),
+        ('\xfd', '\uf8f1'),
+        ('\xfe', '\uf8f2'),
+        ('\xff', '\uf8f3'),
     ]
     for i in range(0xa1, 0xe0):
         supmaps.append((chr(i), unichr(i+0xfec0)))
@@ -38,12 +38,12 @@
     mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE' \
                  '/EASTASIA/JIS/SHIFTJIS.TXT'
     pass_enctest = [
-        ('\x81_', u'\\'),
+        ('\x81_', '\\'),
     ]
     pass_dectest = [
-        ('\\', u'\xa5'),
-        ('~', u'\u203e'),
-        ('\x81_', u'\\'),
+        ('\\', '\xa5'),
+        ('~', '\u203e'),
+        ('\x81_', '\\'),
     ]
 
 class TestEUCJISX0213Map(test_multibytecodec_support.TestBase_Mapping,
diff --git a/Lib/test/test_codecmaps_kr.py b/Lib/test/test_codecmaps_kr.py
index 1b350b9..3f06187 100644
--- a/Lib/test/test_codecmaps_kr.py
+++ b/Lib/test/test_codecmaps_kr.py
@@ -30,8 +30,8 @@
     # but, in early 90s that is the only era used johab widely,
     # the most softwares implements it as REVERSE SOLIDUS.
     # So, we ignore the standard here.
-    pass_enctest = [('\\', u'\u20a9')]
-    pass_dectest = [('\\', u'\u20a9')]
+    pass_enctest = [('\\', '\u20a9')]
+    pass_dectest = [('\\', '\u20a9')]
 
 def test_main():
     test_support.run_unittest(__name__)
diff --git a/Lib/test/test_codecmaps_tw.py b/Lib/test/test_codecmaps_tw.py
index 143ae23..71402c4 100644
--- a/Lib/test/test_codecmaps_tw.py
+++ b/Lib/test/test_codecmaps_tw.py
@@ -20,8 +20,8 @@
     mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \
                  'WINDOWS/CP950.TXT'
     pass_enctest = [
-        ('\xa2\xcc', u'\u5341'),
-        ('\xa2\xce', u'\u5345'),
+        ('\xa2\xcc', '\u5341'),
+        ('\xa2\xce', '\u5345'),
     ]
 
 def test_main():
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index f7a9789..991b44a 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -64,41 +64,41 @@
         # entries from partialresults.
         q = Queue()
         r = codecs.getreader(self.encoding)(q)
-        result = u""
+        result = ""
         for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
             q.write(c)
             result += r.read()
             self.assertEqual(result, partialresult)
         # check that there's nothing left in the buffers
-        self.assertEqual(r.read(), u"")
+        self.assertEqual(r.read(), "")
         self.assertEqual(r.bytebuffer, "")
-        self.assertEqual(r.charbuffer, u"")
+        self.assertEqual(r.charbuffer, "")
 
         # do the check again, this time using a incremental decoder
         d = codecs.getincrementaldecoder(self.encoding)()
-        result = u""
+        result = ""
         for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
             result += d.decode(c)
             self.assertEqual(result, partialresult)
         # check that there's nothing left in the buffers
-        self.assertEqual(d.decode("", True), u"")
+        self.assertEqual(d.decode("", True), "")
         self.assertEqual(d.buffer, "")
 
         # Check whether the rest method works properly
         d.reset()
-        result = u""
+        result = ""
         for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
             result += d.decode(c)
             self.assertEqual(result, partialresult)
         # check that there's nothing left in the buffers
-        self.assertEqual(d.decode("", True), u"")
+        self.assertEqual(d.decode("", True), "")
         self.assertEqual(d.buffer, "")
 
         # check iterdecode()
         encoded = input.encode(self.encoding)
         self.assertEqual(
             input,
-            u"".join(codecs.iterdecode(encoded, self.encoding))
+            "".join(codecs.iterdecode(encoded, self.encoding))
         )
 
     def test_readline(self):
@@ -116,9 +116,9 @@
                 lines.append(line)
             return "|".join(lines)
 
-        s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
-        sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
-        sexpectednoends = u"foo|bar|baz|spam|eggs"
+        s = "foo\nbar\r\nbaz\rspam\u2028eggs"
+        sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
+        sexpectednoends = "foo|bar|baz|spam|eggs"
         self.assertEqual(readalllines(s, True), sexpected)
         self.assertEqual(readalllines(s, False), sexpectednoends)
         self.assertEqual(readalllines(s, True, 10), sexpected)
@@ -127,28 +127,28 @@
         # Test long lines (multiple calls to read() in readline())
         vw = []
         vwo = []
-        for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()):
-            vw.append((i*200)*u"\3042" + lineend)
-            vwo.append((i*200)*u"\3042")
+        for (i, lineend) in enumerate("\n \r\n \r \u2028".split()):
+            vw.append((i*200)*"\3042" + lineend)
+            vwo.append((i*200)*"\3042")
         self.assertEqual(readalllines("".join(vw), True), "".join(vw))
         self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
 
         # Test lines where the first read might end with \r, so the
         # reader has to look ahead whether this is a lone \r or a \r\n
         for size in xrange(80):
-            for lineend in u"\n \r\n \r \u2028".split():
-                s = 10*(size*u"a" + lineend + u"xxx\n")
+            for lineend in "\n \r\n \r \u2028".split():
+                s = 10*(size*"a" + lineend + "xxx\n")
                 reader = getreader(s)
                 for i in xrange(10):
                     self.assertEqual(
                         reader.readline(keepends=True),
-                        size*u"a" + lineend,
+                        size*"a" + lineend,
                     )
                 reader = getreader(s)
                 for i in xrange(10):
                     self.assertEqual(
                         reader.readline(keepends=False),
-                        size*u"a",
+                        size*"a",
                     )
 
     def test_bug1175396(self):
@@ -226,31 +226,31 @@
         reader = codecs.getreader(self.encoding)(q)
 
         # No lineends
-        writer.write(u"foo\r")
-        self.assertEqual(reader.readline(keepends=False), u"foo")
-        writer.write(u"\nbar\r")
-        self.assertEqual(reader.readline(keepends=False), u"")
-        self.assertEqual(reader.readline(keepends=False), u"bar")
-        writer.write(u"baz")
-        self.assertEqual(reader.readline(keepends=False), u"baz")
-        self.assertEqual(reader.readline(keepends=False), u"")
+        writer.write("foo\r")
+        self.assertEqual(reader.readline(keepends=False), "foo")
+        writer.write("\nbar\r")
+        self.assertEqual(reader.readline(keepends=False), "")
+        self.assertEqual(reader.readline(keepends=False), "bar")
+        writer.write("baz")
+        self.assertEqual(reader.readline(keepends=False), "baz")
+        self.assertEqual(reader.readline(keepends=False), "")
 
         # Lineends
-        writer.write(u"foo\r")
-        self.assertEqual(reader.readline(keepends=True), u"foo\r")
-        writer.write(u"\nbar\r")
-        self.assertEqual(reader.readline(keepends=True), u"\n")
-        self.assertEqual(reader.readline(keepends=True), u"bar\r")
-        writer.write(u"baz")
-        self.assertEqual(reader.readline(keepends=True), u"baz")
-        self.assertEqual(reader.readline(keepends=True), u"")
-        writer.write(u"foo\r\n")
-        self.assertEqual(reader.readline(keepends=True), u"foo\r\n")
+        writer.write("foo\r")
+        self.assertEqual(reader.readline(keepends=True), "foo\r")
+        writer.write("\nbar\r")
+        self.assertEqual(reader.readline(keepends=True), "\n")
+        self.assertEqual(reader.readline(keepends=True), "bar\r")
+        writer.write("baz")
+        self.assertEqual(reader.readline(keepends=True), "baz")
+        self.assertEqual(reader.readline(keepends=True), "")
+        writer.write("foo\r\n")
+        self.assertEqual(reader.readline(keepends=True), "foo\r\n")
 
     def test_bug1098990_a(self):
-        s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
-        s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
-        s3 = u"next line.\r\n"
+        s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
+        s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
+        s3 = "next line.\r\n"
 
         s = (s1+s2+s3).encode(self.encoding)
         stream = StringIO.StringIO(s)
@@ -258,14 +258,14 @@
         self.assertEqual(reader.readline(), s1)
         self.assertEqual(reader.readline(), s2)
         self.assertEqual(reader.readline(), s3)
-        self.assertEqual(reader.readline(), u"")
+        self.assertEqual(reader.readline(), "")
 
     def test_bug1098990_b(self):
-        s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
-        s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
-        s3 = u"stillokay:bbbbxx\r\n"
-        s4 = u"broken!!!!badbad\r\n"
-        s5 = u"againokay.\r\n"
+        s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
+        s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
+        s3 = "stillokay:bbbbxx\r\n"
+        s4 = "broken!!!!badbad\r\n"
+        s5 = "againokay.\r\n"
 
         s = (s1+s2+s3+s4+s5).encode(self.encoding)
         stream = StringIO.StringIO(s)
@@ -275,7 +275,7 @@
         self.assertEqual(reader.readline(), s3)
         self.assertEqual(reader.readline(), s4)
         self.assertEqual(reader.readline(), s5)
-        self.assertEqual(reader.readline(), u"")
+        self.assertEqual(reader.readline(), "")
 
 class UTF16Test(ReadTest):
     encoding = "utf-16"
@@ -288,15 +288,15 @@
         # encode some stream
         s = StringIO.StringIO()
         f = writer(s)
-        f.write(u"spam")
-        f.write(u"spam")
+        f.write("spam")
+        f.write("spam")
         d = s.getvalue()
         # check whether there is exactly one BOM in it
         self.assert_(d == self.spamle or d == self.spambe)
         # try to read it back
         s = StringIO.StringIO(d)
         f = reader(s)
-        self.assertEquals(f.read(), u"spamspam")
+        self.assertEquals(f.read(), "spamspam")
 
     def test_badbom(self):
         s = StringIO.StringIO("\xff\xff")
@@ -309,18 +309,18 @@
 
     def test_partial(self):
         self.check_partial(
-            u"\x00\xff\u0100\uffff",
+            "\x00\xff\u0100\uffff",
             [
-                u"", # first byte of BOM read
-                u"", # second byte of BOM read => byteorder known
-                u"",
-                u"\x00",
-                u"\x00",
-                u"\x00\xff",
-                u"\x00\xff",
-                u"\x00\xff\u0100",
-                u"\x00\xff\u0100",
-                u"\x00\xff\u0100\uffff",
+                "", # first byte of BOM read
+                "", # second byte of BOM read => byteorder known
+                "",
+                "\x00",
+                "\x00",
+                "\x00\xff",
+                "\x00\xff",
+                "\x00\xff\u0100",
+                "\x00\xff\u0100",
+                "\x00\xff\u0100\uffff",
             ]
         )
 
@@ -330,25 +330,25 @@
 
     def test_decoder_state(self):
         self.check_state_handling_decode(self.encoding,
-                                         u"spamspam", self.spamle)
+                                         "spamspam", self.spamle)
         self.check_state_handling_decode(self.encoding,
-                                         u"spamspam", self.spambe)
+                                         "spamspam", self.spambe)
 
 class UTF16LETest(ReadTest):
     encoding = "utf-16-le"
 
     def test_partial(self):
         self.check_partial(
-            u"\x00\xff\u0100\uffff",
+            "\x00\xff\u0100\uffff",
             [
-                u"",
-                u"\x00",
-                u"\x00",
-                u"\x00\xff",
-                u"\x00\xff",
-                u"\x00\xff\u0100",
-                u"\x00\xff\u0100",
-                u"\x00\xff\u0100\uffff",
+                "",
+                "\x00",
+                "\x00",
+                "\x00\xff",
+                "\x00\xff",
+                "\x00\xff\u0100",
+                "\x00\xff\u0100",
+                "\x00\xff\u0100\uffff",
             ]
         )
 
@@ -361,16 +361,16 @@
 
     def test_partial(self):
         self.check_partial(
-            u"\x00\xff\u0100\uffff",
+            "\x00\xff\u0100\uffff",
             [
-                u"",
-                u"\x00",
-                u"\x00",
-                u"\x00\xff",
-                u"\x00\xff",
-                u"\x00\xff\u0100",
-                u"\x00\xff\u0100",
-                u"\x00\xff\u0100\uffff",
+                "",
+                "\x00",
+                "\x00",
+                "\x00\xff",
+                "\x00\xff",
+                "\x00\xff\u0100",
+                "\x00\xff\u0100",
+                "\x00\xff\u0100\uffff",
             ]
         )
 
@@ -383,24 +383,24 @@
 
     def test_partial(self):
         self.check_partial(
-            u"\x00\xff\u07ff\u0800\uffff",
+            "\x00\xff\u07ff\u0800\uffff",
             [
-                u"\x00",
-                u"\x00",
-                u"\x00\xff",
-                u"\x00\xff",
-                u"\x00\xff\u07ff",
-                u"\x00\xff\u07ff",
-                u"\x00\xff\u07ff",
-                u"\x00\xff\u07ff\u0800",
-                u"\x00\xff\u07ff\u0800",
-                u"\x00\xff\u07ff\u0800",
-                u"\x00\xff\u07ff\u0800\uffff",
+                "\x00",
+                "\x00",
+                "\x00\xff",
+                "\x00\xff",
+                "\x00\xff\u07ff",
+                "\x00\xff\u07ff",
+                "\x00\xff\u07ff",
+                "\x00\xff\u07ff\u0800",
+                "\x00\xff\u07ff\u0800",
+                "\x00\xff\u07ff\u0800",
+                "\x00\xff\u07ff\u0800\uffff",
             ]
         )
 
     def test_decoder_state(self):
-        u = u"\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
+        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
         self.check_state_handling_decode(self.encoding,
                                          u, u.encode(self.encoding))
 
@@ -450,39 +450,39 @@
 
     def test_partial(self):
         self.check_partial(
-            u"\ufeff\x00\xff\u07ff\u0800\uffff",
+            "\ufeff\x00\xff\u07ff\u0800\uffff",
             [
-                u"",
-                u"",
-                u"", # First BOM has been read and skipped
-                u"",
-                u"",
-                u"\ufeff", # Second BOM has been read and emitted
-                u"\ufeff\x00", # "\x00" read and emitted
-                u"\ufeff\x00", # First byte of encoded u"\xff" read
-                u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
-                u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
-                u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
-                u"\ufeff\x00\xff\u07ff",
-                u"\ufeff\x00\xff\u07ff",
-                u"\ufeff\x00\xff\u07ff\u0800",
-                u"\ufeff\x00\xff\u07ff\u0800",
-                u"\ufeff\x00\xff\u07ff\u0800",
-                u"\ufeff\x00\xff\u07ff\u0800\uffff",
+                "",
+                "",
+                "", # First BOM has been read and skipped
+                "",
+                "",
+                "\ufeff", # Second BOM has been read and emitted
+                "\ufeff\x00", # "\x00" read and emitted
+                "\ufeff\x00", # First byte of encoded u"\xff" read
+                "\ufeff\x00\xff", # Second byte of encoded u"\xff" read
+                "\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
+                "\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
+                "\ufeff\x00\xff\u07ff",
+                "\ufeff\x00\xff\u07ff",
+                "\ufeff\x00\xff\u07ff\u0800",
+                "\ufeff\x00\xff\u07ff\u0800",
+                "\ufeff\x00\xff\u07ff\u0800",
+                "\ufeff\x00\xff\u07ff\u0800\uffff",
             ]
         )
 
     def test_bug1601501(self):
         # SF bug #1601501: check that the codec works with a buffer
-        unicode("\xef\xbb\xbf", "utf-8-sig")
+        str("\xef\xbb\xbf", "utf-8-sig")
 
     def test_bom(self):
         d = codecs.getincrementaldecoder("utf-8-sig")()
-        s = u"spam"
+        s = "spam"
         self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
 
     def test_decoder_state(self):
-        u = u"\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
+        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
         self.check_state_handling_decode(self.encoding,
                                          u, u.encode(self.encoding))
 
@@ -494,7 +494,7 @@
     def test_recoding(self):
         f = StringIO.StringIO()
         f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
-        f2.write(u"a")
+        f2.write("a")
         f2.close()
         # Python used to crash on this at exit because of a refcount
         # bug in _codecsmodule.c
@@ -502,104 +502,104 @@
 # From RFC 3492
 punycode_testcases = [
     # A Arabic (Egyptian):
-    (u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
-     u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
+    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
+     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
      "egbpdaj6bu4bxfgehfvwxn"),
     # B Chinese (simplified):
-    (u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
+    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
      "ihqwcrb4cv8a8dqg056pqjye"),
     # C Chinese (traditional):
-    (u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
+    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
      "ihqwctvzc91f659drss3x8bo0yb"),
     # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
-    (u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
-     u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
-     u"\u0065\u0073\u006B\u0079",
+    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
+     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
+     "\u0065\u0073\u006B\u0079",
      "Proprostnemluvesky-uyb24dma41a"),
     # E Hebrew:
-    (u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
-     u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
-     u"\u05D1\u05E8\u05D9\u05EA",
+    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
+     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
+     "\u05D1\u05E8\u05D9\u05EA",
      "4dbcagdahymbxekheh6e0a7fei0b"),
     # F Hindi (Devanagari):
-    (u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
-    u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
-    u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
-    u"\u0939\u0948\u0902",
+    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
+    "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
+    "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
+    "\u0939\u0948\u0902",
     "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
 
     #(G) Japanese (kanji and hiragana):
-    (u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
-    u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
+    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
+    "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
      "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
 
     # (H) Korean (Hangul syllables):
-    (u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
-     u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
-     u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
+    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
+     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
+     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
      "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
      "psd879ccm6fea98c"),
 
     # (I) Russian (Cyrillic):
-    (u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
-     u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
-     u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
-     u"\u0438",
+    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
+     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
+     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
+     "\u0438",
      "b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
 
     # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
-    (u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
-     u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
-     u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
-     u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
-     u"\u0061\u00F1\u006F\u006C",
+    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
+     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
+     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
+     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
+     "\u0061\u00F1\u006F\u006C",
      "PorqunopuedensimplementehablarenEspaol-fmd56a"),
 
     # (K) Vietnamese:
     #  T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
     #   <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
-    (u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
-     u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
-     u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
-     u"\u0056\u0069\u1EC7\u0074",
+    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
+     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
+     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
+     "\u0056\u0069\u1EC7\u0074",
      "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
 
     #(L) 3<nen>B<gumi><kinpachi><sensei>
-    (u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
+    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
      "3B-ww4c5e180e575a65lsy2b"),
 
     # (M) <amuro><namie>-with-SUPER-MONKEYS
-    (u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
-     u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
-     u"\u004F\u004E\u004B\u0045\u0059\u0053",
+    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
+     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
+     "\u004F\u004E\u004B\u0045\u0059\u0053",
      "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
 
     # (N) Hello-Another-Way-<sorezore><no><basho>
-    (u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
-     u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
-     u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
+    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
+     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
+     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
      "Hello-Another-Way--fc4qua05auwb3674vfr0b"),
 
     # (O) <hitotsu><yane><no><shita>2
-    (u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
+    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
      "2-u9tlzr9756bt3uc0v"),
 
     # (P) Maji<de>Koi<suru>5<byou><mae>
-    (u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
-     u"\u308B\u0035\u79D2\u524D",
+    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
+     "\u308B\u0035\u79D2\u524D",
      "MajiKoi5-783gue6qz075azm5e"),
 
      # (Q) <pafii>de<runba>
-    (u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
+    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
      "de-jg4avhby1noc0d"),
 
     # (R) <sono><supiido><de>
-    (u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
+    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
      "d9juau41awczczp"),
 
     # (S) -> $1.00 <-
-    (u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
-     u"\u003C\u002D",
+    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
+     "\u003C\u002D",
      "-> $1.00 <--")
     ]
 
@@ -627,9 +627,9 @@
         # points" above 0x10ffff on UCS-4 builds.
         if sys.maxunicode > 0xffff:
             ok = [
-                ("\x00\x10\xff\xff", u"\U0010ffff"),
-                ("\x00\x00\x01\x01", u"\U00000101"),
-                ("", u""),
+                ("\x00\x10\xff\xff", "\U0010ffff"),
+                ("\x00\x00\x01\x01", "\U00000101"),
+                ("", ""),
             ]
             not_ok = [
                 "\x7f\xff\xff\xff",
@@ -664,10 +664,10 @@
         if sys.maxunicode > 0xffff:
             codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
             decoder = codecs.getdecoder("unicode_internal")
-            ab = u"ab".encode("unicode_internal")
+            ab = "ab".encode("unicode_internal")
             ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
                 "UnicodeInternalTest")
-            self.assertEquals((u"ab", 12), ignored)
+            self.assertEquals(("ab", 12), ignored)
 
 # From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
 nameprep_tests = [
@@ -831,12 +831,12 @@
                 # Skipped
                 continue
             # The Unicode strings are given in UTF-8
-            orig = unicode(orig, "utf-8")
+            orig = str(orig, "utf-8")
             if prepped is None:
                 # Input contains prohibited characters
                 self.assertRaises(UnicodeError, nameprep, orig)
             else:
-                prepped = unicode(prepped, "utf-8")
+                prepped = str(prepped, "utf-8")
                 try:
                     self.assertEquals(nameprep(orig), prepped)
                 except Exception as e:
@@ -844,97 +844,97 @@
 
 class IDNACodecTest(unittest.TestCase):
     def test_builtin_decode(self):
-        self.assertEquals(unicode("python.org", "idna"), u"python.org")
-        self.assertEquals(unicode("python.org.", "idna"), u"python.org.")
-        self.assertEquals(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
-        self.assertEquals(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
+        self.assertEquals(str("python.org", "idna"), "python.org")
+        self.assertEquals(str("python.org.", "idna"), "python.org.")
+        self.assertEquals(str("xn--pythn-mua.org", "idna"), "pyth\xf6n.org")
+        self.assertEquals(str("xn--pythn-mua.org.", "idna"), "pyth\xf6n.org.")
 
     def test_builtin_encode(self):
-        self.assertEquals(u"python.org".encode("idna"), "python.org")
+        self.assertEquals("python.org".encode("idna"), "python.org")
         self.assertEquals("python.org.".encode("idna"), "python.org.")
-        self.assertEquals(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
-        self.assertEquals(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
+        self.assertEquals("pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
+        self.assertEquals("pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
 
     def test_stream(self):
         import StringIO
         r = codecs.getreader("idna")(StringIO.StringIO("abc"))
         r.read(3)
-        self.assertEquals(r.read(), u"")
+        self.assertEquals(r.read(), "")
 
     def test_incremental_decode(self):
         self.assertEquals(
             "".join(codecs.iterdecode("python.org", "idna")),
-            u"python.org"
-        )
-        self.assertEquals(
-            "".join(codecs.iterdecode("python.org.", "idna")),
-            u"python.org."
-        )
-        self.assertEquals(
-            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
-            u"pyth\xf6n.org."
-        )
-        self.assertEquals(
-            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
-            u"pyth\xf6n.org."
-        )
-
-        decoder = codecs.getincrementaldecoder("idna")()
-        self.assertEquals(decoder.decode("xn--xam", ), u"")
-        self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
-        self.assertEquals(decoder.decode(u"rg"), u"")
-        self.assertEquals(decoder.decode(u"", True), u"org")
-
-        decoder.reset()
-        self.assertEquals(decoder.decode("xn--xam", ), u"")
-        self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
-        self.assertEquals(decoder.decode("rg."), u"org.")
-        self.assertEquals(decoder.decode("", True), u"")
-
-    def test_incremental_encode(self):
-        self.assertEquals(
-            "".join(codecs.iterencode(u"python.org", "idna")),
             "python.org"
         )
         self.assertEquals(
-            "".join(codecs.iterencode(u"python.org.", "idna")),
+            "".join(codecs.iterdecode("python.org.", "idna")),
             "python.org."
         )
         self.assertEquals(
-            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
+            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
+            "pyth\xf6n.org."
+        )
+        self.assertEquals(
+            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
+            "pyth\xf6n.org."
+        )
+
+        decoder = codecs.getincrementaldecoder("idna")()
+        self.assertEquals(decoder.decode("xn--xam", ), "")
+        self.assertEquals(decoder.decode("ple-9ta.o", ), "\xe4xample.")
+        self.assertEquals(decoder.decode("rg"), "")
+        self.assertEquals(decoder.decode("", True), "org")
+
+        decoder.reset()
+        self.assertEquals(decoder.decode("xn--xam", ), "")
+        self.assertEquals(decoder.decode("ple-9ta.o", ), "\xe4xample.")
+        self.assertEquals(decoder.decode("rg."), "org.")
+        self.assertEquals(decoder.decode("", True), "")
+
+    def test_incremental_encode(self):
+        self.assertEquals(
+            "".join(codecs.iterencode("python.org", "idna")),
+            "python.org"
+        )
+        self.assertEquals(
+            "".join(codecs.iterencode("python.org.", "idna")),
+            "python.org."
+        )
+        self.assertEquals(
+            "".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
             "xn--pythn-mua.org."
         )
         self.assertEquals(
-            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
+            "".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
             "xn--pythn-mua.org."
         )
 
         encoder = codecs.getincrementalencoder("idna")()
-        self.assertEquals(encoder.encode(u"\xe4x"), "")
-        self.assertEquals(encoder.encode(u"ample.org"), "xn--xample-9ta.")
-        self.assertEquals(encoder.encode(u"", True), "org")
+        self.assertEquals(encoder.encode("\xe4x"), "")
+        self.assertEquals(encoder.encode("ample.org"), "xn--xample-9ta.")
+        self.assertEquals(encoder.encode("", True), "org")
 
         encoder.reset()
-        self.assertEquals(encoder.encode(u"\xe4x"), "")
-        self.assertEquals(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
-        self.assertEquals(encoder.encode(u"", True), "")
+        self.assertEquals(encoder.encode("\xe4x"), "")
+        self.assertEquals(encoder.encode("ample.org."), "xn--xample-9ta.org.")
+        self.assertEquals(encoder.encode("", True), "")
 
 class CodecsModuleTest(unittest.TestCase):
 
     def test_decode(self):
         self.assertEquals(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
-                          u'\xe4\xf6\xfc')
+                          '\xe4\xf6\xfc')
         self.assertRaises(TypeError, codecs.decode)
-        self.assertEquals(codecs.decode('abc'), u'abc')
+        self.assertEquals(codecs.decode('abc'), 'abc')
         self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')
 
     def test_encode(self):
-        self.assertEquals(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
+        self.assertEquals(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                           '\xe4\xf6\xfc')
         self.assertRaises(TypeError, codecs.encode)
         self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
-        self.assertEquals(codecs.encode(u'abc'), 'abc')
-        self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
+        self.assertEquals(codecs.encode('abc'), 'abc')
+        self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
 
     def test_register(self):
         self.assertRaises(TypeError, codecs.register)
@@ -969,7 +969,7 @@
 
     def test_readlines(self):
         f = self.reader(self.stream)
-        self.assertEquals(f.readlines(), [u'\ud55c\n', u'\uae00'])
+        self.assertEquals(f.readlines(), ['\ud55c\n', '\uae00'])
 
 class EncodedFileTest(unittest.TestCase):
 
@@ -1154,7 +1154,7 @@
 
 class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
     def test_basics(self):
-        s = u"abc123" # all codecs should be able to encode these
+        s = "abc123" # all codecs should be able to encode these
         for encoding in all_unicode_encodings:
             name = codecs.lookup(encoding).name
             if encoding.endswith("_codec"):
@@ -1178,7 +1178,7 @@
                     encodedresult += q.read()
                 q = Queue()
                 reader = codecs.getreader(encoding)(q)
-                decodedresult = u""
+                decodedresult = ""
                 for c in encodedresult:
                     q.write(c)
                     decodedresult += reader.read()
@@ -1197,9 +1197,9 @@
                     encodedresult = ""
                     for c in s:
                         encodedresult += encoder.encode(c)
-                    encodedresult += encoder.encode(u"", True)
+                    encodedresult += encoder.encode("", True)
                     decoder = codecs.getincrementaldecoder(encoding)()
-                    decodedresult = u""
+                    decodedresult = ""
                     for c in encodedresult:
                         decodedresult += decoder.decode(c)
                     decodedresult += decoder.decode("", True)
@@ -1209,21 +1209,21 @@
                     encodedresult = ""
                     for c in s:
                         encodedresult += cencoder.encode(c)
-                    encodedresult += cencoder.encode(u"", True)
+                    encodedresult += cencoder.encode("", True)
                     cdecoder = _testcapi.codec_incrementaldecoder(encoding)
-                    decodedresult = u""
+                    decodedresult = ""
                     for c in encodedresult:
                         decodedresult += cdecoder.decode(c)
                     decodedresult += cdecoder.decode("", True)
                     self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
 
                     # check iterencode()/iterdecode()
-                    result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
+                    result = "".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
                     self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
 
                     # check iterencode()/iterdecode() with empty string
-                    result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
-                    self.assertEqual(result, u"")
+                    result = "".join(codecs.iterdecode(codecs.iterencode("", encoding), encoding))
+                    self.assertEqual(result, "")
 
                 if encoding not in only_strict_mode:
                     # check incremental decoder/encoder with errors argument
@@ -1235,17 +1235,17 @@
                     else:
                         encodedresult = "".join(encoder.encode(c) for c in s)
                         decoder = codecs.getincrementaldecoder(encoding)("ignore")
-                        decodedresult = u"".join(decoder.decode(c) for c in encodedresult)
+                        decodedresult = "".join(decoder.decode(c) for c in encodedresult)
                         self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
 
                         encodedresult = "".join(cencoder.encode(c) for c in s)
                         cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
-                        decodedresult = u"".join(cdecoder.decode(c) for c in encodedresult)
+                        decodedresult = "".join(cdecoder.decode(c) for c in encodedresult)
                         self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
 
     def test_seek(self):
         # all codecs should be able to encode these
-        s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
+        s = "%s\n%s\n" % (100*"abc123", 100*"def456")
         for encoding in all_unicode_encodings:
             if encoding == "idna": # FIXME: See SF bug #1163178
                 continue
@@ -1278,7 +1278,7 @@
 
     def test_decoder_state(self):
         # Check that getstate() and setstate() handle the state properly
-        u = u"abc123"
+        u = "abc123"
         for encoding in all_unicode_encodings:
             if encoding not in broken_incremental_coders:
                 self.check_state_handling_decode(encoding, u, u.encode(encoding))
@@ -1296,34 +1296,34 @@
 class CharmapTest(unittest.TestCase):
     def test_decode_with_string_map(self):
         self.assertEquals(
-            codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"),
-            (u"abc", 3)
+            codecs.charmap_decode("\x00\x01\x02", "strict", "abc"),
+            ("abc", 3)
         )
 
         self.assertEquals(
-            codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
-            (u"ab\ufffd", 3)
+            codecs.charmap_decode("\x00\x01\x02", "replace", "ab"),
+            ("ab\ufffd", 3)
         )
 
         self.assertEquals(
-            codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"),
-            (u"ab\ufffd", 3)
+            codecs.charmap_decode("\x00\x01\x02", "replace", "ab\ufffe"),
+            ("ab\ufffd", 3)
         )
 
         self.assertEquals(
-            codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"),
-            (u"ab", 3)
+            codecs.charmap_decode("\x00\x01\x02", "ignore", "ab"),
+            ("ab", 3)
         )
 
         self.assertEquals(
-            codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"),
-            (u"ab", 3)
+            codecs.charmap_decode("\x00\x01\x02", "ignore", "ab\ufffe"),
+            ("ab", 3)
         )
 
         allbytes = "".join(chr(i) for i in xrange(256))
         self.assertEquals(
-            codecs.charmap_decode(allbytes, "ignore", u""),
-            (u"", len(allbytes))
+            codecs.charmap_decode(allbytes, "ignore", ""),
+            ("", len(allbytes))
         )
 
 class WithStmtTest(unittest.TestCase):
@@ -1337,7 +1337,7 @@
         info = codecs.lookup("utf-8")
         with codecs.StreamReaderWriter(f, info.streamreader,
                                        info.streamwriter, 'strict') as srw:
-            self.assertEquals(srw.read(), u"\xfc")
+            self.assertEquals(srw.read(), "\xfc")
 
 
 def test_main():
diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py
index ae55485..5cfe27c 100644
--- a/Lib/test/test_compile.py
+++ b/Lib/test/test_compile.py
@@ -318,7 +318,7 @@
         self.assertNotEqual(id(f1.__code__), id(f2.__code__))
 
     def test_unicode_encoding(self):
-        code = u"# -*- coding: utf-8 -*-\npass\n"
+        code = "# -*- coding: utf-8 -*-\npass\n"
         self.assertRaises(SyntaxError, compile, code, "tmp", "exec")
 
     def test_subscripts(self):
diff --git a/Lib/test/test_complex.py b/Lib/test/test_complex.py
index 0d034f5..39520f1 100644
--- a/Lib/test/test_complex.py
+++ b/Lib/test/test_complex.py
@@ -227,7 +227,7 @@
 
         self.assertEqual(complex("  3.14+J  "), 3.14+1j)
         if test_support.have_unicode:
-            self.assertEqual(complex(unicode("  3.14+J  ")), 3.14+1j)
+            self.assertEqual(complex(str("  3.14+J  ")), 3.14+1j)
 
         # SF bug 543840:  complex(string) accepts strings with \0
         # Fixed in 2.3.
@@ -251,8 +251,8 @@
         self.assertRaises(ValueError, complex, "1+(2j)")
         self.assertRaises(ValueError, complex, "(1+2j)123")
         if test_support.have_unicode:
-            self.assertRaises(ValueError, complex, unicode("1"*500))
-            self.assertRaises(ValueError, complex, unicode("x"))
+            self.assertRaises(ValueError, complex, str("1"*500))
+            self.assertRaises(ValueError, complex, str("x"))
 
         class EvilExc(Exception):
             pass
diff --git a/Lib/test/test_contains.py b/Lib/test/test_contains.py
index e6f5cf7..c902421 100644
--- a/Lib/test/test_contains.py
+++ b/Lib/test/test_contains.py
@@ -59,31 +59,31 @@
 
     # Test char in Unicode
 
-    check('c' in unicode('abc'), "'c' not in u'abc'")
-    check('d' not in unicode('abc'), "'d' in u'abc'")
+    check('c' in str('abc'), "'c' not in u'abc'")
+    check('d' not in str('abc'), "'d' in u'abc'")
 
-    check('' in unicode(''), "'' not in u''")
-    check(unicode('') in '', "u'' not in ''")
-    check(unicode('') in unicode(''), "u'' not in u''")
-    check('' in unicode('abc'), "'' not in u'abc'")
-    check(unicode('') in 'abc', "u'' not in 'abc'")
-    check(unicode('') in unicode('abc'), "u'' not in u'abc'")
+    check('' in str(''), "'' not in u''")
+    check(str('') in '', "u'' not in ''")
+    check(str('') in str(''), "u'' not in u''")
+    check('' in str('abc'), "'' not in u'abc'")
+    check(str('') in 'abc', "u'' not in 'abc'")
+    check(str('') in str('abc'), "u'' not in u'abc'")
 
     try:
-        None in unicode('abc')
+        None in str('abc')
         check(0, "None in u'abc' did not raise error")
     except TypeError:
         pass
 
     # Test Unicode char in Unicode
 
-    check(unicode('c') in unicode('abc'), "u'c' not in u'abc'")
-    check(unicode('d') not in unicode('abc'), "u'd' in u'abc'")
+    check(str('c') in str('abc'), "u'c' not in u'abc'")
+    check(str('d') not in str('abc'), "u'd' in u'abc'")
 
     # Test Unicode char in string
 
-    check(unicode('c') in 'abc', "u'c' not in 'abc'")
-    check(unicode('d') not in 'abc', "u'd' in 'abc'")
+    check(str('c') in 'abc', "u'c' not in 'abc'")
+    check(str('d') not in 'abc', "u'd' in 'abc'")
 
 # A collection of tests on builtin sequence types
 a = range(10)
diff --git a/Lib/test/test_cookielib.py b/Lib/test/test_cookielib.py
index cb9dffb..a4d0fe2 100644
--- a/Lib/test/test_cookielib.py
+++ b/Lib/test/test_cookielib.py
@@ -570,7 +570,7 @@
             ("/foo\031/bar", "/foo%19/bar"),
             ("/\175foo/bar", "/%7Dfoo/bar"),
             # unicode
-            (u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"),  # UTF-8 encoded
+            ("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"),  # UTF-8 encoded
             ]
         for arg, result in cases:
             self.assertEquals(escape_path(arg), result)
@@ -1540,7 +1540,7 @@
         self.assert_(not cookie)
 
         # unicode URL doesn't raise exception
-        cookie = interact_2965(c, u"http://www.acme.com/\xfc")
+        cookie = interact_2965(c, "http://www.acme.com/\xfc")
 
     def test_mozilla(self):
         # Save / load Mozilla/Netscape cookie file format.
diff --git a/Lib/test/test_copy.py b/Lib/test/test_copy.py
index dbca158..4871007 100644
--- a/Lib/test/test_copy.py
+++ b/Lib/test/test_copy.py
@@ -83,7 +83,7 @@
         def f():
             pass
         tests = [None, 42, 2**100, 3.14, True, False, 1j,
-                 "hello", u"hello\u1234", f.__code__,
+                 "hello", "hello\u1234", f.__code__,
                  NewStyle, xrange(10), Classic, max]
         for x in tests:
             self.assert_(copy.copy(x) is x, repr(x))
@@ -256,7 +256,7 @@
         def f():
             pass
         tests = [None, 42, 2**100, 3.14, True, False, 1j,
-                 "hello", u"hello\u1234", f.__code__,
+                 "hello", "hello\u1234", f.__code__,
                  NewStyle, xrange(10), Classic, max]
         for x in tests:
             self.assert_(copy.deepcopy(x) is x, repr(x))
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index aba1c74..2132b8d 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -264,7 +264,7 @@
     del junk
 
     # Just make sure these don't blow up!
-    for arg in 2, 2, 2j, 2e0, [2], "2", u"2", (2,), {2:2}, type, test_dir:
+    for arg in 2, 2, 2j, 2e0, [2], "2", "2", (2,), {2:2}, type, test_dir:
         dir(arg)
 
     # Test dir on custom classes. Since these have object as a
@@ -1100,25 +1100,25 @@
 
     # Test unicode slot names
     try:
-        unicode
+        str
     except NameError:
         pass
     else:
         # Test a single unicode string is not expanded as a sequence.
         class C(object):
-            __slots__ = unicode("abc")
+            __slots__ = str("abc")
         c = C()
         c.abc = 5
         vereq(c.abc, 5)
 
         # _unicode_to_string used to modify slots in certain circumstances
-        slots = (unicode("foo"), unicode("bar"))
+        slots = (str("foo"), str("bar"))
         class C(object):
             __slots__ = slots
         x = C()
         x.foo = 5
         vereq(x.foo, 5)
-        veris(type(slots[0]), unicode)
+        veris(type(slots[0]), str)
         # this used to leak references
         try:
             class C(object):
@@ -2301,64 +2301,64 @@
     verify(s.lower().__class__ is str)
     vereq(s.lower(), base)
 
-    class madunicode(unicode):
+    class madunicode(str):
         _rev = None
         def rev(self):
             if self._rev is not None:
                 return self._rev
             L = list(self)
             L.reverse()
-            self._rev = self.__class__(u"".join(L))
+            self._rev = self.__class__("".join(L))
             return self._rev
     u = madunicode("ABCDEF")
-    vereq(u, u"ABCDEF")
-    vereq(u.rev(), madunicode(u"FEDCBA"))
-    vereq(u.rev().rev(), madunicode(u"ABCDEF"))
-    base = u"12345"
+    vereq(u, "ABCDEF")
+    vereq(u.rev(), madunicode("FEDCBA"))
+    vereq(u.rev().rev(), madunicode("ABCDEF"))
+    base = "12345"
     u = madunicode(base)
-    vereq(unicode(u), base)
-    verify(unicode(u).__class__ is unicode)
+    vereq(str(u), base)
+    verify(str(u).__class__ is str)
     vereq(hash(u), hash(base))
     vereq({u: 1}[base], 1)
     vereq({base: 1}[u], 1)
-    verify(u.strip().__class__ is unicode)
+    verify(u.strip().__class__ is str)
     vereq(u.strip(), base)
-    verify(u.lstrip().__class__ is unicode)
+    verify(u.lstrip().__class__ is str)
     vereq(u.lstrip(), base)
-    verify(u.rstrip().__class__ is unicode)
+    verify(u.rstrip().__class__ is str)
     vereq(u.rstrip(), base)
-    verify(u.replace(u"x", u"x").__class__ is unicode)
-    vereq(u.replace(u"x", u"x"), base)
-    verify(u.replace(u"xy", u"xy").__class__ is unicode)
-    vereq(u.replace(u"xy", u"xy"), base)
-    verify(u.center(len(u)).__class__ is unicode)
+    verify(u.replace("x", "x").__class__ is str)
+    vereq(u.replace("x", "x"), base)
+    verify(u.replace("xy", "xy").__class__ is str)
+    vereq(u.replace("xy", "xy"), base)
+    verify(u.center(len(u)).__class__ is str)
     vereq(u.center(len(u)), base)
-    verify(u.ljust(len(u)).__class__ is unicode)
+    verify(u.ljust(len(u)).__class__ is str)
     vereq(u.ljust(len(u)), base)
-    verify(u.rjust(len(u)).__class__ is unicode)
+    verify(u.rjust(len(u)).__class__ is str)
     vereq(u.rjust(len(u)), base)
-    verify(u.lower().__class__ is unicode)
+    verify(u.lower().__class__ is str)
     vereq(u.lower(), base)
-    verify(u.upper().__class__ is unicode)
+    verify(u.upper().__class__ is str)
     vereq(u.upper(), base)
-    verify(u.capitalize().__class__ is unicode)
+    verify(u.capitalize().__class__ is str)
     vereq(u.capitalize(), base)
-    verify(u.title().__class__ is unicode)
+    verify(u.title().__class__ is str)
     vereq(u.title(), base)
-    verify((u + u"").__class__ is unicode)
-    vereq(u + u"", base)
-    verify((u"" + u).__class__ is unicode)
-    vereq(u"" + u, base)
-    verify((u * 0).__class__ is unicode)
-    vereq(u * 0, u"")
-    verify((u * 1).__class__ is unicode)
+    verify((u + "").__class__ is str)
+    vereq(u + "", base)
+    verify(("" + u).__class__ is str)
+    vereq("" + u, base)
+    verify((u * 0).__class__ is str)
+    vereq(u * 0, "")
+    verify((u * 1).__class__ is str)
     vereq(u * 1, base)
-    verify((u * 2).__class__ is unicode)
+    verify((u * 2).__class__ is str)
     vereq(u * 2, base + base)
-    verify(u[:].__class__ is unicode)
+    verify(u[:].__class__ is str)
     vereq(u[:], base)
-    verify(u[0:0].__class__ is unicode)
-    vereq(u[0:0], u"")
+    verify(u[0:0].__class__ is str)
+    vereq(u[0:0], "")
 
     class sublist(list):
         pass
@@ -2437,12 +2437,12 @@
     vereq(int(x=3), 3)
     vereq(complex(imag=42, real=666), complex(666, 42))
     vereq(str(object=500), '500')
-    vereq(unicode(string='abc', errors='strict'), u'abc')
+    vereq(str(string='abc', errors='strict'), 'abc')
     vereq(tuple(sequence=range(3)), (0, 1, 2))
     vereq(list(sequence=(0, 1, 2)), range(3))
     # note: as of Python 2.3, dict() no longer has an "items" keyword arg
 
-    for constructor in (int, float, int, complex, str, unicode,
+    for constructor in (int, float, int, complex, str, str,
                         tuple, list, file):
         try:
             constructor(bogus_keyword_arg=1)
@@ -2719,13 +2719,13 @@
     class H(object):
         __slots__ = ["b", "a"]
     try:
-        unicode
+        str
     except NameError:
         class I(object):
             __slots__ = ["a", "b"]
     else:
         class I(object):
-            __slots__ = [unicode("a"), unicode("b")]
+            __slots__ = [str("a"), str("b")]
     class J(object):
         __slots__ = ["c", "b"]
     class K(object):
@@ -3124,9 +3124,9 @@
 
     # It's not clear that unicode will continue to support the character
     # buffer interface, and this test will fail if that's taken away.
-    class MyUni(unicode):
+    class MyUni(str):
         pass
-    base = u'abc'
+    base = 'abc'
     m = MyUni(base)
     vereq(binascii.b2a_hex(m), binascii.b2a_hex(base))
 
diff --git a/Lib/test/test_doctest2.py b/Lib/test/test_doctest2.py
index a7d548c..eb39ab9 100644
--- a/Lib/test/test_doctest2.py
+++ b/Lib/test/test_doctest2.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-u"""A module to test whether doctest recognizes some 2.2 features,
+"""A module to test whether doctest recognizes some 2.2 features,
 like static and class methods.
 
 >>> print('yup')  # 1
@@ -15,7 +15,7 @@
 from test import test_support
 
 class C(object):
-    u"""Class C.
+    """Class C.
 
     >>> print(C())  # 2
     42
diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py
index 5a22297..9e7d15b 100644
--- a/Lib/test/test_exceptions.py
+++ b/Lib/test/test_exceptions.py
@@ -251,19 +251,19 @@
                  'print_file_and_line' : None, 'msg' : 'msgStr',
                  'filename' : None, 'lineno' : None, 'offset' : None}),
             (UnicodeError, (), {'message' : '', 'args' : (),}),
-            (UnicodeEncodeError, ('ascii', u'a', 0, 1, 'ordinal not in range'),
-                {'message' : '', 'args' : ('ascii', u'a', 0, 1,
+            (UnicodeEncodeError, ('ascii', 'a', 0, 1, 'ordinal not in range'),
+                {'message' : '', 'args' : ('ascii', 'a', 0, 1,
                                            'ordinal not in range'),
-                 'encoding' : 'ascii', 'object' : u'a',
+                 'encoding' : 'ascii', 'object' : 'a',
                  'start' : 0, 'reason' : 'ordinal not in range'}),
             (UnicodeDecodeError, ('ascii', '\xff', 0, 1, 'ordinal not in range'),
                 {'message' : '', 'args' : ('ascii', '\xff', 0, 1,
                                            'ordinal not in range'),
                  'encoding' : 'ascii', 'object' : '\xff',
                  'start' : 0, 'reason' : 'ordinal not in range'}),
-            (UnicodeTranslateError, (u"\u3042", 0, 1, "ouch"),
-                {'message' : '', 'args' : (u'\u3042', 0, 1, 'ouch'),
-                 'object' : u'\u3042', 'reason' : 'ouch',
+            (UnicodeTranslateError, ("\u3042", 0, 1, "ouch"),
+                {'message' : '', 'args' : ('\u3042', 0, 1, 'ouch'),
+                 'object' : '\u3042', 'reason' : 'ouch',
                  'start' : 0, 'end' : 1}),
         ]
         try:
@@ -334,9 +334,9 @@
         # Make sure both instances and classes have a str and unicode
         # representation.
         self.failUnless(str(Exception))
-        self.failUnless(unicode(Exception))
+        self.failUnless(str(Exception))
         self.failUnless(str(Exception('a')))
-        self.failUnless(unicode(Exception(u'a')))
+        self.failUnless(str(Exception('a')))
 
     def testExceptionCleanup(self):
         # Make sure "except V as N" exceptions are cleaned up properly
diff --git a/Lib/test/test_file.py b/Lib/test/test_file.py
index f682f89..95e9b3e 100644
--- a/Lib/test/test_file.py
+++ b/Lib/test/test_file.py
@@ -145,7 +145,7 @@
 
     def testUnicodeOpen(self):
         # verify repr works for unicode too
-        f = open(unicode(TESTFN), "w")
+        f = open(str(TESTFN), "w")
         self.assert_(repr(f).startswith("<open file u'" + TESTFN))
         f.close()
         os.unlink(TESTFN)
diff --git a/Lib/test/test_fileinput.py b/Lib/test/test_fileinput.py
index 10d3cfc..e4b477a 100644
--- a/Lib/test/test_fileinput.py
+++ b/Lib/test/test_fileinput.py
@@ -160,7 +160,7 @@
             encoding = sys.getfilesystemencoding()
             if encoding is None:
                 encoding = 'ascii'
-            fi = FileInput(files=unicode(t1, encoding))
+            fi = FileInput(files=str(t1, encoding))
             lines = list(fi)
             self.assertEqual(lines, ["A\n", "B"])
         finally:
diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py
index 4d969f5..56fef11 100644
--- a/Lib/test/test_fileio.py
+++ b/Lib/test/test_fileio.py
@@ -149,7 +149,7 @@
 
     def testUnicodeOpen(self):
         # verify repr works for unicode too
-        f = _fileio._FileIO(unicode(TESTFN), "w")
+        f = _fileio._FileIO(str(TESTFN), "w")
         f.close()
         os.unlink(TESTFN)
 
diff --git a/Lib/test/test_format.py b/Lib/test/test_format.py
index 658a302..a006bbf 100644
--- a/Lib/test/test_format.py
+++ b/Lib/test/test_format.py
@@ -35,7 +35,7 @@
 def testboth(formatstr, *args):
     testformat(formatstr, *args)
     if have_unicode:
-        testformat(unicode(formatstr), *args)
+        testformat(str(formatstr), *args)
 
 
 testboth("%.1d", (1,), "1")
@@ -216,18 +216,18 @@
 test_exc('abc %a', 1, ValueError,
          "unsupported format character 'a' (0x61) at index 5")
 if have_unicode:
-    test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
+    test_exc(str('abc %\u3000','raw-unicode-escape'), 1, ValueError,
              "unsupported format character '?' (0x3000) at index 5")
 
 test_exc('%d', '1', TypeError, "int argument required, not str")
 test_exc('%g', '1', TypeError, "float argument required, not str")
 test_exc('no format', '1', TypeError,
          "not all arguments converted during string formatting")
-test_exc('no format', u'1', TypeError,
+test_exc('no format', '1', TypeError,
          "not all arguments converted during string formatting")
-test_exc(u'no format', '1', TypeError,
+test_exc('no format', '1', TypeError,
          "not all arguments converted during string formatting")
-test_exc(u'no format', u'1', TypeError,
+test_exc('no format', '1', TypeError,
          "not all arguments converted during string formatting")
 
 class Foobar(int):
diff --git a/Lib/test/test_getargs.py b/Lib/test/test_getargs.py
index 4ce34bc..21bfeeb 100644
--- a/Lib/test/test_getargs.py
+++ b/Lib/test/test_getargs.py
@@ -19,6 +19,6 @@
 
 if have_unicode:
     try:
-        marshal.loads(unicode(r"\222", 'unicode-escape'))
+        marshal.loads(str(r"\222", 'unicode-escape'))
     except UnicodeError:
         pass
diff --git a/Lib/test/test_gettext.py b/Lib/test/test_gettext.py
index ab6bc9a..676d2fe 100644
--- a/Lib/test/test_gettext.py
+++ b/Lib/test/test_gettext.py
@@ -95,33 +95,33 @@
         eq = self.assertEqual
         # test some translations
         eq(_('albatross'), 'albatross')
-        eq(_(u'mullusk'), 'bacon')
+        eq(_('mullusk'), 'bacon')
         eq(_(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
-        eq(_(ur'nudge nudge'), 'wink wink')
+        eq(_(r'nudge nudge'), 'wink wink')
 
     def test_double_quotes(self):
         eq = self.assertEqual
         # double quotes
         eq(_("albatross"), 'albatross')
-        eq(_(u"mullusk"), 'bacon')
+        eq(_("mullusk"), 'bacon')
         eq(_(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
-        eq(_(ur"nudge nudge"), 'wink wink')
+        eq(_(r"nudge nudge"), 'wink wink')
 
     def test_triple_single_quotes(self):
         eq = self.assertEqual
         # triple single quotes
         eq(_('''albatross'''), 'albatross')
-        eq(_(u'''mullusk'''), 'bacon')
+        eq(_('''mullusk'''), 'bacon')
         eq(_(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
-        eq(_(ur'''nudge nudge'''), 'wink wink')
+        eq(_(r'''nudge nudge'''), 'wink wink')
 
     def test_triple_double_quotes(self):
         eq = self.assertEqual
         # triple double quotes
         eq(_("""albatross"""), 'albatross')
-        eq(_(u"""mullusk"""), 'bacon')
+        eq(_("""mullusk"""), 'bacon')
         eq(_(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
-        eq(_(ur"""nudge nudge"""), 'wink wink')
+        eq(_(r"""nudge nudge"""), 'wink wink')
 
     def test_multiline_strings(self):
         eq = self.assertEqual
@@ -143,11 +143,11 @@
         t.install()
         eq(_('nudge nudge'), 'wink wink')
         # Try unicode return type
-        t.install(unicode=True)
+        t.install(str=True)
         eq(_('mullusk'), 'bacon')
         # Test installation of other methods
         import __builtin__
-        t.install(unicode=True, names=["gettext", "lgettext"])
+        t.install(str=True, names=["gettext", "lgettext"])
         eq(_, t.ugettext)
         eq(__builtin__.gettext, t.ugettext)
         eq(lgettext, t.lgettext)
@@ -175,33 +175,33 @@
         eq = self.assertEqual
         # test some translations
         eq(self._('albatross'), 'albatross')
-        eq(self._(u'mullusk'), 'bacon')
+        eq(self._('mullusk'), 'bacon')
         eq(self._(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
-        eq(self._(ur'nudge nudge'), 'wink wink')
+        eq(self._(r'nudge nudge'), 'wink wink')
 
     def test_double_quotes(self):
         eq = self.assertEqual
         # double quotes
         eq(self._("albatross"), 'albatross')
-        eq(self._(u"mullusk"), 'bacon')
+        eq(self._("mullusk"), 'bacon')
         eq(self._(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
-        eq(self._(ur"nudge nudge"), 'wink wink')
+        eq(self._(r"nudge nudge"), 'wink wink')
 
     def test_triple_single_quotes(self):
         eq = self.assertEqual
         # triple single quotes
         eq(self._('''albatross'''), 'albatross')
-        eq(self._(u'''mullusk'''), 'bacon')
+        eq(self._('''mullusk'''), 'bacon')
         eq(self._(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
-        eq(self._(ur'''nudge nudge'''), 'wink wink')
+        eq(self._(r'''nudge nudge'''), 'wink wink')
 
     def test_triple_double_quotes(self):
         eq = self.assertEqual
         # triple double quotes
         eq(self._("""albatross"""), 'albatross')
-        eq(self._(u"""mullusk"""), 'bacon')
+        eq(self._("""mullusk"""), 'bacon')
         eq(self._(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
-        eq(self._(ur"""nudge nudge"""), 'wink wink')
+        eq(self._(r"""nudge nudge"""), 'wink wink')
 
     def test_multiline_strings(self):
         eq = self.assertEqual
@@ -309,12 +309,12 @@
 
     def test_unicode_msgid(self):
         unless = self.failUnless
-        unless(isinstance(self._(''), unicode))
-        unless(isinstance(self._(u''), unicode))
+        unless(isinstance(self._(''), str))
+        unless(isinstance(self._(''), str))
 
     def test_unicode_msgstr(self):
         eq = self.assertEqual
-        eq(self._(u'ab\xde'), u'\xa4yz')
+        eq(self._('ab\xde'), '\xa4yz')
 
 
 class WeirdMetadataTest(GettextBaseTest):
diff --git a/Lib/test/test_glob.py b/Lib/test/test_glob.py
index f1993ab..a628e6e 100644
--- a/Lib/test/test_glob.py
+++ b/Lib/test/test_glob.py
@@ -54,11 +54,11 @@
 
         # test return types are unicode, but only if os.listdir
         # returns unicode filenames
-        uniset = set([unicode])
-        tmp = os.listdir(u'.')
+        uniset = set([str])
+        tmp = os.listdir('.')
         if set(type(x) for x in tmp) == uniset:
-            u1 = glob.glob(u'*')
-            u2 = glob.glob(u'./*')
+            u1 = glob.glob('*')
+            u2 = glob.glob('./*')
             self.assertEquals(set(type(r) for r in u1), uniset)
             self.assertEquals(set(type(r) for r in u2), uniset)
 
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py
index 229bbed..b408a18 100755
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -311,7 +311,7 @@
 
     def test_entityrefs_in_attributes(self):
         self._run_check("<html foo='&euro;&amp;&#97;&#x61;&unsupported;'>", [
-                ("starttag", "html", [("foo", u"\u20AC&aa&unsupported;")])
+                ("starttag", "html", [("foo", "\u20AC&aa&unsupported;")])
                 ])
 
 
diff --git a/Lib/test/test_index.py b/Lib/test/test_index.py
index 0045469..b930c29 100644
--- a/Lib/test/test_index.py
+++ b/Lib/test/test_index.py
@@ -161,7 +161,7 @@
     seq = "this is a test"
 
 class UnicodeTestCase(SeqTestCase):
-    seq = u"this is a test"
+    seq = "this is a test"
 
 
 class XRangeTestCase(unittest.TestCase):
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index 0b63fee..9b05aa9 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -542,13 +542,13 @@
     def multi_line_test(self, f, enc):
         f.seek(0)
         f.truncate()
-        sample = u"s\xff\u0fff\uffff"
+        sample = "s\xff\u0fff\uffff"
         wlines = []
         for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
             chars = []
             for i in xrange(size):
                 chars.append(sample[i % len(sample)])
-            line = u"".join(chars) + "\n"
+            line = "".join(chars) + "\n"
             wlines.append((f.tell(), line))
             f.write(line)
         f.seek(0)
@@ -564,19 +564,19 @@
     def testTelling(self):
         f = io.open(test_support.TESTFN, "w+", encoding="utf8")
         p0 = f.tell()
-        f.write(u"\xff\n")
+        f.write("\xff\n")
         p1 = f.tell()
-        f.write(u"\xff\n")
+        f.write("\xff\n")
         p2 = f.tell()
         f.seek(0)
         self.assertEquals(f.tell(), p0)
-        self.assertEquals(f.readline(), u"\xff\n")
+        self.assertEquals(f.readline(), "\xff\n")
         self.assertEquals(f.tell(), p1)
-        self.assertEquals(f.readline(), u"\xff\n")
+        self.assertEquals(f.readline(), "\xff\n")
         self.assertEquals(f.tell(), p2)
         f.seek(0)
         for line in f:
-            self.assertEquals(line, u"\xff\n")
+            self.assertEquals(line, "\xff\n")
             self.assertRaises(IOError, f.tell)
         self.assertEquals(f.tell(), p2)
         f.close()
@@ -584,10 +584,10 @@
     def testSeeking(self):
         chunk_size = io.TextIOWrapper._CHUNK_SIZE
         prefix_size = chunk_size - 2
-        u_prefix = u"a" * prefix_size
+        u_prefix = "a" * prefix_size
         prefix = bytes(u_prefix.encode("utf-8"))
         self.assertEquals(len(u_prefix), len(prefix))
-        u_suffix = u"\u8888\n"
+        u_suffix = "\u8888\n"
         suffix = bytes(u_suffix.encode("utf-8"))
         line = prefix + suffix
         f = io.open(test_support.TESTFN, "wb")
@@ -614,7 +614,7 @@
     def timingTest(self):
         timer = time.time
         enc = "utf8"
-        line = u"\0\x0f\xff\u0fff\uffff\U000fffff\U0010ffff"*3 + "\n"
+        line = "\0\x0f\xff\u0fff\uffff\U000fffff\U0010ffff"*3 + "\n"
         nlines = 10000
         nchars = len(line)
         nbytes = len(line.encode(enc))
diff --git a/Lib/test/test_isinstance.py b/Lib/test/test_isinstance.py
index 366ced7..0744090 100644
--- a/Lib/test/test_isinstance.py
+++ b/Lib/test/test_isinstance.py
@@ -243,7 +243,7 @@
 
         self.assertEqual(True, issubclass(int, (int, (float, int))))
         if test_support.have_unicode:
-            self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring))))
+            self.assertEqual(True, issubclass(str, (str, (Child, NewChild, basestring))))
 
     def test_subclass_recursion_limit(self):
         # make sure that issubclass raises RuntimeError before the C stack is
diff --git a/Lib/test/test_iter.py b/Lib/test/test_iter.py
index 65e143d..5a75c50 100644
--- a/Lib/test/test_iter.py
+++ b/Lib/test/test_iter.py
@@ -216,9 +216,9 @@
     # Test a Unicode string
     if have_unicode:
         def test_iter_unicode(self):
-            self.check_for_loop(iter(unicode("abcde")),
-                                [unicode("a"), unicode("b"), unicode("c"),
-                                 unicode("d"), unicode("e")])
+            self.check_for_loop(iter(str("abcde")),
+                                [str("a"), str("b"), str("c"),
+                                 str("d"), str("e")])
 
     # Test a directory
     def test_iter_dict(self):
@@ -518,7 +518,7 @@
                 i = self.i
                 self.i = i+1
                 if i == 2:
-                    return unicode("fooled you!")
+                    return str("fooled you!")
                 return next(self.it)
 
         f = open(TESTFN, "w")
@@ -535,7 +535,7 @@
         # and pass that on to unicode.join().
         try:
             got = " - ".join(OhPhooey(f))
-            self.assertEqual(got, unicode("a\n - b\n - fooled you! - c\n"))
+            self.assertEqual(got, str("a\n - b\n - fooled you! - c\n"))
         finally:
             f.close()
             try:
diff --git a/Lib/test/test_macfs.py b/Lib/test/test_macfs.py
index 9c0e3a1..e25250b 100644
--- a/Lib/test/test_macfs.py
+++ b/Lib/test/test_macfs.py
@@ -32,7 +32,7 @@
 
     def test_fsref_unicode(self):
         if sys.getfilesystemencoding():
-            testfn_unicode = unicode(test_support.TESTFN)
+            testfn_unicode = str(test_support.TESTFN)
             fsr = macfs.FSRef(testfn_unicode)
             self.assertEqual(os.path.realpath(test_support.TESTFN), fsr.as_pathname())
 
diff --git a/Lib/test/test_marshal.py b/Lib/test/test_marshal.py
index 9c58c12..7f7c5e6 100644
--- a/Lib/test/test_marshal.py
+++ b/Lib/test/test_marshal.py
@@ -106,7 +106,7 @@
 
 class StringTestCase(unittest.TestCase):
     def test_unicode(self):
-        for s in [u"", u"Andrè Previn", u"abc", u" "*10000]:
+        for s in ["", "Andrè Previn", "abc", " "*10000]:
             new = marshal.loads(marshal.dumps(s))
             self.assertEqual(s, new)
             self.assertEqual(type(s), type(new))
@@ -156,7 +156,7 @@
          'alist': ['.zyx.41'],
          'atuple': ('.zyx.41',)*10,
          'aboolean': False,
-         'aunicode': u"Andrè Previn"
+         'aunicode': "Andrè Previn"
          }
     def test_dict(self):
         new = marshal.loads(marshal.dumps(self.d))
diff --git a/Lib/test/test_minidom.py b/Lib/test/test_minidom.py
index 5f95365..9b61d1a 100644
--- a/Lib/test/test_minidom.py
+++ b/Lib/test/test_minidom.py
@@ -166,7 +166,7 @@
 
     def testAppendChild(self):
         dom = parse(tstfile)
-        dom.documentElement.appendChild(dom.createComment(u"Hello"))
+        dom.documentElement.appendChild(dom.createComment("Hello"))
         self.confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
         self.confirm(dom.documentElement.childNodes[-1].data == "Hello")
         dom.unlink()
@@ -427,7 +427,7 @@
 
     def testElementReprAndStrUnicode(self):
         dom = Document()
-        el = dom.appendChild(dom.createElement(u"abc"))
+        el = dom.appendChild(dom.createElement("abc"))
         string1 = repr(el)
         string2 = str(el)
         self.confirm(string1 == string2)
@@ -436,7 +436,7 @@
     def testElementReprAndStrUnicodeNS(self):
         dom = Document()
         el = dom.appendChild(
-            dom.createElementNS(u"http://www.slashdot.org", u"slash:abc"))
+            dom.createElementNS("http://www.slashdot.org", "slash:abc"))
         string1 = repr(el)
         string2 = str(el)
         self.confirm(string1 == string2)
@@ -445,7 +445,7 @@
 
     def testAttributeRepr(self):
         dom = Document()
-        el = dom.appendChild(dom.createElement(u"abc"))
+        el = dom.appendChild(dom.createElement("abc"))
         node = el.setAttribute("abc", "def")
         self.confirm(str(node) == repr(node))
         dom.unlink()
@@ -869,7 +869,7 @@
 
     def testEncodings(self):
         doc = parseString('<foo>&#x20ac;</foo>')
-        self.confirm(doc.toxml() == u'<?xml version="1.0" ?><foo>\u20ac</foo>'
+        self.confirm(doc.toxml() == '<?xml version="1.0" ?><foo>\u20ac</foo>'
                 and doc.toxml('utf-8') ==
                 '<?xml version="1.0" encoding="utf-8"?><foo>\xe2\x82\xac</foo>'
                 and doc.toxml('iso-8859-15') ==
diff --git a/Lib/test/test_module.py b/Lib/test/test_module.py
index cc8b192..d091c6b 100644
--- a/Lib/test/test_module.py
+++ b/Lib/test/test_module.py
@@ -35,15 +35,15 @@
 
     def test_unicode_docstring(self):
         # Unicode docstring
-        foo = ModuleType("foo", u"foodoc\u1234")
+        foo = ModuleType("foo", "foodoc\u1234")
         self.assertEqual(foo.__name__, "foo")
-        self.assertEqual(foo.__doc__, u"foodoc\u1234")
+        self.assertEqual(foo.__doc__, "foodoc\u1234")
         self.assertEqual(foo.__dict__,
-                         {"__name__": "foo", "__doc__": u"foodoc\u1234"})
+                         {"__name__": "foo", "__doc__": "foodoc\u1234"})
 
     def test_reinit(self):
         # Reinitialization should not replace the __dict__
-        foo = ModuleType("foo", u"foodoc\u1234")
+        foo = ModuleType("foo", "foodoc\u1234")
         foo.bar = 42
         d = foo.__dict__
         foo.__init__("foo", "foodoc")
diff --git a/Lib/test/test_multibytecodec.py b/Lib/test/test_multibytecodec.py
index c5615a8..8ea4bf9 100644
--- a/Lib/test/test_multibytecodec.py
+++ b/Lib/test/test_multibytecodec.py
@@ -30,9 +30,9 @@
 
     def test_nullcoding(self):
         for enc in ALL_CJKENCODINGS:
-            self.assertEqual(''.decode(enc), u'')
-            self.assertEqual(unicode('', enc), u'')
-            self.assertEqual(u''.encode(enc), '')
+            self.assertEqual(''.decode(enc), '')
+            self.assertEqual(str('', enc), '')
+            self.assertEqual(''.encode(enc), '')
 
     def test_str_decode(self):
         for enc in ALL_CJKENCODINGS:
@@ -40,7 +40,7 @@
 
     def test_errorcallback_longindex(self):
         dec = codecs.getdecoder('euc-kr')
-        myreplace  = lambda exc: (u'', sys.maxint+1)
+        myreplace  = lambda exc: ('', sys.maxint+1)
         codecs.register_error('test.cjktest', myreplace)
         self.assertRaises(IndexError, dec,
                           'apple\x92ham\x93spam', 'test.cjktest')
@@ -58,14 +58,14 @@
     def test_stateless(self):
         # cp949 encoder isn't stateful at all.
         encoder = codecs.getincrementalencoder('cp949')()
-        self.assertEqual(encoder.encode(u'\ud30c\uc774\uc36c \ub9c8\uc744'),
+        self.assertEqual(encoder.encode('\ud30c\uc774\uc36c \ub9c8\uc744'),
                          '\xc6\xc4\xc0\xcc\xbd\xe3 \xb8\xb6\xc0\xbb')
         self.assertEqual(encoder.reset(), None)
-        self.assertEqual(encoder.encode(u'\u2606\u223c\u2606', True),
+        self.assertEqual(encoder.encode('\u2606\u223c\u2606', True),
                          '\xa1\xd9\xa1\xad\xa1\xd9')
         self.assertEqual(encoder.reset(), None)
-        self.assertEqual(encoder.encode(u'', True), '')
-        self.assertEqual(encoder.encode(u'', False), '')
+        self.assertEqual(encoder.encode('', True), '')
+        self.assertEqual(encoder.encode('', False), '')
         self.assertEqual(encoder.reset(), None)
 
     def test_stateful(self):
@@ -75,29 +75,29 @@
         #   U+0300 => ABDC
 
         encoder = codecs.getincrementalencoder('jisx0213')()
-        self.assertEqual(encoder.encode(u'\u00e6\u0300'), '\xab\xc4')
-        self.assertEqual(encoder.encode(u'\u00e6'), '')
-        self.assertEqual(encoder.encode(u'\u0300'), '\xab\xc4')
-        self.assertEqual(encoder.encode(u'\u00e6', True), '\xa9\xdc')
+        self.assertEqual(encoder.encode('\u00e6\u0300'), '\xab\xc4')
+        self.assertEqual(encoder.encode('\u00e6'), '')
+        self.assertEqual(encoder.encode('\u0300'), '\xab\xc4')
+        self.assertEqual(encoder.encode('\u00e6', True), '\xa9\xdc')
 
         self.assertEqual(encoder.reset(), None)
-        self.assertEqual(encoder.encode(u'\u0300'), '\xab\xdc')
+        self.assertEqual(encoder.encode('\u0300'), '\xab\xdc')
 
-        self.assertEqual(encoder.encode(u'\u00e6'), '')
+        self.assertEqual(encoder.encode('\u00e6'), '')
         self.assertEqual(encoder.encode('', True), '\xa9\xdc')
         self.assertEqual(encoder.encode('', True), '')
 
     def test_stateful_keep_buffer(self):
         encoder = codecs.getincrementalencoder('jisx0213')()
-        self.assertEqual(encoder.encode(u'\u00e6'), '')
-        self.assertRaises(UnicodeEncodeError, encoder.encode, u'\u0123')
-        self.assertEqual(encoder.encode(u'\u0300\u00e6'), '\xab\xc4')
-        self.assertRaises(UnicodeEncodeError, encoder.encode, u'\u0123')
+        self.assertEqual(encoder.encode('\u00e6'), '')
+        self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
+        self.assertEqual(encoder.encode('\u0300\u00e6'), '\xab\xc4')
+        self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
         self.assertEqual(encoder.reset(), None)
-        self.assertEqual(encoder.encode(u'\u0300'), '\xab\xdc')
-        self.assertEqual(encoder.encode(u'\u00e6'), '')
-        self.assertRaises(UnicodeEncodeError, encoder.encode, u'\u0123')
-        self.assertEqual(encoder.encode(u'', True), '\xa9\xdc')
+        self.assertEqual(encoder.encode('\u0300'), '\xab\xdc')
+        self.assertEqual(encoder.encode('\u00e6'), '')
+        self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
+        self.assertEqual(encoder.encode('', True), '\xa9\xdc')
 
 
 class Test_IncrementalDecoder(unittest.TestCase):
@@ -106,52 +106,52 @@
         # cp949 decoder is simple with only 1 or 2 bytes sequences.
         decoder = codecs.getincrementaldecoder('cp949')()
         self.assertEqual(decoder.decode('\xc6\xc4\xc0\xcc\xbd'),
-                         u'\ud30c\uc774')
+                         '\ud30c\uc774')
         self.assertEqual(decoder.decode('\xe3 \xb8\xb6\xc0\xbb'),
-                         u'\uc36c \ub9c8\uc744')
-        self.assertEqual(decoder.decode(''), u'')
+                         '\uc36c \ub9c8\uc744')
+        self.assertEqual(decoder.decode(''), '')
 
     def test_dbcs_keep_buffer(self):
         decoder = codecs.getincrementaldecoder('cp949')()
-        self.assertEqual(decoder.decode('\xc6\xc4\xc0'), u'\ud30c')
+        self.assertEqual(decoder.decode('\xc6\xc4\xc0'), '\ud30c')
         self.assertRaises(UnicodeDecodeError, decoder.decode, '', True)
-        self.assertEqual(decoder.decode('\xcc'), u'\uc774')
+        self.assertEqual(decoder.decode('\xcc'), '\uc774')
 
-        self.assertEqual(decoder.decode('\xc6\xc4\xc0'), u'\ud30c')
+        self.assertEqual(decoder.decode('\xc6\xc4\xc0'), '\ud30c')
         self.assertRaises(UnicodeDecodeError, decoder.decode, '\xcc\xbd', True)
-        self.assertEqual(decoder.decode('\xcc'), u'\uc774')
+        self.assertEqual(decoder.decode('\xcc'), '\uc774')
 
     def test_iso2022(self):
         decoder = codecs.getincrementaldecoder('iso2022-jp')()
         ESC = '\x1b'
-        self.assertEqual(decoder.decode(ESC + '('), u'')
-        self.assertEqual(decoder.decode('B', True), u'')
-        self.assertEqual(decoder.decode(ESC + '$'), u'')
-        self.assertEqual(decoder.decode('B@$'), u'\u4e16')
-        self.assertEqual(decoder.decode('@$@'), u'\u4e16')
-        self.assertEqual(decoder.decode('$', True), u'\u4e16')
+        self.assertEqual(decoder.decode(ESC + '('), '')
+        self.assertEqual(decoder.decode('B', True), '')
+        self.assertEqual(decoder.decode(ESC + '$'), '')
+        self.assertEqual(decoder.decode('B@$'), '\u4e16')
+        self.assertEqual(decoder.decode('@$@'), '\u4e16')
+        self.assertEqual(decoder.decode('$', True), '\u4e16')
         self.assertEqual(decoder.reset(), None)
-        self.assertEqual(decoder.decode('@$'), u'@$')
-        self.assertEqual(decoder.decode(ESC + '$'), u'')
+        self.assertEqual(decoder.decode('@$'), '@$')
+        self.assertEqual(decoder.decode(ESC + '$'), '')
         self.assertRaises(UnicodeDecodeError, decoder.decode, '', True)
-        self.assertEqual(decoder.decode('B@$'), u'\u4e16')
+        self.assertEqual(decoder.decode('B@$'), '\u4e16')
 
 
 class Test_StreamWriter(unittest.TestCase):
-    if len(u'\U00012345') == 2: # UCS2
+    if len('\U00012345') == 2: # UCS2
         def test_gb18030(self):
             s= StringIO.StringIO()
             c = codecs.getwriter('gb18030')(s)
-            c.write(u'123')
+            c.write('123')
             self.assertEqual(s.getvalue(), '123')
-            c.write(u'\U00012345')
+            c.write('\U00012345')
             self.assertEqual(s.getvalue(), '123\x907\x959')
-            c.write(u'\U00012345'[0])
+            c.write('\U00012345'[0])
             self.assertEqual(s.getvalue(), '123\x907\x959')
-            c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
+            c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
             self.assertEqual(s.getvalue(),
                     '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
-            c.write(u'\U00012345'[0])
+            c.write('\U00012345'[0])
             self.assertEqual(s.getvalue(),
                     '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
             self.assertRaises(UnicodeError, c.reset)
@@ -161,20 +161,20 @@
         def test_utf_8(self):
             s= StringIO.StringIO()
             c = codecs.getwriter('utf-8')(s)
-            c.write(u'123')
+            c.write('123')
             self.assertEqual(s.getvalue(), '123')
-            c.write(u'\U00012345')
+            c.write('\U00012345')
             self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
 
             # Python utf-8 codec can't buffer surrogate pairs yet.
             if 0:
-                c.write(u'\U00012345'[0])
+                c.write('\U00012345'[0])
                 self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
-                c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
+                c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
                 self.assertEqual(s.getvalue(),
                     '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                     '\xea\xb0\x80\xc2\xac')
-                c.write(u'\U00012345'[0])
+                c.write('\U00012345'[0])
                 self.assertEqual(s.getvalue(),
                     '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                     '\xea\xb0\x80\xc2\xac')
@@ -182,7 +182,7 @@
                 self.assertEqual(s.getvalue(),
                     '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                     '\xea\xb0\x80\xc2\xac\xed\xa0\x88')
-                c.write(u'\U00012345'[1])
+                c.write('\U00012345'[1])
                 self.assertEqual(s.getvalue(),
                     '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                     '\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
@@ -199,13 +199,13 @@
 class Test_ISO2022(unittest.TestCase):
     def test_g2(self):
         iso2022jp2 = '\x1b(B:hu4:unit\x1b.A\x1bNi de famille'
-        uni = u':hu4:unit\xe9 de famille'
+        uni = ':hu4:unit\xe9 de famille'
         self.assertEqual(iso2022jp2.decode('iso2022-jp-2'), uni)
 
     def test_iso2022_jp_g0(self):
-        self.failIf('\x0e' in u'\N{SOFT HYPHEN}'.encode('iso-2022-jp-2'))
+        self.failIf('\x0e' in '\N{SOFT HYPHEN}'.encode('iso-2022-jp-2'))
         for encoding in ('iso-2022-jp-2004', 'iso-2022-jp-3'):
-            e = u'\u3406'.encode(encoding)
+            e = '\u3406'.encode(encoding)
             self.failIf(filter(lambda x: x >= '\x80', e))
 
     def test_bug1572832(self):
diff --git a/Lib/test/test_multibytecodec_support.py b/Lib/test/test_multibytecodec_support.py
index d94d115..6abcdd6 100644
--- a/Lib/test/test_multibytecodec_support.py
+++ b/Lib/test/test_multibytecodec_support.py
@@ -18,7 +18,7 @@
     roundtriptest   = 1    # set if roundtrip is possible with unicode
     has_iso10646    = 0    # set if this encoding contains whole iso10646 map
     xmlcharnametest = None # string to test xmlcharrefreplace
-    unmappedunicode = u'\udeee' # a unicode codepoint that is not mapped.
+    unmappedunicode = '\udeee' # a unicode codepoint that is not mapped.
 
     def setUp(self):
         if self.codec is None:
@@ -54,7 +54,7 @@
         if self.has_iso10646:
             return
 
-        s = u"\u0b13\u0b23\u0b60 nd eggs"
+        s = "\u0b13\u0b23\u0b60 nd eggs"
         self.assertEqual(
             self.encode(s, "xmlcharrefreplace")[0],
             "&#2835;&#2851;&#2912; nd eggs"
@@ -72,17 +72,17 @@
             l = []
             for c in exc.object[exc.start:exc.end]:
                 if ord(c) in codepoint2name:
-                    l.append(u"&%s;" % codepoint2name[ord(c)])
+                    l.append("&%s;" % codepoint2name[ord(c)])
                 else:
-                    l.append(u"&#%d;" % ord(c))
-            return (u"".join(l), exc.end)
+                    l.append("&#%d;" % ord(c))
+            return ("".join(l), exc.end)
 
         codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
 
         if self.xmlcharnametest:
             sin, sout = self.xmlcharnametest
         else:
-            sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
+            sin = "\xab\u211c\xbb = \u2329\u1234\u232a"
             sout = "&laquo;&real;&raquo; = &lang;&#4660;&rang;"
         self.assertEqual(self.encode(sin,
                                     "test.xmlcharnamereplace")[0], sout)
@@ -98,20 +98,20 @@
 
     def test_callback_long_index(self):
         def myreplace(exc):
-            return (u'x', int(exc.end))
+            return ('x', int(exc.end))
         codecs.register_error("test.cjktest", myreplace)
-        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+        self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
                                      'test.cjktest'), ('abcdxefgh', 9))
 
         def myreplace(exc):
-            return (u'x', sys.maxint + 1)
+            return ('x', sys.maxint + 1)
         codecs.register_error("test.cjktest", myreplace)
         self.assertRaises(IndexError, self.encode, self.unmappedunicode,
                           'test.cjktest')
 
     def test_callback_None_index(self):
         def myreplace(exc):
-            return (u'x', None)
+            return ('x', None)
         codecs.register_error("test.cjktest", myreplace)
         self.assertRaises(TypeError, self.encode, self.unmappedunicode,
                           'test.cjktest')
@@ -120,25 +120,25 @@
         def myreplace(exc):
             if myreplace.limit > 0:
                 myreplace.limit -= 1
-                return (u'REPLACED', 0)
+                return ('REPLACED', 0)
             else:
-                return (u'TERMINAL', exc.end)
+                return ('TERMINAL', exc.end)
         myreplace.limit = 3
         codecs.register_error("test.cjktest", myreplace)
-        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+        self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
                                      'test.cjktest'),
                 ('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
 
     def test_callback_forward_index(self):
         def myreplace(exc):
-            return (u'REPLACED', exc.end + 2)
+            return ('REPLACED', exc.end + 2)
         codecs.register_error("test.cjktest", myreplace)
-        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+        self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
                                      'test.cjktest'), ('abcdREPLACEDgh', 9))
 
     def test_callback_index_outofbound(self):
         def myreplace(exc):
-            return (u'TERM', 100)
+            return ('TERM', 100)
         codecs.register_error("test.cjktest", myreplace)
         self.assertRaises(IndexError, self.encode, self.unmappedunicode,
                           'test.cjktest')
@@ -191,7 +191,7 @@
 
         e.reset()
         def tempreplace(exc):
-            return (u'called', exc.end)
+            return ('called', exc.end)
         codecs.register_error('test.incremental_error_callback', tempreplace)
         e.errors = 'test.incremental_error_callback'
         self.assertEqual(e.encode(inv, True), 'called')
@@ -243,7 +243,7 @@
 
                 self.assertEqual(ostream.getvalue(), self.tstring[0])
 
-if len(u'\U00012345') == 2: # ucs2 build
+if len('\U00012345') == 2: # ucs2 build
     _unichr = unichr
     def unichr(v):
         if v >= 0x10000:
@@ -272,7 +272,7 @@
         return test_support.open_urlresource(self.mapfileurl)
 
     def test_mapping_file(self):
-        unichrs = lambda s: u''.join(map(unichr, map(eval, s.split('+'))))
+        unichrs = lambda s: ''.join(map(unichr, map(eval, s.split('+'))))
         urt_wa = {}
 
         for line in self.open_mapping_file():
@@ -311,7 +311,7 @@
         if (csetch, unich) not in self.pass_enctest:
             self.assertEqual(unich.encode(self.encoding), csetch)
         if (csetch, unich) not in self.pass_dectest:
-            self.assertEqual(unicode(csetch, self.encoding), unich)
+            self.assertEqual(str(csetch, self.encoding), unich)
 
 def load_teststring(encoding):
     from test import cjkencodings_test
diff --git a/Lib/test/test_normalization.py b/Lib/test/test_normalization.py
index b571bdc..a48af4d 100644
--- a/Lib/test/test_normalization.py
+++ b/Lib/test/test_normalization.py
@@ -28,7 +28,7 @@
     for x in data:
         if x > sys.maxunicode:
             raise RangeError
-    return u"".join([unichr(x) for x in data])
+    return "".join([unichr(x) for x in data])
 
 class NormalizationTest(unittest.TestCase):
     def test_main(self):
@@ -84,7 +84,7 @@
 
     def test_bug_834676(self):
         # Check for bug 834676
-        normalize('NFC', u'\ud55c\uae00')
+        normalize('NFC', '\ud55c\uae00')
 
 
 def test_main():
diff --git a/Lib/test/test_optparse.py b/Lib/test/test_optparse.py
index 88e3a1f..811ec34 100644
--- a/Lib/test/test_optparse.py
+++ b/Lib/test/test_optparse.py
@@ -1520,8 +1520,8 @@
 
     def test_help_unicode(self):
         self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
-        self.parser.add_option("-a", action="store_true", help=u"ol\u00E9!")
-        expect = u"""\
+        self.parser.add_option("-a", action="store_true", help="ol\u00E9!")
+        expect = """\
 Options:
   -h, --help  show this help message and exit
   -a          ol\u00E9!
@@ -1530,8 +1530,8 @@
 
     def test_help_unicode_description(self):
         self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
-                                               description=u"ol\u00E9!")
-        expect = u"""\
+                                               description="ol\u00E9!")
+        expect = """\
 ol\u00E9!
 
 Options:
diff --git a/Lib/test/test_pep263.py b/Lib/test/test_pep263.py
index 3b09c12..ae61af9 100644
--- a/Lib/test/test_pep263.py
+++ b/Lib/test/test_pep263.py
@@ -8,11 +8,11 @@
 

     def test_pep263(self):

         self.assertEqual(

-            u"ðÉÔÏÎ".encode("utf-8"),

+            "ðÉÔÏÎ".encode("utf-8"),

             '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'

         )

         self.assertEqual(

-            u"\ð".encode("utf-8"),

+            "\ð".encode("utf-8"),

             '\\\xd0\x9f'

         )

 

diff --git a/Lib/test/test_pep277.py b/Lib/test/test_pep277.py
index 8efa50a..5574c7d 100644
--- a/Lib/test/test_pep277.py
+++ b/Lib/test/test_pep277.py
@@ -7,14 +7,14 @@
 
 filenames = [
     'abc',
-    u'ascii',
-    u'Gr\xfc\xdf-Gott',
-    u'\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
-    u'\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
-    u'\u306b\u307d\u3093',
-    u'\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
-    u'\u66e8\u66e9\u66eb',
-    u'\u66e8\u05e9\u3093\u0434\u0393\xdf',
+    'ascii',
+    'Gr\xfc\xdf-Gott',
+    '\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
+    '\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
+    '\u306b\u307d\u3093',
+    '\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
+    '\u66e8\u66e9\u66eb',
+    '\u66e8\u05e9\u3093\u0434\u0393\xdf',
     ]
 
 # Destroy directory dirname and all files under it, to one level.
@@ -23,7 +23,7 @@
     # an error if we can't remove it.
     if os.path.exists(dirname):
         # must pass unicode to os.listdir() so we get back unicode results.
-        for fname in os.listdir(unicode(dirname)):
+        for fname in os.listdir(str(dirname)):
             os.unlink(os.path.join(dirname, fname))
         os.rmdir(dirname)
 
@@ -80,7 +80,7 @@
         f1 = os.listdir(test_support.TESTFN)
         # Printing f1 is not appropriate, as specific filenames
         # returned depend on the local encoding
-        f2 = os.listdir(unicode(test_support.TESTFN,
+        f2 = os.listdir(str(test_support.TESTFN,
                                 sys.getfilesystemencoding()))
         f2.sort()
         print(f2)
@@ -91,8 +91,8 @@
             os.rename("tmp",name)
 
     def test_directory(self):
-        dirname = os.path.join(test_support.TESTFN,u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
-        filename = u'\xdf-\u66e8\u66e9\u66eb'
+        dirname = os.path.join(test_support.TESTFN,'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
+        filename = '\xdf-\u66e8\u66e9\u66eb'
         oldwd = os.getcwd()
         os.mkdir(dirname)
         os.chdir(dirname)
diff --git a/Lib/test/test_pep292.py b/Lib/test/test_pep292.py
index d1100ea..9820490 100644
--- a/Lib/test/test_pep292.py
+++ b/Lib/test/test_pep292.py
@@ -134,8 +134,8 @@
 
     def test_unicode_values(self):
         s = Template('$who likes $what')
-        d = dict(who=u't\xffm', what=u'f\xfe\fed')
-        self.assertEqual(s.substitute(d), u't\xffm likes f\xfe\x0ced')
+        d = dict(who='t\xffm', what='f\xfe\fed')
+        self.assertEqual(s.substitute(d), 't\xffm likes f\xfe\x0ced')
 
     def test_keyword_arguments(self):
         eq = self.assertEqual
diff --git a/Lib/test/test_pep352.py b/Lib/test/test_pep352.py
index cd91ca7..48862ce 100644
--- a/Lib/test/test_pep352.py
+++ b/Lib/test/test_pep352.py
@@ -90,7 +90,7 @@
         arg = "spam"
         exc = Exception(arg)
         results = ([len(exc.args), 1], [exc.args[0], arg], [exc.message, arg],
-                [str(exc), str(arg)], [unicode(exc), unicode(arg)],
+                [str(exc), str(arg)],
             [repr(exc), exc.__class__.__name__ + repr(exc.args)])
         self.interface_test_driver(results)
 
@@ -101,7 +101,7 @@
         exc = Exception(*args)
         results = ([len(exc.args), arg_count], [exc.args, args],
                 [exc.message, ''], [str(exc), str(args)],
-                [unicode(exc), unicode(args)],
+                [str(exc), str(args)],
                 [repr(exc), exc.__class__.__name__ + repr(exc.args)])
         self.interface_test_driver(results)
 
@@ -109,7 +109,7 @@
         # Make sure that with no args that interface is correct
         exc = Exception()
         results = ([len(exc.args), 0], [exc.args, tuple()], [exc.message, ''],
-                [str(exc), ''], [unicode(exc), u''],
+                [str(exc), ''], [str(exc), ''],
                 [repr(exc), exc.__class__.__name__ + '()'])
         self.interface_test_driver(results)
 
diff --git a/Lib/test/test_plistlib.py b/Lib/test/test_plistlib.py
index 8e8d3e3..1b8012e 100644
--- a/Lib/test/test_plistlib.py
+++ b/Lib/test/test_plistlib.py
@@ -104,7 +104,7 @@
             anInt = 728,
             aDict=dict(
                 anotherString="<hello & 'hi' there!>",
-                aUnicodeValue=u'M\xe4ssig, Ma\xdf',
+                aUnicodeValue='M\xe4ssig, Ma\xdf',
                 aTrueValue=True,
                 aFalseValue=False,
                 deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
@@ -114,7 +114,7 @@
             nestedData = [plistlib.Data("<lots of binary gunk>\0\1\2\3" * 10)],
             aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
         )
-        pl[u'\xc5benraa'] = "That was a unicode key."
+        pl['\xc5benraa'] = "That was a unicode key."
         return pl
 
     def test_create(self):
diff --git a/Lib/test/test_pprint.py b/Lib/test/test_pprint.py
index 8fc8d10..c281872 100644
--- a/Lib/test/test_pprint.py
+++ b/Lib/test/test_pprint.py
@@ -3,7 +3,7 @@
 import unittest
 
 try:
-    uni = unicode
+    uni = str
 except NameError:
     def uni(x):
         return x
diff --git a/Lib/test/test_pyexpat.py b/Lib/test/test_pyexpat.py
index 0900d1e..62c5d02 100644
--- a/Lib/test/test_pyexpat.py
+++ b/Lib/test/test_pyexpat.py
@@ -281,7 +281,7 @@
     def check(self, expected, label):
         self.assertEquals(self.stuff, expected,
                 "%s\nstuff    = %r\nexpected = %r"
-                % (label, self.stuff, map(unicode, expected)))
+                % (label, self.stuff, map(str, expected)))
 
     def CharacterDataHandler(self, text):
         self.stuff.append(text)
diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py
index 13fa413..b6dfea8 100644
--- a/Lib/test/test_re.py
+++ b/Lib/test/test_re.py
@@ -324,12 +324,12 @@
         self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
         self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
         self.assertEqual(re.search(r"\b(b.)\b",
-                                   u"abcd abc bcd bx").group(1), "bx")
+                                   "abcd abc bcd bx").group(1), "bx")
         self.assertEqual(re.search(r"\B(b.)\B",
-                                   u"abc bcd bc abxd").group(1), "bx")
-        self.assertEqual(re.search(r"^abc$", u"\nabc\n", re.M).group(0), "abc")
-        self.assertEqual(re.search(r"^\Aabc\Z$", u"abc", re.M).group(0), "abc")
-        self.assertEqual(re.search(r"^\Aabc\Z$", u"\nabc\n", re.M), None)
+                                   "abc bcd bc abxd").group(1), "bx")
+        self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
+        self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
+        self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
         self.assertEqual(re.search(r"\d\D\w\W\s\S",
                                    "1aa! a").group(0), "1aa! a")
         self.assertEqual(re.search(r"\d\D\w\W\s\S",
@@ -339,13 +339,13 @@
 
     def test_ignore_case(self):
         self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
-        self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
+        self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
 
     def test_bigcharset(self):
-        self.assertEqual(re.match(u"([\u2222\u2223])",
-                                  u"\u2222").group(1), u"\u2222")
-        self.assertEqual(re.match(u"([\u2222\u2223])",
-                                  u"\u2222", re.UNICODE).group(1), u"\u2222")
+        self.assertEqual(re.match("([\u2222\u2223])",
+                                  "\u2222").group(1), "\u2222")
+        self.assertEqual(re.match("([\u2222\u2223])",
+                                  "\u2222", re.UNICODE).group(1), "\u2222")
 
     def test_anyall(self):
         self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
@@ -387,7 +387,7 @@
         self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
 
         self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
-        self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
+        self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
 
     def test_not_literal(self):
         self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
@@ -493,7 +493,7 @@
         self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
 
     def test_bug_612074(self):
-        pat=u"["+re.escape(u"\u2039")+u"]"
+        pat="["+re.escape("\u2039")+"]"
         self.assertEqual(re.compile(pat) and 1, 1)
 
     def test_stack_overflow(self):
@@ -561,10 +561,10 @@
     def test_bug_764548(self):
         # bug 764548, re.compile() barfs on str/unicode subclasses
         try:
-            unicode
+            str
         except NameError:
             return  # no problem if we have no unicode
-        class my_unicode(unicode): pass
+        class my_unicode(str): pass
         pat = re.compile(my_unicode("abc"))
         self.assertEqual(pat.match("xyz"), None)
 
@@ -575,7 +575,7 @@
 
     def test_bug_926075(self):
         try:
-            unicode
+            str
         except NameError:
             return # no problem if we have no unicode
         self.assert_(re.compile('bug_926075') is not
@@ -583,7 +583,7 @@
 
     def test_bug_931848(self):
         try:
-            unicode
+            str
         except NameError:
             pass
         pattern = eval('u"[\u002E\u3002\uFF0E\uFF61]"')
@@ -689,7 +689,7 @@
                 # Try the match on a unicode string, and check that it
                 # still succeeds.
                 try:
-                    result = obj.search(unicode(s, "latin-1"))
+                    result = obj.search(str(s, "latin-1"))
                     if result is None:
                         print('=== Fails on unicode match', t)
                 except NameError:
@@ -699,7 +699,7 @@
 
                 # Try the match on a unicode pattern, and check that it
                 # still succeeds.
-                obj=re.compile(unicode(pattern, "latin-1"))
+                obj=re.compile(str(pattern, "latin-1"))
                 result = obj.search(s)
                 if result is None:
                     print('=== Fails on unicode pattern match', t)
diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py
index 45bf32c..0e47623 100644
--- a/Lib/test/test_set.py
+++ b/Lib/test/test_set.py
@@ -72,7 +72,7 @@
         self.assertEqual(type(u), self.thetype)
         self.assertRaises(PassThru, self.s.union, check_pass_thru())
         self.assertRaises(TypeError, self.s.union, [[]])
-        for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+        for C in set, frozenset, dict.fromkeys, str, list, tuple:
             self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
             self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
             self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
@@ -96,7 +96,7 @@
         self.assertEqual(self.s, self.thetype(self.word))
         self.assertEqual(type(i), self.thetype)
         self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
-        for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+        for C in set, frozenset, dict.fromkeys, str, list, tuple:
             self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
             self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
             self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
@@ -121,7 +121,7 @@
         self.assertEqual(type(i), self.thetype)
         self.assertRaises(PassThru, self.s.difference, check_pass_thru())
         self.assertRaises(TypeError, self.s.difference, [[]])
-        for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+        for C in set, frozenset, dict.fromkeys, str, list, tuple:
             self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
             self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
             self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
@@ -146,7 +146,7 @@
         self.assertEqual(type(i), self.thetype)
         self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
         self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
-        for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+        for C in set, frozenset, dict.fromkeys, str, list, tuple:
             self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
             self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
             self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
@@ -390,7 +390,7 @@
         self.assertRaises(PassThru, self.s.update, check_pass_thru())
         self.assertRaises(TypeError, self.s.update, [[]])
         for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
-            for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+            for C in set, frozenset, dict.fromkeys, str, list, tuple:
                 s = self.thetype('abcba')
                 self.assertEqual(s.update(C(p)), None)
                 self.assertEqual(s, set(q))
@@ -411,7 +411,7 @@
         self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
         self.assertRaises(TypeError, self.s.intersection_update, [[]])
         for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
-            for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+            for C in set, frozenset, dict.fromkeys, str, list, tuple:
                 s = self.thetype('abcba')
                 self.assertEqual(s.intersection_update(C(p)), None)
                 self.assertEqual(s, set(q))
@@ -436,7 +436,7 @@
         self.assertRaises(TypeError, self.s.difference_update, [[]])
         self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
         for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
-            for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+            for C in set, frozenset, dict.fromkeys, str, list, tuple:
                 s = self.thetype('abcba')
                 self.assertEqual(s.difference_update(C(p)), None)
                 self.assertEqual(s, set(q))
@@ -460,7 +460,7 @@
         self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
         self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
         for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
-            for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
+            for C in set, frozenset, dict.fromkeys, str, list, tuple:
                 s = self.thetype('abcba')
                 self.assertEqual(s.symmetric_difference_update(C(p)), None)
                 self.assertEqual(s, set(q))
diff --git a/Lib/test/test_startfile.py b/Lib/test/test_startfile.py
index c4d12d7..8d80e7a 100644
--- a/Lib/test/test_startfile.py
+++ b/Lib/test/test_startfile.py
@@ -18,7 +18,7 @@
         self.assertRaises(OSError, startfile, "nonexisting.vbs")
 
     def test_nonexisting_u(self):
-        self.assertRaises(OSError, startfile, u"nonexisting.vbs")
+        self.assertRaises(OSError, startfile, "nonexisting.vbs")
 
     def test_empty(self):
         empty = path.join(path.dirname(__file__), "empty.vbs")
@@ -27,8 +27,8 @@
 
     def test_empty_u(self):
         empty = path.join(path.dirname(__file__), "empty.vbs")
-        startfile(unicode(empty, "mbcs"))
-        startfile(unicode(empty, "mbcs"), "open")
+        startfile(str(empty, "mbcs"))
+        startfile(str(empty, "mbcs"), "open")
 
 def test_main():
     test_support.run_unittest(TestCase)
diff --git a/Lib/test/test_str.py b/Lib/test/test_str.py
index 0067bdb..6869941 100644
--- a/Lib/test/test_str.py
+++ b/Lib/test/test_str.py
@@ -31,7 +31,7 @@
         # Make sure __str__() behaves properly
         class Foo0:
             def __unicode__(self):
-                return u"foo"
+                return "foo"
 
         class Foo1:
             def __str__(self):
@@ -43,28 +43,28 @@
 
         class Foo3(object):
             def __str__(self):
-                return u"foo"
+                return "foo"
 
-        class Foo4(unicode):
+        class Foo4(str):
             def __str__(self):
-                return u"foo"
+                return "foo"
 
         class Foo5(str):
             def __str__(self):
-                return u"foo"
+                return "foo"
 
         class Foo6(str):
             def __str__(self):
                 return "foos"
 
             def __unicode__(self):
-                return u"foou"
+                return "foou"
 
-        class Foo7(unicode):
+        class Foo7(str):
             def __str__(self):
                 return "foos"
             def __unicode__(self):
-                return u"foou"
+                return "foou"
 
         class Foo8(str):
             def __new__(cls, content=""):
@@ -88,7 +88,7 @@
         self.assertEqual(str(Foo7("bar")), "foos")
         self.assertEqual(str(Foo8("foo")), "foofoo")
         self.assertEqual(str(Foo9("foo")), "string")
-        self.assertEqual(unicode(Foo9("foo")), u"not unicode")
+        self.assertEqual(str(Foo9("foo")), "not unicode")
 
 def test_main():
     test_support.run_unittest(StrTest)
diff --git a/Lib/test/test_stringprep.py b/Lib/test/test_stringprep.py
index 60425dd..0452790 100644
--- a/Lib/test/test_stringprep.py
+++ b/Lib/test/test_stringprep.py
@@ -8,66 +8,66 @@
 
 class StringprepTests(unittest.TestCase):
     def test(self):
-        self.failUnless(in_table_a1(u"\u0221"))
-        self.failIf(in_table_a1(u"\u0222"))
+        self.failUnless(in_table_a1("\u0221"))
+        self.failIf(in_table_a1("\u0222"))
 
-        self.failUnless(in_table_b1(u"\u00ad"))
-        self.failIf(in_table_b1(u"\u00ae"))
+        self.failUnless(in_table_b1("\u00ad"))
+        self.failIf(in_table_b1("\u00ae"))
 
-        self.failUnless(map_table_b2(u"\u0041"), u"\u0061")
-        self.failUnless(map_table_b2(u"\u0061"), u"\u0061")
+        self.failUnless(map_table_b2("\u0041"), "\u0061")
+        self.failUnless(map_table_b2("\u0061"), "\u0061")
 
-        self.failUnless(map_table_b3(u"\u0041"), u"\u0061")
-        self.failUnless(map_table_b3(u"\u0061"), u"\u0061")
+        self.failUnless(map_table_b3("\u0041"), "\u0061")
+        self.failUnless(map_table_b3("\u0061"), "\u0061")
 
-        self.failUnless(in_table_c11(u"\u0020"))
-        self.failIf(in_table_c11(u"\u0021"))
+        self.failUnless(in_table_c11("\u0020"))
+        self.failIf(in_table_c11("\u0021"))
 
-        self.failUnless(in_table_c12(u"\u00a0"))
-        self.failIf(in_table_c12(u"\u00a1"))
+        self.failUnless(in_table_c12("\u00a0"))
+        self.failIf(in_table_c12("\u00a1"))
 
-        self.failUnless(in_table_c12(u"\u00a0"))
-        self.failIf(in_table_c12(u"\u00a1"))
+        self.failUnless(in_table_c12("\u00a0"))
+        self.failIf(in_table_c12("\u00a1"))
 
-        self.failUnless(in_table_c11_c12(u"\u00a0"))
-        self.failIf(in_table_c11_c12(u"\u00a1"))
+        self.failUnless(in_table_c11_c12("\u00a0"))
+        self.failIf(in_table_c11_c12("\u00a1"))
 
-        self.failUnless(in_table_c21(u"\u001f"))
-        self.failIf(in_table_c21(u"\u0020"))
+        self.failUnless(in_table_c21("\u001f"))
+        self.failIf(in_table_c21("\u0020"))
 
-        self.failUnless(in_table_c22(u"\u009f"))
-        self.failIf(in_table_c22(u"\u00a0"))
+        self.failUnless(in_table_c22("\u009f"))
+        self.failIf(in_table_c22("\u00a0"))
 
-        self.failUnless(in_table_c21_c22(u"\u009f"))
-        self.failIf(in_table_c21_c22(u"\u00a0"))
+        self.failUnless(in_table_c21_c22("\u009f"))
+        self.failIf(in_table_c21_c22("\u00a0"))
 
-        self.failUnless(in_table_c3(u"\ue000"))
-        self.failIf(in_table_c3(u"\uf900"))
+        self.failUnless(in_table_c3("\ue000"))
+        self.failIf(in_table_c3("\uf900"))
 
-        self.failUnless(in_table_c4(u"\uffff"))
-        self.failIf(in_table_c4(u"\u0000"))
+        self.failUnless(in_table_c4("\uffff"))
+        self.failIf(in_table_c4("\u0000"))
 
-        self.failUnless(in_table_c5(u"\ud800"))
-        self.failIf(in_table_c5(u"\ud7ff"))
+        self.failUnless(in_table_c5("\ud800"))
+        self.failIf(in_table_c5("\ud7ff"))
 
-        self.failUnless(in_table_c6(u"\ufff9"))
-        self.failIf(in_table_c6(u"\ufffe"))
+        self.failUnless(in_table_c6("\ufff9"))
+        self.failIf(in_table_c6("\ufffe"))
 
-        self.failUnless(in_table_c7(u"\u2ff0"))
-        self.failIf(in_table_c7(u"\u2ffc"))
+        self.failUnless(in_table_c7("\u2ff0"))
+        self.failIf(in_table_c7("\u2ffc"))
 
-        self.failUnless(in_table_c8(u"\u0340"))
-        self.failIf(in_table_c8(u"\u0342"))
+        self.failUnless(in_table_c8("\u0340"))
+        self.failIf(in_table_c8("\u0342"))
 
         # C.9 is not in the bmp
         # self.failUnless(in_table_c9(u"\U000E0001"))
         # self.failIf(in_table_c8(u"\U000E0002"))
 
-        self.failUnless(in_table_d1(u"\u05be"))
-        self.failIf(in_table_d1(u"\u05bf"))
+        self.failUnless(in_table_d1("\u05be"))
+        self.failIf(in_table_d1("\u05bf"))
 
-        self.failUnless(in_table_d2(u"\u0041"))
-        self.failIf(in_table_d2(u"\u0040"))
+        self.failUnless(in_table_d2("\u0041"))
+        self.failIf(in_table_d2("\u0040"))
 
         # This would generate a hash of all predicates. However, running
         # it is quite expensive, and only serves to detect changes in the
diff --git a/Lib/test/test_support.py b/Lib/test/test_support.py
index 1ff0e4d..29a3852 100644
--- a/Lib/test/test_support.py
+++ b/Lib/test/test_support.py
@@ -131,7 +131,7 @@
     return (x > y) - (x < y)
 
 try:
-    unicode
+    str
     have_unicode = True
 except NameError:
     have_unicode = False
@@ -151,13 +151,13 @@
         # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
         # TESTFN_UNICODE is a filename that can be encoded using the
         # file system encoding, but *not* with the default (ascii) encoding
-        if isinstance('', unicode):
+        if isinstance('', str):
             # python -U
             # XXX perhaps unicode() should accept Unicode strings?
             TESTFN_UNICODE = "@test-\xe0\xf2"
         else:
             # 2 latin characters.
-            TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
+            TESTFN_UNICODE = str("@test-\xe0\xf2", "latin-1")
         TESTFN_ENCODING = sys.getfilesystemencoding()
         # TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
         # able to be encoded by *either* the default or filesystem encoding.
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index ac7dca3..08c7a88 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -711,7 +711,7 @@
 
     def _test_unicode_filename(self, encoding):
         tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT)
-        name = u"\u20ac".encode(encoding) # Euro sign
+        name = "\u20ac".encode(encoding) # Euro sign
         tar.encoding = encoding
         tar.addfile(tarfile.TarInfo(name))
         tar.close()
@@ -723,7 +723,7 @@
     def test_unicode_filename_error(self):
         # The euro sign filename cannot be translated to iso8859-1 encoding.
         tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT, encoding="utf8")
-        name = u"\u20ac".encode("utf8") # Euro sign
+        name = "\u20ac".encode("utf8") # Euro sign
         tar.addfile(tarfile.TarInfo(name))
         tar.close()
 
@@ -732,13 +732,13 @@
     def test_pax_headers(self):
         self._test_pax_headers({"foo": "bar", "uid": 0, "mtime": 1.23})
 
-        self._test_pax_headers({"euro": u"\u20ac".encode("utf8")})
+        self._test_pax_headers({"euro": "\u20ac".encode("utf8")})
 
-        self._test_pax_headers({"euro": u"\u20ac"},
-                               {"euro": u"\u20ac".encode("utf8")})
+        self._test_pax_headers({"euro": "\u20ac"},
+                               {"euro": "\u20ac".encode("utf8")})
 
-        self._test_pax_headers({u"\u20ac": "euro"},
-                               {u"\u20ac".encode("utf8"): "euro"})
+        self._test_pax_headers({"\u20ac": "euro"},
+                               {"\u20ac".encode("utf8"): "euro"})
 
     def _test_pax_headers(self, pax_headers, cmp_headers=None):
         if cmp_headers is None:
diff --git a/Lib/test/test_textwrap.py b/Lib/test/test_textwrap.py
index 5f0b51b..472d125 100644
--- a/Lib/test/test_textwrap.py
+++ b/Lib/test/test_textwrap.py
@@ -341,13 +341,13 @@
             # *Very* simple test of wrapping Unicode strings.  I'm sure
             # there's more to it than this, but let's at least make
             # sure textwrap doesn't crash on Unicode input!
-            text = u"Hello there, how are you today?"
-            self.check_wrap(text, 50, [u"Hello there, how are you today?"])
-            self.check_wrap(text, 20, [u"Hello there, how are", "you today?"])
+            text = "Hello there, how are you today?"
+            self.check_wrap(text, 50, ["Hello there, how are you today?"])
+            self.check_wrap(text, 20, ["Hello there, how are", "you today?"])
             olines = self.wrapper.wrap(text)
-            assert isinstance(olines, list) and isinstance(olines[0], unicode)
+            assert isinstance(olines, list) and isinstance(olines[0], str)
             otext = self.wrapper.fill(text)
-            assert isinstance(otext, unicode)
+            assert isinstance(otext, str)
 
     def test_split(self):
         # Ensure that the standard _split() method works as advertised
diff --git a/Lib/test/test_timeout.py b/Lib/test/test_timeout.py
index 94765d9..803ea9f 100644
--- a/Lib/test/test_timeout.py
+++ b/Lib/test/test_timeout.py
@@ -50,7 +50,7 @@
         self.sock.settimeout(0.0)
         self.sock.settimeout(None)
         self.assertRaises(TypeError, self.sock.settimeout, "")
-        self.assertRaises(TypeError, self.sock.settimeout, u"")
+        self.assertRaises(TypeError, self.sock.settimeout, "")
         self.assertRaises(TypeError, self.sock.settimeout, ())
         self.assertRaises(TypeError, self.sock.settimeout, [])
         self.assertRaises(TypeError, self.sock.settimeout, {})
diff --git a/Lib/test/test_types.py b/Lib/test/test_types.py
index f5970ba..9441181 100644
--- a/Lib/test/test_types.py
+++ b/Lib/test/test_types.py
@@ -200,16 +200,16 @@
         self.assertEqual(a[-100:100:2], '02468')
 
         if have_unicode:
-            a = unicode('0123456789', 'ascii')
+            a = str('0123456789', 'ascii')
             self.assertEqual(a[::], a)
-            self.assertEqual(a[::2], unicode('02468', 'ascii'))
-            self.assertEqual(a[1::2], unicode('13579', 'ascii'))
-            self.assertEqual(a[::-1], unicode('9876543210', 'ascii'))
-            self.assertEqual(a[::-2], unicode('97531', 'ascii'))
-            self.assertEqual(a[3::-2], unicode('31', 'ascii'))
+            self.assertEqual(a[::2], str('02468', 'ascii'))
+            self.assertEqual(a[1::2], str('13579', 'ascii'))
+            self.assertEqual(a[::-1], str('9876543210', 'ascii'))
+            self.assertEqual(a[::-2], str('97531', 'ascii'))
+            self.assertEqual(a[3::-2], str('31', 'ascii'))
             self.assertEqual(a[-100:100:], a)
             self.assertEqual(a[100:-100:-1], a[::-1])
-            self.assertEqual(a[-100:100:2], unicode('02468', 'ascii'))
+            self.assertEqual(a[-100:100:2], str('02468', 'ascii'))
 
 
     def test_type_function(self):
diff --git a/Lib/test/test_ucn.py b/Lib/test/test_ucn.py
index 775044b..4472e90 100644
--- a/Lib/test/test_ucn.py
+++ b/Lib/test/test_ucn.py
@@ -17,7 +17,7 @@
         # Helper that put all \N escapes inside eval'd raw strings,
         # to make sure this script runs even if the compiler
         # chokes on \N escapes
-        res = eval(ur'u"\N{%s}"' % name)
+        res = eval(r'"\N{%s}"' % name)
         self.assertEqual(res, code)
         return res
 
@@ -51,10 +51,10 @@
             "LATIN SMALL LETTER P",
             "FULL STOP"
         ]
-        string = u"The rEd fOx ate the sheep."
+        string = "The rEd fOx ate the sheep."
 
         self.assertEqual(
-            u"".join([self.checkletter(*args) for args in zip(chars, string)]),
+            "".join([self.checkletter(*args) for args in zip(chars, string)]),
             string
         )
 
@@ -67,30 +67,30 @@
             self.assertEqual(unicodedata.name(code), name)
 
     def test_hangul_syllables(self):
-        self.checkletter("HANGUL SYLLABLE GA", u"\uac00")
-        self.checkletter("HANGUL SYLLABLE GGWEOSS", u"\uafe8")
-        self.checkletter("HANGUL SYLLABLE DOLS", u"\ub3d0")
-        self.checkletter("HANGUL SYLLABLE RYAN", u"\ub7b8")
-        self.checkletter("HANGUL SYLLABLE MWIK", u"\ubba0")
-        self.checkletter("HANGUL SYLLABLE BBWAEM", u"\ubf88")
-        self.checkletter("HANGUL SYLLABLE SSEOL", u"\uc370")
-        self.checkletter("HANGUL SYLLABLE YI", u"\uc758")
-        self.checkletter("HANGUL SYLLABLE JJYOSS", u"\ucb40")
-        self.checkletter("HANGUL SYLLABLE KYEOLS", u"\ucf28")
-        self.checkletter("HANGUL SYLLABLE PAN", u"\ud310")
-        self.checkletter("HANGUL SYLLABLE HWEOK", u"\ud6f8")
-        self.checkletter("HANGUL SYLLABLE HIH", u"\ud7a3")
+        self.checkletter("HANGUL SYLLABLE GA", "\uac00")
+        self.checkletter("HANGUL SYLLABLE GGWEOSS", "\uafe8")
+        self.checkletter("HANGUL SYLLABLE DOLS", "\ub3d0")
+        self.checkletter("HANGUL SYLLABLE RYAN", "\ub7b8")
+        self.checkletter("HANGUL SYLLABLE MWIK", "\ubba0")
+        self.checkletter("HANGUL SYLLABLE BBWAEM", "\ubf88")
+        self.checkletter("HANGUL SYLLABLE SSEOL", "\uc370")
+        self.checkletter("HANGUL SYLLABLE YI", "\uc758")
+        self.checkletter("HANGUL SYLLABLE JJYOSS", "\ucb40")
+        self.checkletter("HANGUL SYLLABLE KYEOLS", "\ucf28")
+        self.checkletter("HANGUL SYLLABLE PAN", "\ud310")
+        self.checkletter("HANGUL SYLLABLE HWEOK", "\ud6f8")
+        self.checkletter("HANGUL SYLLABLE HIH", "\ud7a3")
 
         import unicodedata
-        self.assertRaises(ValueError, unicodedata.name, u"\ud7a4")
+        self.assertRaises(ValueError, unicodedata.name, "\ud7a4")
 
     def test_cjk_unified_ideographs(self):
-        self.checkletter("CJK UNIFIED IDEOGRAPH-3400", u"\u3400")
-        self.checkletter("CJK UNIFIED IDEOGRAPH-4DB5", u"\u4db5")
-        self.checkletter("CJK UNIFIED IDEOGRAPH-4E00", u"\u4e00")
-        self.checkletter("CJK UNIFIED IDEOGRAPH-9FA5", u"\u9fa5")
-        self.checkletter("CJK UNIFIED IDEOGRAPH-20000", u"\U00020000")
-        self.checkletter("CJK UNIFIED IDEOGRAPH-2A6D6", u"\U0002a6d6")
+        self.checkletter("CJK UNIFIED IDEOGRAPH-3400", "\u3400")
+        self.checkletter("CJK UNIFIED IDEOGRAPH-4DB5", "\u4db5")
+        self.checkletter("CJK UNIFIED IDEOGRAPH-4E00", "\u4e00")
+        self.checkletter("CJK UNIFIED IDEOGRAPH-9FA5", "\u9fa5")
+        self.checkletter("CJK UNIFIED IDEOGRAPH-20000", "\U00020000")
+        self.checkletter("CJK UNIFIED IDEOGRAPH-2A6D6", "\U0002a6d6")
 
     def test_bmp_characters(self):
         import unicodedata
@@ -103,38 +103,38 @@
                 count += 1
 
     def test_misc_symbols(self):
-        self.checkletter("PILCROW SIGN", u"\u00b6")
-        self.checkletter("REPLACEMENT CHARACTER", u"\uFFFD")
-        self.checkletter("HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK", u"\uFF9F")
-        self.checkletter("FULLWIDTH LATIN SMALL LETTER A", u"\uFF41")
+        self.checkletter("PILCROW SIGN", "\u00b6")
+        self.checkletter("REPLACEMENT CHARACTER", "\uFFFD")
+        self.checkletter("HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK", "\uFF9F")
+        self.checkletter("FULLWIDTH LATIN SMALL LETTER A", "\uFF41")
 
     def test_errors(self):
         import unicodedata
         self.assertRaises(TypeError, unicodedata.name)
-        self.assertRaises(TypeError, unicodedata.name, u'xx')
+        self.assertRaises(TypeError, unicodedata.name, 'xx')
         self.assertRaises(TypeError, unicodedata.lookup)
-        self.assertRaises(KeyError, unicodedata.lookup, u'unknown')
+        self.assertRaises(KeyError, unicodedata.lookup, 'unknown')
 
     def test_strict_eror_handling(self):
         # bogus character name
         self.assertRaises(
             UnicodeError,
-            unicode, "\\N{blah}", 'unicode-escape', 'strict'
+            str, "\\N{blah}", 'unicode-escape', 'strict'
         )
         # long bogus character name
         self.assertRaises(
             UnicodeError,
-            unicode, "\\N{%s}" % ("x" * 100000), 'unicode-escape', 'strict'
+            str, "\\N{%s}" % ("x" * 100000), 'unicode-escape', 'strict'
         )
         # missing closing brace
         self.assertRaises(
             UnicodeError,
-            unicode, "\\N{SPACE", 'unicode-escape', 'strict'
+            str, "\\N{SPACE", 'unicode-escape', 'strict'
         )
         # missing opening brace
         self.assertRaises(
             UnicodeError,
-            unicode, "\\NSPACE", 'unicode-escape', 'strict'
+            str, "\\NSPACE", 'unicode-escape', 'strict'
         )
 
 def test_main():
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index a704cc9..125fd56 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -32,7 +32,7 @@
     string_tests.MixinStrUnicodeUserStringTest,
     string_tests.MixinStrUnicodeTest,
     ):
-    type2test = unicode
+    type2test = str
 
     def checkequalnofix(self, result, object, methodname, *args):
         method = getattr(object, methodname)
@@ -43,9 +43,9 @@
         # if the original is returned make sure that
         # this doesn't happen with subclasses
         if realresult is object:
-            class usub(unicode):
+            class usub(str):
                 def __repr__(self):
-                    return 'usub(%r)' % unicode.__repr__(self)
+                    return 'usub(%r)' % str.__repr__(self)
             object = usub(object)
             method = getattr(object, methodname)
             realresult = method(*args)
@@ -53,8 +53,8 @@
             self.assert_(object is not realresult)
 
     def test_literals(self):
-        self.assertEqual(u'\xff', u'\u00ff')
-        self.assertEqual(u'\uffff', u'\U0000ffff')
+        self.assertEqual('\xff', '\u00ff')
+        self.assertEqual('\uffff', '\U0000ffff')
         self.assertRaises(UnicodeError, eval, 'u\'\\Ufffffffe\'')
         self.assertRaises(UnicodeError, eval, 'u\'\\Uffffffff\'')
         self.assertRaises(UnicodeError, eval, 'u\'\\U%08x\'' % 0x110000)
@@ -62,19 +62,19 @@
     def test_repr(self):
         if not sys.platform.startswith('java'):
             # Test basic sanity of repr()
-            self.assertEqual(repr(u'abc'), "u'abc'")
-            self.assertEqual(repr(u'ab\\c'), "u'ab\\\\c'")
-            self.assertEqual(repr(u'ab\\'), "u'ab\\\\'")
-            self.assertEqual(repr(u'\\c'), "u'\\\\c'")
-            self.assertEqual(repr(u'\\'), "u'\\\\'")
-            self.assertEqual(repr(u'\n'), "u'\\n'")
-            self.assertEqual(repr(u'\r'), "u'\\r'")
-            self.assertEqual(repr(u'\t'), "u'\\t'")
-            self.assertEqual(repr(u'\b'), "u'\\x08'")
-            self.assertEqual(repr(u"'\""), """u'\\'"'""")
-            self.assertEqual(repr(u"'\""), """u'\\'"'""")
-            self.assertEqual(repr(u"'"), '''u"'"''')
-            self.assertEqual(repr(u'"'), """u'"'""")
+            self.assertEqual(repr('abc'), "u'abc'")
+            self.assertEqual(repr('ab\\c'), "u'ab\\\\c'")
+            self.assertEqual(repr('ab\\'), "u'ab\\\\'")
+            self.assertEqual(repr('\\c'), "u'\\\\c'")
+            self.assertEqual(repr('\\'), "u'\\\\'")
+            self.assertEqual(repr('\n'), "u'\\n'")
+            self.assertEqual(repr('\r'), "u'\\r'")
+            self.assertEqual(repr('\t'), "u'\\t'")
+            self.assertEqual(repr('\b'), "u'\\x08'")
+            self.assertEqual(repr("'\""), """u'\\'"'""")
+            self.assertEqual(repr("'\""), """u'\\'"'""")
+            self.assertEqual(repr("'"), '''u"'"''')
+            self.assertEqual(repr('"'), """u'"'""")
             latin1repr = (
                 "u'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
                 "\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
@@ -90,52 +90,52 @@
                 "\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
                 "\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
                 "\\xfe\\xff'")
-            testrepr = repr(u''.join(map(unichr, xrange(256))))
+            testrepr = repr(''.join(map(unichr, xrange(256))))
             self.assertEqual(testrepr, latin1repr)
             # Test repr works on wide unicode escapes without overflow.
-            self.assertEqual(repr(u"\U00010000" * 39 + u"\uffff" * 4096),
-                             repr(u"\U00010000" * 39 + u"\uffff" * 4096))
+            self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
+                             repr("\U00010000" * 39 + "\uffff" * 4096))
 
     def test_iterators(self):
         # Make sure unicode objects have an __iter__ method
-        it = u"\u1111\u2222\u3333".__iter__()
-        self.assertEqual(next(it), u"\u1111")
-        self.assertEqual(next(it), u"\u2222")
-        self.assertEqual(next(it), u"\u3333")
+        it = "\u1111\u2222\u3333".__iter__()
+        self.assertEqual(next(it), "\u1111")
+        self.assertEqual(next(it), "\u2222")
+        self.assertEqual(next(it), "\u3333")
         self.assertRaises(StopIteration, next, it)
 
     def test_count(self):
         string_tests.CommonTest.test_count(self)
         # check mixed argument types
-        self.checkequalnofix(3,  'aaa', 'count', u'a')
-        self.checkequalnofix(0,  'aaa', 'count', u'b')
-        self.checkequalnofix(3, u'aaa', 'count',  'a')
-        self.checkequalnofix(0, u'aaa', 'count',  'b')
-        self.checkequalnofix(0, u'aaa', 'count',  'b')
-        self.checkequalnofix(1, u'aaa', 'count',  'a', -1)
-        self.checkequalnofix(3, u'aaa', 'count',  'a', -10)
-        self.checkequalnofix(2, u'aaa', 'count',  'a', 0, -1)
-        self.checkequalnofix(0, u'aaa', 'count',  'a', 0, -10)
+        self.checkequalnofix(3,  'aaa', 'count', 'a')
+        self.checkequalnofix(0,  'aaa', 'count', 'b')
+        self.checkequalnofix(3, 'aaa', 'count',  'a')
+        self.checkequalnofix(0, 'aaa', 'count',  'b')
+        self.checkequalnofix(0, 'aaa', 'count',  'b')
+        self.checkequalnofix(1, 'aaa', 'count',  'a', -1)
+        self.checkequalnofix(3, 'aaa', 'count',  'a', -10)
+        self.checkequalnofix(2, 'aaa', 'count',  'a', 0, -1)
+        self.checkequalnofix(0, 'aaa', 'count',  'a', 0, -10)
 
     def test_find(self):
-        self.checkequalnofix(0,  u'abcdefghiabc', 'find', u'abc')
-        self.checkequalnofix(9,  u'abcdefghiabc', 'find', u'abc', 1)
-        self.checkequalnofix(-1, u'abcdefghiabc', 'find', u'def', 4)
+        self.checkequalnofix(0,  'abcdefghiabc', 'find', 'abc')
+        self.checkequalnofix(9,  'abcdefghiabc', 'find', 'abc', 1)
+        self.checkequalnofix(-1, 'abcdefghiabc', 'find', 'def', 4)
 
-        self.assertRaises(TypeError, u'hello'.find)
-        self.assertRaises(TypeError, u'hello'.find, 42)
+        self.assertRaises(TypeError, 'hello'.find)
+        self.assertRaises(TypeError, 'hello'.find, 42)
 
     def test_rfind(self):
         string_tests.CommonTest.test_rfind(self)
         # check mixed argument types
-        self.checkequalnofix(9,   'abcdefghiabc', 'rfind', u'abc')
-        self.checkequalnofix(12,  'abcdefghiabc', 'rfind', u'')
-        self.checkequalnofix(12, u'abcdefghiabc', 'rfind',  '')
+        self.checkequalnofix(9,   'abcdefghiabc', 'rfind', 'abc')
+        self.checkequalnofix(12,  'abcdefghiabc', 'rfind', '')
+        self.checkequalnofix(12, 'abcdefghiabc', 'rfind',  '')
 
     def test_index(self):
         string_tests.CommonTest.test_index(self)
         # check mixed argument types
-        for (t1, t2) in ((str, unicode), (unicode, str)):
+        for (t1, t2) in ((str, str), (str, str)):
             self.checkequalnofix(0, t1('abcdefghiabc'), 'index',  t2(''))
             self.checkequalnofix(3, t1('abcdefghiabc'), 'index',  t2('def'))
             self.checkequalnofix(0, t1('abcdefghiabc'), 'index',  t2('abc'))
@@ -148,7 +148,7 @@
     def test_rindex(self):
         string_tests.CommonTest.test_rindex(self)
         # check mixed argument types
-        for (t1, t2) in ((str, unicode), (unicode, str)):
+        for (t1, t2) in ((str, str), (str, str)):
             self.checkequalnofix(12, t1('abcdefghiabc'), 'rindex',  t2(''))
             self.checkequalnofix(3,  t1('abcdefghiabc'), 'rindex',  t2('def'))
             self.checkequalnofix(9,  t1('abcdefghiabc'), 'rindex',  t2('abc'))
@@ -161,291 +161,291 @@
             self.assertRaises(ValueError, t1('abcdefghi').rindex,  t2('ghi'), 0, -1)
 
     def test_translate(self):
-        self.checkequalnofix(u'bbbc', u'abababc', 'translate', {ord('a'):None})
-        self.checkequalnofix(u'iiic', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i')})
-        self.checkequalnofix(u'iiix', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i'), ord('c'):u'x'})
-        self.checkequalnofix(u'<i><i><i>c', u'abababc', 'translate', {ord('a'):None, ord('b'):u'<i>'})
-        self.checkequalnofix(u'c', u'abababc', 'translate', {ord('a'):None, ord('b'):u''})
-        self.checkequalnofix(u'xyyx', u'xzx', 'translate', {ord('z'):u'yy'})
+        self.checkequalnofix('bbbc', 'abababc', 'translate', {ord('a'):None})
+        self.checkequalnofix('iiic', 'abababc', 'translate', {ord('a'):None, ord('b'):ord('i')})
+        self.checkequalnofix('iiix', 'abababc', 'translate', {ord('a'):None, ord('b'):ord('i'), ord('c'):'x'})
+        self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', {ord('a'):None, ord('b'):'<i>'})
+        self.checkequalnofix('c', 'abababc', 'translate', {ord('a'):None, ord('b'):''})
+        self.checkequalnofix('xyyx', 'xzx', 'translate', {ord('z'):'yy'})
 
-        self.assertRaises(TypeError, u'hello'.translate)
-        self.assertRaises(TypeError, u'abababc'.translate, {ord('a'):''})
+        self.assertRaises(TypeError, 'hello'.translate)
+        self.assertRaises(TypeError, 'abababc'.translate, {ord('a'):''})
 
     def test_split(self):
         string_tests.CommonTest.test_split(self)
 
         # Mixed arguments
-        self.checkequalnofix([u'a', u'b', u'c', u'd'], u'a//b//c//d', 'split', '//')
-        self.checkequalnofix([u'a', u'b', u'c', u'd'], 'a//b//c//d', 'split', u'//')
-        self.checkequalnofix([u'endcase ', u''], u'endcase test', 'split', 'test')
+        self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
+        self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
+        self.checkequalnofix(['endcase ', ''], 'endcase test', 'split', 'test')
 
     def test_join(self):
         string_tests.MixinStrUnicodeUserStringTest.test_join(self)
 
         # mixed arguments
-        self.checkequalnofix(u'a b c d', u' ', 'join', ['a', 'b', u'c', u'd'])
-        self.checkequalnofix(u'abcd', u'', 'join', (u'a', u'b', u'c', u'd'))
-        self.checkequalnofix(u'w x y z', u' ', 'join', string_tests.Sequence('wxyz'))
-        self.checkequalnofix(u'a b c d', ' ', 'join', [u'a', u'b', u'c', u'd'])
-        self.checkequalnofix(u'a b c d', ' ', 'join', ['a', 'b', u'c', u'd'])
-        self.checkequalnofix(u'abcd', '', 'join', (u'a', u'b', u'c', u'd'))
-        self.checkequalnofix(u'w x y z', ' ', 'join', string_tests.Sequence(u'wxyz'))
+        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
+        self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
+        self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
+        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
+        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
+        self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
+        self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
 
     def test_strip(self):
         string_tests.CommonTest.test_strip(self)
-        self.assertRaises(UnicodeError, u"hello".strip, "\xff")
+        self.assertRaises(UnicodeError, "hello".strip, "\xff")
 
     def test_replace(self):
         string_tests.CommonTest.test_replace(self)
 
         # method call forwarded from str implementation because of unicode argument
-        self.checkequalnofix(u'one@two!three!', 'one!two!three!', 'replace', u'!', u'@', 1)
-        self.assertRaises(TypeError, 'replace'.replace, u"r", 42)
+        self.checkequalnofix('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
+        self.assertRaises(TypeError, 'replace'.replace, "r", 42)
 
     def test_comparison(self):
         # Comparisons:
-        self.assertEqual(u'abc', 'abc')
-        self.assertEqual('abc', u'abc')
-        self.assertEqual(u'abc', u'abc')
-        self.assert_(u'abcd' > 'abc')
-        self.assert_('abcd' > u'abc')
-        self.assert_(u'abcd' > u'abc')
-        self.assert_(u'abc' < 'abcd')
-        self.assert_('abc' < u'abcd')
-        self.assert_(u'abc' < u'abcd')
+        self.assertEqual('abc', 'abc')
+        self.assertEqual('abc', 'abc')
+        self.assertEqual('abc', 'abc')
+        self.assert_('abcd' > 'abc')
+        self.assert_('abcd' > 'abc')
+        self.assert_('abcd' > 'abc')
+        self.assert_('abc' < 'abcd')
+        self.assert_('abc' < 'abcd')
+        self.assert_('abc' < 'abcd')
 
         if 0:
             # Move these tests to a Unicode collation module test...
             # Testing UTF-16 code point order comparisons...
 
             # No surrogates, no fixup required.
-            self.assert_(u'\u0061' < u'\u20ac')
+            self.assert_('\u0061' < '\u20ac')
             # Non surrogate below surrogate value, no fixup required
-            self.assert_(u'\u0061' < u'\ud800\udc02')
+            self.assert_('\u0061' < '\ud800\udc02')
 
             # Non surrogate above surrogate value, fixup required
             def test_lecmp(s, s2):
                 self.assert_(s < s2)
 
             def test_fixup(s):
-                s2 = u'\ud800\udc01'
+                s2 = '\ud800\udc01'
                 test_lecmp(s, s2)
-                s2 = u'\ud900\udc01'
+                s2 = '\ud900\udc01'
                 test_lecmp(s, s2)
-                s2 = u'\uda00\udc01'
+                s2 = '\uda00\udc01'
                 test_lecmp(s, s2)
-                s2 = u'\udb00\udc01'
+                s2 = '\udb00\udc01'
                 test_lecmp(s, s2)
-                s2 = u'\ud800\udd01'
+                s2 = '\ud800\udd01'
                 test_lecmp(s, s2)
-                s2 = u'\ud900\udd01'
+                s2 = '\ud900\udd01'
                 test_lecmp(s, s2)
-                s2 = u'\uda00\udd01'
+                s2 = '\uda00\udd01'
                 test_lecmp(s, s2)
-                s2 = u'\udb00\udd01'
+                s2 = '\udb00\udd01'
                 test_lecmp(s, s2)
-                s2 = u'\ud800\ude01'
+                s2 = '\ud800\ude01'
                 test_lecmp(s, s2)
-                s2 = u'\ud900\ude01'
+                s2 = '\ud900\ude01'
                 test_lecmp(s, s2)
-                s2 = u'\uda00\ude01'
+                s2 = '\uda00\ude01'
                 test_lecmp(s, s2)
-                s2 = u'\udb00\ude01'
+                s2 = '\udb00\ude01'
                 test_lecmp(s, s2)
-                s2 = u'\ud800\udfff'
+                s2 = '\ud800\udfff'
                 test_lecmp(s, s2)
-                s2 = u'\ud900\udfff'
+                s2 = '\ud900\udfff'
                 test_lecmp(s, s2)
-                s2 = u'\uda00\udfff'
+                s2 = '\uda00\udfff'
                 test_lecmp(s, s2)
-                s2 = u'\udb00\udfff'
+                s2 = '\udb00\udfff'
                 test_lecmp(s, s2)
 
-                test_fixup(u'\ue000')
-                test_fixup(u'\uff61')
+                test_fixup('\ue000')
+                test_fixup('\uff61')
 
         # Surrogates on both sides, no fixup required
-        self.assert_(u'\ud800\udc02' < u'\ud84d\udc56')
+        self.assert_('\ud800\udc02' < '\ud84d\udc56')
 
     def test_islower(self):
         string_tests.MixinStrUnicodeUserStringTest.test_islower(self)
-        self.checkequalnofix(False, u'\u1FFc', 'islower')
+        self.checkequalnofix(False, '\u1FFc', 'islower')
 
     def test_isupper(self):
         string_tests.MixinStrUnicodeUserStringTest.test_isupper(self)
         if not sys.platform.startswith('java'):
-            self.checkequalnofix(False, u'\u1FFc', 'isupper')
+            self.checkequalnofix(False, '\u1FFc', 'isupper')
 
     def test_istitle(self):
         string_tests.MixinStrUnicodeUserStringTest.test_title(self)
-        self.checkequalnofix(True, u'\u1FFc', 'istitle')
-        self.checkequalnofix(True, u'Greek \u1FFcitlecases ...', 'istitle')
+        self.checkequalnofix(True, '\u1FFc', 'istitle')
+        self.checkequalnofix(True, 'Greek \u1FFcitlecases ...', 'istitle')
 
     def test_isspace(self):
         string_tests.MixinStrUnicodeUserStringTest.test_isspace(self)
-        self.checkequalnofix(True, u'\u2000', 'isspace')
-        self.checkequalnofix(True, u'\u200a', 'isspace')
-        self.checkequalnofix(False, u'\u2014', 'isspace')
+        self.checkequalnofix(True, '\u2000', 'isspace')
+        self.checkequalnofix(True, '\u200a', 'isspace')
+        self.checkequalnofix(False, '\u2014', 'isspace')
 
     def test_isalpha(self):
         string_tests.MixinStrUnicodeUserStringTest.test_isalpha(self)
-        self.checkequalnofix(True, u'\u1FFc', 'isalpha')
+        self.checkequalnofix(True, '\u1FFc', 'isalpha')
 
     def test_isdecimal(self):
-        self.checkequalnofix(False, u'', 'isdecimal')
-        self.checkequalnofix(False, u'a', 'isdecimal')
-        self.checkequalnofix(True, u'0', 'isdecimal')
-        self.checkequalnofix(False, u'\u2460', 'isdecimal') # CIRCLED DIGIT ONE
-        self.checkequalnofix(False, u'\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
-        self.checkequalnofix(True, u'\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
-        self.checkequalnofix(True, u'0123456789', 'isdecimal')
-        self.checkequalnofix(False, u'0123456789a', 'isdecimal')
+        self.checkequalnofix(False, '', 'isdecimal')
+        self.checkequalnofix(False, 'a', 'isdecimal')
+        self.checkequalnofix(True, '0', 'isdecimal')
+        self.checkequalnofix(False, '\u2460', 'isdecimal') # CIRCLED DIGIT ONE
+        self.checkequalnofix(False, '\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
+        self.checkequalnofix(True, '\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
+        self.checkequalnofix(True, '0123456789', 'isdecimal')
+        self.checkequalnofix(False, '0123456789a', 'isdecimal')
 
         self.checkraises(TypeError, 'abc', 'isdecimal', 42)
 
     def test_isdigit(self):
         string_tests.MixinStrUnicodeUserStringTest.test_isdigit(self)
-        self.checkequalnofix(True, u'\u2460', 'isdigit')
-        self.checkequalnofix(False, u'\xbc', 'isdigit')
-        self.checkequalnofix(True, u'\u0660', 'isdigit')
+        self.checkequalnofix(True, '\u2460', 'isdigit')
+        self.checkequalnofix(False, '\xbc', 'isdigit')
+        self.checkequalnofix(True, '\u0660', 'isdigit')
 
     def test_isnumeric(self):
-        self.checkequalnofix(False, u'', 'isnumeric')
-        self.checkequalnofix(False, u'a', 'isnumeric')
-        self.checkequalnofix(True, u'0', 'isnumeric')
-        self.checkequalnofix(True, u'\u2460', 'isnumeric')
-        self.checkequalnofix(True, u'\xbc', 'isnumeric')
-        self.checkequalnofix(True, u'\u0660', 'isnumeric')
-        self.checkequalnofix(True, u'0123456789', 'isnumeric')
-        self.checkequalnofix(False, u'0123456789a', 'isnumeric')
+        self.checkequalnofix(False, '', 'isnumeric')
+        self.checkequalnofix(False, 'a', 'isnumeric')
+        self.checkequalnofix(True, '0', 'isnumeric')
+        self.checkequalnofix(True, '\u2460', 'isnumeric')
+        self.checkequalnofix(True, '\xbc', 'isnumeric')
+        self.checkequalnofix(True, '\u0660', 'isnumeric')
+        self.checkequalnofix(True, '0123456789', 'isnumeric')
+        self.checkequalnofix(False, '0123456789a', 'isnumeric')
 
-        self.assertRaises(TypeError, u"abc".isnumeric, 42)
+        self.assertRaises(TypeError, "abc".isnumeric, 42)
 
     def test_contains(self):
         # Testing Unicode contains method
-        self.assert_('a' in u'abdb')
-        self.assert_('a' in u'bdab')
-        self.assert_('a' in u'bdaba')
-        self.assert_('a' in u'bdba')
-        self.assert_('a' in u'bdba')
-        self.assert_(u'a' in u'bdba')
-        self.assert_(u'a' not in u'bdb')
-        self.assert_(u'a' not in 'bdb')
-        self.assert_(u'a' in 'bdba')
-        self.assert_(u'a' in ('a',1,None))
-        self.assert_(u'a' in (1,None,'a'))
-        self.assert_(u'a' in (1,None,u'a'))
+        self.assert_('a' in 'abdb')
+        self.assert_('a' in 'bdab')
+        self.assert_('a' in 'bdaba')
+        self.assert_('a' in 'bdba')
+        self.assert_('a' in 'bdba')
+        self.assert_('a' in 'bdba')
+        self.assert_('a' not in 'bdb')
+        self.assert_('a' not in 'bdb')
+        self.assert_('a' in 'bdba')
         self.assert_('a' in ('a',1,None))
         self.assert_('a' in (1,None,'a'))
-        self.assert_('a' in (1,None,u'a'))
-        self.assert_('a' not in ('x',1,u'y'))
+        self.assert_('a' in (1,None,'a'))
+        self.assert_('a' in ('a',1,None))
+        self.assert_('a' in (1,None,'a'))
+        self.assert_('a' in (1,None,'a'))
+        self.assert_('a' not in ('x',1,'y'))
         self.assert_('a' not in ('x',1,None))
-        self.assert_(u'abcd' not in u'abcxxxx')
-        self.assert_(u'ab' in u'abcd')
-        self.assert_('ab' in u'abc')
-        self.assert_(u'ab' in 'abc')
-        self.assert_(u'ab' in (1,None,u'ab'))
-        self.assert_(u'' in u'abc')
-        self.assert_('' in u'abc')
+        self.assert_('abcd' not in 'abcxxxx')
+        self.assert_('ab' in 'abcd')
+        self.assert_('ab' in 'abc')
+        self.assert_('ab' in 'abc')
+        self.assert_('ab' in (1,None,'ab'))
+        self.assert_('' in 'abc')
+        self.assert_('' in 'abc')
 
         # If the following fails either
         # the contains operator does not propagate UnicodeErrors or
         # someone has changed the default encoding
-        self.assertRaises(UnicodeError, 'g\xe2teau'.__contains__, u'\xe2')
+        self.assertRaises(UnicodeError, 'g\xe2teau'.__contains__, '\xe2')
 
-        self.assert_(u'' in '')
-        self.assert_('' in u'')
-        self.assert_(u'' in u'')
-        self.assert_(u'' in 'abc')
-        self.assert_('' in u'abc')
-        self.assert_(u'' in u'abc')
-        self.assert_(u'\0' not in 'abc')
-        self.assert_('\0' not in u'abc')
-        self.assert_(u'\0' not in u'abc')
-        self.assert_(u'\0' in '\0abc')
-        self.assert_('\0' in u'\0abc')
-        self.assert_(u'\0' in u'\0abc')
-        self.assert_(u'\0' in 'abc\0')
-        self.assert_('\0' in u'abc\0')
-        self.assert_(u'\0' in u'abc\0')
-        self.assert_(u'a' in '\0abc')
-        self.assert_('a' in u'\0abc')
-        self.assert_(u'a' in u'\0abc')
-        self.assert_(u'asdf' in 'asdf')
-        self.assert_('asdf' in u'asdf')
-        self.assert_(u'asdf' in u'asdf')
-        self.assert_(u'asdf' not in 'asd')
-        self.assert_('asdf' not in u'asd')
-        self.assert_(u'asdf' not in u'asd')
-        self.assert_(u'asdf' not in '')
-        self.assert_('asdf' not in u'')
-        self.assert_(u'asdf' not in u'')
+        self.assert_('' in '')
+        self.assert_('' in '')
+        self.assert_('' in '')
+        self.assert_('' in 'abc')
+        self.assert_('' in 'abc')
+        self.assert_('' in 'abc')
+        self.assert_('\0' not in 'abc')
+        self.assert_('\0' not in 'abc')
+        self.assert_('\0' not in 'abc')
+        self.assert_('\0' in '\0abc')
+        self.assert_('\0' in '\0abc')
+        self.assert_('\0' in '\0abc')
+        self.assert_('\0' in 'abc\0')
+        self.assert_('\0' in 'abc\0')
+        self.assert_('\0' in 'abc\0')
+        self.assert_('a' in '\0abc')
+        self.assert_('a' in '\0abc')
+        self.assert_('a' in '\0abc')
+        self.assert_('asdf' in 'asdf')
+        self.assert_('asdf' in 'asdf')
+        self.assert_('asdf' in 'asdf')
+        self.assert_('asdf' not in 'asd')
+        self.assert_('asdf' not in 'asd')
+        self.assert_('asdf' not in 'asd')
+        self.assert_('asdf' not in '')
+        self.assert_('asdf' not in '')
+        self.assert_('asdf' not in '')
 
-        self.assertRaises(TypeError, u"abc".__contains__)
+        self.assertRaises(TypeError, "abc".__contains__)
 
     def test_formatting(self):
         string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
         # Testing Unicode formatting strings...
-        self.assertEqual(u"%s, %s" % (u"abc", "abc"), u'abc, abc')
-        self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, 2, 3), u'abc, abc, 1, 2.000000,  3.00')
-        self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, -2, 3), u'abc, abc, 1, -2.000000,  3.00')
-        self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.5), u'abc, abc, -1, -2.000000,  3.50')
-        self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.57), u'abc, abc, -1, -2.000000,  3.57')
-        self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 1003.57), u'abc, abc, -1, -2.000000, 1003.57')
+        self.assertEqual("%s, %s" % ("abc", "abc"), 'abc, abc')
+        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, 2, 3), 'abc, abc, 1, 2.000000,  3.00')
+        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, -2, 3), 'abc, abc, 1, -2.000000,  3.00')
+        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.5), 'abc, abc, -1, -2.000000,  3.50')
+        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.57), 'abc, abc, -1, -2.000000,  3.57')
+        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 1003.57), 'abc, abc, -1, -2.000000, 1003.57')
         if not sys.platform.startswith('java'):
-            self.assertEqual(u"%r, %r" % (u"abc", "abc"), u"u'abc', 'abc'")
-        self.assertEqual(u"%(x)s, %(y)s" % {'x':u"abc", 'y':"def"}, u'abc, def')
-        self.assertEqual(u"%(x)s, %(\xfc)s" % {'x':u"abc", u'\xfc':"def"}, u'abc, def')
+            self.assertEqual("%r, %r" % ("abc", "abc"), "u'abc', 'abc'")
+        self.assertEqual("%(x)s, %(y)s" % {'x':"abc", 'y':"def"}, 'abc, def')
+        self.assertEqual("%(x)s, %(\xfc)s" % {'x':"abc", '\xfc':"def"}, 'abc, def')
 
-        self.assertEqual(u'%c' % 0x1234, u'\u1234')
-        self.assertRaises(OverflowError, u"%c".__mod__, (sys.maxunicode+1,))
+        self.assertEqual('%c' % 0x1234, '\u1234')
+        self.assertRaises(OverflowError, "%c".__mod__, (sys.maxunicode+1,))
 
         # formatting jobs delegated from the string implementation:
-        self.assertEqual('...%(foo)s...' % {'foo':u"abc"}, u'...abc...')
         self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
-        self.assertEqual('...%(foo)s...' % {u'foo':"abc"}, '...abc...')
-        self.assertEqual('...%(foo)s...' % {u'foo':u"abc"}, u'...abc...')
-        self.assertEqual('...%(foo)s...' % {u'foo':u"abc",'def':123},  u'...abc...')
-        self.assertEqual('...%(foo)s...' % {u'foo':u"abc",u'def':123}, u'...abc...')
-        self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...1...2...3...abc...')
-        self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...%...%s...1...2...3...abc...')
-        self.assertEqual('...%s...' % u"abc", u'...abc...')
-        self.assertEqual('%*s' % (5,u'abc',), u'  abc')
-        self.assertEqual('%*s' % (-5,u'abc',), u'abc  ')
-        self.assertEqual('%*.*s' % (5,2,u'abc',), u'   ab')
-        self.assertEqual('%*.*s' % (5,3,u'abc',), u'  abc')
-        self.assertEqual('%i %*.*s' % (10, 5,3,u'abc',), u'10   abc')
-        self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, u'abc',), u'103   abc')
-        self.assertEqual('%c' % u'a', u'a')
+        self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
+        self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
+        self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
+        self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123},  '...abc...')
+        self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
+        self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,"abc"), '...1...2...3...abc...')
+        self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,"abc"), '...%...%s...1...2...3...abc...')
+        self.assertEqual('...%s...' % "abc", '...abc...')
+        self.assertEqual('%*s' % (5,'abc',), '  abc')
+        self.assertEqual('%*s' % (-5,'abc',), 'abc  ')
+        self.assertEqual('%*.*s' % (5,2,'abc',), '   ab')
+        self.assertEqual('%*.*s' % (5,3,'abc',), '  abc')
+        self.assertEqual('%i %*.*s' % (10, 5,3,'abc',), '10   abc')
+        self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, 'abc',), '103   abc')
+        self.assertEqual('%c' % 'a', 'a')
         class Wrapper:
             def __str__(self):
-                return u'\u1234'
-        self.assertEqual('%s' % Wrapper(), u'\u1234')
+                return '\u1234'
+        self.assertEqual('%s' % Wrapper(), '\u1234')
 
     @test_support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
     def test_format_float(self):
         # should not format with a comma, but always with C locale
-        self.assertEqual(u'1.0', u'%.1f' % 1.0)
+        self.assertEqual('1.0', '%.1f' % 1.0)
 
     def test_constructor(self):
         # unicode(obj) tests (this maps to PyObject_Unicode() at C level)
 
         self.assertEqual(
-            unicode(u'unicode remains unicode'),
-            u'unicode remains unicode'
+            str('unicode remains unicode'),
+            'unicode remains unicode'
         )
 
-        class UnicodeSubclass(unicode):
+        class UnicodeSubclass(str):
             pass
 
         self.assertEqual(
-            unicode(UnicodeSubclass('unicode subclass becomes unicode')),
-            u'unicode subclass becomes unicode'
+            str(UnicodeSubclass('unicode subclass becomes unicode')),
+            'unicode subclass becomes unicode'
         )
 
         self.assertEqual(
-            unicode('strings are converted to unicode'),
-            u'strings are converted to unicode'
+            str('strings are converted to unicode'),
+            'strings are converted to unicode'
         )
 
         class UnicodeCompat:
@@ -455,8 +455,8 @@
                 return self.x
 
         self.assertEqual(
-            unicode(UnicodeCompat('__unicode__ compatible objects are recognized')),
-            u'__unicode__ compatible objects are recognized')
+            str(UnicodeCompat('__unicode__ compatible objects are recognized')),
+            '__unicode__ compatible objects are recognized')
 
         class StringCompat:
             def __init__(self, x):
@@ -465,26 +465,26 @@
                 return self.x
 
         self.assertEqual(
-            unicode(StringCompat('__str__ compatible objects are recognized')),
-            u'__str__ compatible objects are recognized'
+            str(StringCompat('__str__ compatible objects are recognized')),
+            '__str__ compatible objects are recognized'
         )
 
         # unicode(obj) is compatible to str():
 
         o = StringCompat('unicode(obj) is compatible to str()')
-        self.assertEqual(unicode(o), u'unicode(obj) is compatible to str()')
+        self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
         self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
 
         # %-formatting and .__unicode__()
-        self.assertEqual(u'%s' %
-                         UnicodeCompat(u"u'%s' % obj uses obj.__unicode__()"),
-                         u"u'%s' % obj uses obj.__unicode__()")
-        self.assertEqual(u'%s' %
-                         UnicodeCompat(u"u'%s' % obj falls back to obj.__str__()"),
-                         u"u'%s' % obj falls back to obj.__str__()")
+        self.assertEqual('%s' %
+                         UnicodeCompat("u'%s' % obj uses obj.__unicode__()"),
+                         "u'%s' % obj uses obj.__unicode__()")
+        self.assertEqual('%s' %
+                         UnicodeCompat("u'%s' % obj falls back to obj.__str__()"),
+                         "u'%s' % obj falls back to obj.__str__()")
 
         for obj in (123, 123.45, 123):
-            self.assertEqual(unicode(obj), unicode(str(obj)))
+            self.assertEqual(str(obj), str(str(obj)))
 
         # unicode(obj, encoding, error) tests (this maps to
         # PyUnicode_FromEncodedObject() at C level)
@@ -492,71 +492,71 @@
         if not sys.platform.startswith('java'):
             self.assertRaises(
                 TypeError,
-                unicode,
-                u'decoding unicode is not supported',
+                str,
+                'decoding unicode is not supported',
                 'utf-8',
                 'strict'
             )
 
         self.assertEqual(
-            unicode('strings are decoded to unicode', 'utf-8', 'strict'),
-            u'strings are decoded to unicode'
+            str('strings are decoded to unicode', 'utf-8', 'strict'),
+            'strings are decoded to unicode'
         )
 
         if not sys.platform.startswith('java'):
             self.assertEqual(
-                unicode(
+                str(
                     buffer('character buffers are decoded to unicode'),
                     'utf-8',
                     'strict'
                 ),
-                u'character buffers are decoded to unicode'
+                'character buffers are decoded to unicode'
             )
 
-        self.assertRaises(TypeError, unicode, 42, 42, 42)
+        self.assertRaises(TypeError, str, 42, 42, 42)
 
     def test_codecs_utf7(self):
         utfTests = [
-            (u'A\u2262\u0391.', 'A+ImIDkQ.'),             # RFC2152 example
-            (u'Hi Mom -\u263a-!', 'Hi Mom -+Jjo--!'),     # RFC2152 example
-            (u'\u65E5\u672C\u8A9E', '+ZeVnLIqe-'),        # RFC2152 example
-            (u'Item 3 is \u00a31.', 'Item 3 is +AKM-1.'), # RFC2152 example
-            (u'+', '+-'),
-            (u'+-', '+--'),
-            (u'+?', '+-?'),
-            (u'\?', '+AFw?'),
-            (u'+?', '+-?'),
-            (ur'\\?', '+AFwAXA?'),
-            (ur'\\\?', '+AFwAXABc?'),
-            (ur'++--', '+-+---')
+            ('A\u2262\u0391.', 'A+ImIDkQ.'),             # RFC2152 example
+            ('Hi Mom -\u263a-!', 'Hi Mom -+Jjo--!'),     # RFC2152 example
+            ('\u65E5\u672C\u8A9E', '+ZeVnLIqe-'),        # RFC2152 example
+            ('Item 3 is \u00a31.', 'Item 3 is +AKM-1.'), # RFC2152 example
+            ('+', '+-'),
+            ('+-', '+--'),
+            ('+?', '+-?'),
+            ('\?', '+AFw?'),
+            ('+?', '+-?'),
+            (r'\\?', '+AFwAXA?'),
+            (r'\\\?', '+AFwAXABc?'),
+            (r'++--', '+-+---')
         ]
 
         for (x, y) in utfTests:
             self.assertEqual(x.encode('utf-7'), y)
 
         # surrogates not supported
-        self.assertRaises(UnicodeError, unicode, '+3ADYAA-', 'utf-7')
+        self.assertRaises(UnicodeError, str, '+3ADYAA-', 'utf-7')
 
-        self.assertEqual(unicode('+3ADYAA-', 'utf-7', 'replace'), u'\ufffd')
+        self.assertEqual(str('+3ADYAA-', 'utf-7', 'replace'), '\ufffd')
 
     def test_codecs_utf8(self):
-        self.assertEqual(u''.encode('utf-8'), '')
-        self.assertEqual(u'\u20ac'.encode('utf-8'), '\xe2\x82\xac')
-        self.assertEqual(u'\ud800\udc02'.encode('utf-8'), '\xf0\x90\x80\x82')
-        self.assertEqual(u'\ud84d\udc56'.encode('utf-8'), '\xf0\xa3\x91\x96')
-        self.assertEqual(u'\ud800'.encode('utf-8'), '\xed\xa0\x80')
-        self.assertEqual(u'\udc00'.encode('utf-8'), '\xed\xb0\x80')
+        self.assertEqual(''.encode('utf-8'), '')
+        self.assertEqual('\u20ac'.encode('utf-8'), '\xe2\x82\xac')
+        self.assertEqual('\ud800\udc02'.encode('utf-8'), '\xf0\x90\x80\x82')
+        self.assertEqual('\ud84d\udc56'.encode('utf-8'), '\xf0\xa3\x91\x96')
+        self.assertEqual('\ud800'.encode('utf-8'), '\xed\xa0\x80')
+        self.assertEqual('\udc00'.encode('utf-8'), '\xed\xb0\x80')
         self.assertEqual(
-            (u'\ud800\udc02'*1000).encode('utf-8'),
+            ('\ud800\udc02'*1000).encode('utf-8'),
             '\xf0\x90\x80\x82'*1000
         )
         self.assertEqual(
-            u'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
-            u'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
-            u'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
-            u'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
-            u'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
-            u' Nunstuck git und'.encode('utf-8'),
+            '\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
+            '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
+            '\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
+            '\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
+            '\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
+            ' Nunstuck git und'.encode('utf-8'),
             '\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
             '\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
             '\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
@@ -570,9 +570,9 @@
         )
 
         # UTF-8 specific decoding tests
-        self.assertEqual(unicode('\xf0\xa3\x91\x96', 'utf-8'), u'\U00023456' )
-        self.assertEqual(unicode('\xf0\x90\x80\x82', 'utf-8'), u'\U00010002' )
-        self.assertEqual(unicode('\xe2\x82\xac', 'utf-8'), u'\u20ac' )
+        self.assertEqual(str('\xf0\xa3\x91\x96', 'utf-8'), '\U00023456' )
+        self.assertEqual(str('\xf0\x90\x80\x82', 'utf-8'), '\U00010002' )
+        self.assertEqual(str('\xe2\x82\xac', 'utf-8'), '\u20ac' )
 
         # Other possible utf-8 test cases:
         # * strict decoding testing for all of the
@@ -580,55 +580,55 @@
 
     def test_codecs_idna(self):
         # Test whether trailing dot is preserved
-        self.assertEqual(u"www.python.org.".encode("idna"), "www.python.org.")
+        self.assertEqual("www.python.org.".encode("idna"), "www.python.org.")
 
     def test_codecs_errors(self):
         # Error handling (encoding)
-        self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii')
-        self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii','strict')
-        self.assertEqual(u'Andr\202 x'.encode('ascii','ignore'), "Andr x")
-        self.assertEqual(u'Andr\202 x'.encode('ascii','replace'), "Andr? x")
+        self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii')
+        self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii','strict')
+        self.assertEqual('Andr\202 x'.encode('ascii','ignore'), "Andr x")
+        self.assertEqual('Andr\202 x'.encode('ascii','replace'), "Andr? x")
 
         # Error handling (decoding)
-        self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii')
-        self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii','strict')
-        self.assertEqual(unicode('Andr\202 x','ascii','ignore'), u"Andr x")
-        self.assertEqual(unicode('Andr\202 x','ascii','replace'), u'Andr\uFFFD x')
+        self.assertRaises(UnicodeError, str, 'Andr\202 x', 'ascii')
+        self.assertRaises(UnicodeError, str, 'Andr\202 x', 'ascii','strict')
+        self.assertEqual(str('Andr\202 x','ascii','ignore'), "Andr x")
+        self.assertEqual(str('Andr\202 x','ascii','replace'), 'Andr\uFFFD x')
 
         # Error handling (unknown character names)
-        self.assertEqual("\\N{foo}xx".decode("unicode-escape", "ignore"), u"xx")
+        self.assertEqual("\\N{foo}xx".decode("unicode-escape", "ignore"), "xx")
 
         # Error handling (truncated escape sequence)
         self.assertRaises(UnicodeError, "\\".decode, "unicode-escape")
 
         self.assertRaises(TypeError, "hello".decode, "test.unicode1")
-        self.assertRaises(TypeError, unicode, "hello", "test.unicode2")
-        self.assertRaises(TypeError, u"hello".encode, "test.unicode1")
-        self.assertRaises(TypeError, u"hello".encode, "test.unicode2")
+        self.assertRaises(TypeError, str, "hello", "test.unicode2")
+        self.assertRaises(TypeError, "hello".encode, "test.unicode1")
+        self.assertRaises(TypeError, "hello".encode, "test.unicode2")
         # executes PyUnicode_Encode()
         import imp
         self.assertRaises(
             ImportError,
             imp.find_module,
             "non-existing module",
-            [u"non-existing dir"]
+            ["non-existing dir"]
         )
 
         # Error handling (wrong arguments)
-        self.assertRaises(TypeError, u"hello".encode, 42, 42, 42)
+        self.assertRaises(TypeError, "hello".encode, 42, 42, 42)
 
         # Error handling (PyUnicode_EncodeDecimal())
-        self.assertRaises(UnicodeError, int, u"\u0200")
+        self.assertRaises(UnicodeError, int, "\u0200")
 
     def test_codecs(self):
         # Encoding
-        self.assertEqual(u'hello'.encode('ascii'), 'hello')
-        self.assertEqual(u'hello'.encode('utf-7'), 'hello')
-        self.assertEqual(u'hello'.encode('utf-8'), 'hello')
-        self.assertEqual(u'hello'.encode('utf8'), 'hello')
-        self.assertEqual(u'hello'.encode('utf-16-le'), 'h\000e\000l\000l\000o\000')
-        self.assertEqual(u'hello'.encode('utf-16-be'), '\000h\000e\000l\000l\000o')
-        self.assertEqual(u'hello'.encode('latin-1'), 'hello')
+        self.assertEqual('hello'.encode('ascii'), 'hello')
+        self.assertEqual('hello'.encode('utf-7'), 'hello')
+        self.assertEqual('hello'.encode('utf-8'), 'hello')
+        self.assertEqual('hello'.encode('utf8'), 'hello')
+        self.assertEqual('hello'.encode('utf-16-le'), 'h\000e\000l\000l\000o\000')
+        self.assertEqual('hello'.encode('utf-16-be'), '\000h\000e\000l\000l\000o')
+        self.assertEqual('hello'.encode('latin-1'), 'hello')
 
         # Roundtrip safety for BMP (just the first 1024 chars)
         for c in xrange(1024):
@@ -636,34 +636,34 @@
             for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
                              'utf-16-be', 'raw_unicode_escape',
                              'unicode_escape', 'unicode_internal'):
-                self.assertEqual(unicode(u.encode(encoding),encoding), u)
+                self.assertEqual(str(u.encode(encoding),encoding), u)
 
         # Roundtrip safety for BMP (just the first 256 chars)
         for c in xrange(256):
             u = unichr(c)
             for encoding in ('latin-1',):
-                self.assertEqual(unicode(u.encode(encoding),encoding), u)
+                self.assertEqual(str(u.encode(encoding),encoding), u)
 
         # Roundtrip safety for BMP (just the first 128 chars)
         for c in xrange(128):
             u = unichr(c)
             for encoding in ('ascii',):
-                self.assertEqual(unicode(u.encode(encoding),encoding), u)
+                self.assertEqual(str(u.encode(encoding),encoding), u)
 
         # Roundtrip safety for non-BMP (just a few chars)
-        u = u'\U00010001\U00020002\U00030003\U00040004\U00050005'
+        u = '\U00010001\U00020002\U00030003\U00040004\U00050005'
         for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
                          #'raw_unicode_escape',
                          'unicode_escape', 'unicode_internal'):
-            self.assertEqual(unicode(u.encode(encoding),encoding), u)
+            self.assertEqual(str(u.encode(encoding),encoding), u)
 
         # UTF-8 must be roundtrip safe for all UCS-2 code points
         # This excludes surrogates: in the full range, there would be
         # a surrogate pair (\udbff\udc00), which gets converted back
         # to a non-BMP character (\U0010fc00)
-        u = u''.join(map(unichr, range(0,0xd800)+range(0xe000,0x10000)))
+        u = ''.join(map(unichr, range(0,0xd800)+range(0xe000,0x10000)))
         for encoding in ('utf-8',):
-            self.assertEqual(unicode(u.encode(encoding),encoding), u)
+            self.assertEqual(str(u.encode(encoding),encoding), u)
 
     def test_codecs_charmap(self):
         # 0-127
@@ -692,7 +692,7 @@
             #'cp875'
 
             ):
-            self.assertEqual(unicode(s, encoding).encode(encoding), s)
+            self.assertEqual(str(s, encoding).encode(encoding), s)
 
         # 128-255
         s = ''.join(map(chr, xrange(128, 256)))
@@ -717,14 +717,14 @@
             #'cp1006', 'cp875', 'iso8859_8',
 
             ):
-            self.assertEqual(unicode(s, encoding).encode(encoding), s)
+            self.assertEqual(str(s, encoding).encode(encoding), s)
 
     def test_concatenation(self):
-        self.assertEqual((u"abc" u"def"), u"abcdef")
-        self.assertEqual(("abc" u"def"), u"abcdef")
-        self.assertEqual((u"abc" "def"), u"abcdef")
-        self.assertEqual((u"abc" u"def" "ghi"), u"abcdefghi")
-        self.assertEqual(("abc" "def" u"ghi"), u"abcdefghi")
+        self.assertEqual(("abc" "def"), "abcdef")
+        self.assertEqual(("abc" "def"), "abcdef")
+        self.assertEqual(("abc" "def"), "abcdef")
+        self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
+        self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
 
     def test_printing(self):
         class BitBucket:
@@ -732,20 +732,20 @@
                 pass
 
         out = BitBucket()
-        print(u'abc', file=out)
-        print(u'abc', u'def', file=out)
-        print(u'abc', 'def', file=out)
-        print('abc', u'def', file=out)
-        print(u'abc\n', file=out)
-        print(u'abc\n', end=' ', file=out)
-        print(u'abc\n', end=' ', file=out)
-        print(u'def\n', file=out)
-        print(u'def\n', file=out)
+        print('abc', file=out)
+        print('abc', 'def', file=out)
+        print('abc', 'def', file=out)
+        print('abc', 'def', file=out)
+        print('abc\n', file=out)
+        print('abc\n', end=' ', file=out)
+        print('abc\n', end=' ', file=out)
+        print('def\n', file=out)
+        print('def\n', file=out)
 
     def test_ucs4(self):
         if sys.maxunicode == 0xFFFF:
             return
-        x = u'\U00100000'
+        x = '\U00100000'
         y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
         self.assertEqual(x, y)
 
@@ -757,11 +757,11 @@
 
         class Foo1:
             def __unicode__(self):
-                return u"foo"
+                return "foo"
 
         class Foo2(object):
             def __unicode__(self):
-                return u"foo"
+                return "foo"
 
         class Foo3(object):
             def __unicode__(self):
@@ -771,7 +771,7 @@
             def __unicode__(self):
                 return "foo"
 
-        class Foo5(unicode):
+        class Foo5(str):
             def __unicode__(self):
                 return "foo"
 
@@ -780,37 +780,37 @@
                 return "foos"
 
             def __unicode__(self):
-                return u"foou"
+                return "foou"
 
-        class Foo7(unicode):
+        class Foo7(str):
             def __str__(self):
                 return "foos"
             def __unicode__(self):
-                return u"foou"
+                return "foou"
 
-        class Foo8(unicode):
+        class Foo8(str):
             def __new__(cls, content=""):
-                return unicode.__new__(cls, 2*content)
+                return str.__new__(cls, 2*content)
             def __unicode__(self):
                 return self
 
-        class Foo9(unicode):
+        class Foo9(str):
             def __str__(self):
                 return "string"
             def __unicode__(self):
                 return "not unicode"
 
-        self.assertEqual(unicode(Foo0()), u"foo")
-        self.assertEqual(unicode(Foo1()), u"foo")
-        self.assertEqual(unicode(Foo2()), u"foo")
-        self.assertEqual(unicode(Foo3()), u"foo")
-        self.assertEqual(unicode(Foo4("bar")), u"foo")
-        self.assertEqual(unicode(Foo5("bar")), u"foo")
-        self.assertEqual(unicode(Foo6("bar")), u"foou")
-        self.assertEqual(unicode(Foo7("bar")), u"foou")
-        self.assertEqual(unicode(Foo8("foo")), u"foofoo")
+        self.assertEqual(str(Foo0()), "foo")
+        self.assertEqual(str(Foo1()), "foo")
+        self.assertEqual(str(Foo2()), "foo")
+        self.assertEqual(str(Foo3()), "foo")
+        self.assertEqual(str(Foo4("bar")), "foo")
+        self.assertEqual(str(Foo5("bar")), "foo")
+        self.assertEqual(str(Foo6("bar")), "foou")
+        self.assertEqual(str(Foo7("bar")), "foou")
+        self.assertEqual(str(Foo8("foo")), "foofoo")
         self.assertEqual(str(Foo9("foo")), "string")
-        self.assertEqual(unicode(Foo9("foo")), u"not unicode")
+        self.assertEqual(str(Foo9("foo")), "not unicode")
 
     def test_unicode_repr(self):
         class s1:
@@ -819,7 +819,7 @@
 
         class s2:
             def __repr__(self):
-                return u'\\n'
+                return '\\n'
 
         self.assertEqual(repr(s1()), '\\n')
         self.assertEqual(repr(s2()), '\\n')
diff --git a/Lib/test/test_unicode_file.py b/Lib/test/test_unicode_file.py
index 328b5b6..2fc64cd 100644
--- a/Lib/test/test_unicode_file.py
+++ b/Lib/test/test_unicode_file.py
@@ -20,7 +20,7 @@
     # encoding instead.
     import sys
     try:
-        TESTFN_UNICODE = unicode("@test-\xe0\xf2", sys.getfilesystemencoding())
+        TESTFN_UNICODE = str("@test-\xe0\xf2", sys.getfilesystemencoding())
         TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
         if '?' in TESTFN_ENCODED:
             # MBCS will not report the error properly
diff --git a/Lib/test/test_unicodedata.py b/Lib/test/test_unicodedata.py
index 0023bf4..227aa5a 100644
--- a/Lib/test/test_unicodedata.py
+++ b/Lib/test/test_unicodedata.py
@@ -24,26 +24,26 @@
             char = unichr(i)
             data = [
                 # Predicates (single char)
-                u"01"[char.isalnum()],
-                u"01"[char.isalpha()],
-                u"01"[char.isdecimal()],
-                u"01"[char.isdigit()],
-                u"01"[char.islower()],
-                u"01"[char.isnumeric()],
-                u"01"[char.isspace()],
-                u"01"[char.istitle()],
-                u"01"[char.isupper()],
+                "01"[char.isalnum()],
+                "01"[char.isalpha()],
+                "01"[char.isdecimal()],
+                "01"[char.isdigit()],
+                "01"[char.islower()],
+                "01"[char.isnumeric()],
+                "01"[char.isspace()],
+                "01"[char.istitle()],
+                "01"[char.isupper()],
 
                 # Predicates (multiple chars)
-                u"01"[(char + u'abc').isalnum()],
-                u"01"[(char + u'abc').isalpha()],
-                u"01"[(char + u'123').isdecimal()],
-                u"01"[(char + u'123').isdigit()],
-                u"01"[(char + u'abc').islower()],
-                u"01"[(char + u'123').isnumeric()],
-                u"01"[(char + u' \t').isspace()],
-                u"01"[(char + u'abc').istitle()],
-                u"01"[(char + u'ABC').isupper()],
+                "01"[(char + 'abc').isalnum()],
+                "01"[(char + 'abc').isalpha()],
+                "01"[(char + '123').isdecimal()],
+                "01"[(char + '123').isdigit()],
+                "01"[(char + 'abc').islower()],
+                "01"[(char + '123').isnumeric()],
+                "01"[(char + ' \t').isspace()],
+                "01"[(char + 'abc').istitle()],
+                "01"[(char + 'ABC').isupper()],
 
                 # Mappings (single char)
                 char.lower(),
@@ -51,13 +51,13 @@
                 char.title(),
 
                 # Mappings (multiple chars)
-                (char + u'abc').lower(),
-                (char + u'ABC').upper(),
-                (char + u'abc').title(),
-                (char + u'ABC').title(),
+                (char + 'abc').lower(),
+                (char + 'ABC').upper(),
+                (char + 'abc').title(),
+                (char + 'ABC').title(),
 
                 ]
-            h.update(u''.join(data).encode(encoding))
+            h.update(''.join(data).encode(encoding))
         result = h.hexdigest()
         self.assertEqual(result, self.expectedchecksum)
 
@@ -99,92 +99,92 @@
         self.assertEqual(result, self.expectedchecksum)
 
     def test_digit(self):
-        self.assertEqual(self.db.digit(u'A', None), None)
-        self.assertEqual(self.db.digit(u'9'), 9)
-        self.assertEqual(self.db.digit(u'\u215b', None), None)
-        self.assertEqual(self.db.digit(u'\u2468'), 9)
+        self.assertEqual(self.db.digit('A', None), None)
+        self.assertEqual(self.db.digit('9'), 9)
+        self.assertEqual(self.db.digit('\u215b', None), None)
+        self.assertEqual(self.db.digit('\u2468'), 9)
 
         self.assertRaises(TypeError, self.db.digit)
-        self.assertRaises(TypeError, self.db.digit, u'xx')
-        self.assertRaises(ValueError, self.db.digit, u'x')
+        self.assertRaises(TypeError, self.db.digit, 'xx')
+        self.assertRaises(ValueError, self.db.digit, 'x')
 
     def test_numeric(self):
-        self.assertEqual(self.db.numeric(u'A',None), None)
-        self.assertEqual(self.db.numeric(u'9'), 9)
-        self.assertEqual(self.db.numeric(u'\u215b'), 0.125)
-        self.assertEqual(self.db.numeric(u'\u2468'), 9.0)
+        self.assertEqual(self.db.numeric('A',None), None)
+        self.assertEqual(self.db.numeric('9'), 9)
+        self.assertEqual(self.db.numeric('\u215b'), 0.125)
+        self.assertEqual(self.db.numeric('\u2468'), 9.0)
 
         self.assertRaises(TypeError, self.db.numeric)
-        self.assertRaises(TypeError, self.db.numeric, u'xx')
-        self.assertRaises(ValueError, self.db.numeric, u'x')
+        self.assertRaises(TypeError, self.db.numeric, 'xx')
+        self.assertRaises(ValueError, self.db.numeric, 'x')
 
     def test_decimal(self):
-        self.assertEqual(self.db.decimal(u'A',None), None)
-        self.assertEqual(self.db.decimal(u'9'), 9)
-        self.assertEqual(self.db.decimal(u'\u215b', None), None)
-        self.assertEqual(self.db.decimal(u'\u2468', None), None)
+        self.assertEqual(self.db.decimal('A',None), None)
+        self.assertEqual(self.db.decimal('9'), 9)
+        self.assertEqual(self.db.decimal('\u215b', None), None)
+        self.assertEqual(self.db.decimal('\u2468', None), None)
 
         self.assertRaises(TypeError, self.db.decimal)
-        self.assertRaises(TypeError, self.db.decimal, u'xx')
-        self.assertRaises(ValueError, self.db.decimal, u'x')
+        self.assertRaises(TypeError, self.db.decimal, 'xx')
+        self.assertRaises(ValueError, self.db.decimal, 'x')
 
     def test_category(self):
-        self.assertEqual(self.db.category(u'\uFFFE'), 'Cn')
-        self.assertEqual(self.db.category(u'a'), 'Ll')
-        self.assertEqual(self.db.category(u'A'), 'Lu')
+        self.assertEqual(self.db.category('\uFFFE'), 'Cn')
+        self.assertEqual(self.db.category('a'), 'Ll')
+        self.assertEqual(self.db.category('A'), 'Lu')
 
         self.assertRaises(TypeError, self.db.category)
-        self.assertRaises(TypeError, self.db.category, u'xx')
+        self.assertRaises(TypeError, self.db.category, 'xx')
 
     def test_bidirectional(self):
-        self.assertEqual(self.db.bidirectional(u'\uFFFE'), '')
-        self.assertEqual(self.db.bidirectional(u' '), 'WS')
-        self.assertEqual(self.db.bidirectional(u'A'), 'L')
+        self.assertEqual(self.db.bidirectional('\uFFFE'), '')
+        self.assertEqual(self.db.bidirectional(' '), 'WS')
+        self.assertEqual(self.db.bidirectional('A'), 'L')
 
         self.assertRaises(TypeError, self.db.bidirectional)
-        self.assertRaises(TypeError, self.db.bidirectional, u'xx')
+        self.assertRaises(TypeError, self.db.bidirectional, 'xx')
 
     def test_decomposition(self):
-        self.assertEqual(self.db.decomposition(u'\uFFFE'),'')
-        self.assertEqual(self.db.decomposition(u'\u00bc'), '<fraction> 0031 2044 0034')
+        self.assertEqual(self.db.decomposition('\uFFFE'),'')
+        self.assertEqual(self.db.decomposition('\u00bc'), '<fraction> 0031 2044 0034')
 
         self.assertRaises(TypeError, self.db.decomposition)
-        self.assertRaises(TypeError, self.db.decomposition, u'xx')
+        self.assertRaises(TypeError, self.db.decomposition, 'xx')
 
     def test_mirrored(self):
-        self.assertEqual(self.db.mirrored(u'\uFFFE'), 0)
-        self.assertEqual(self.db.mirrored(u'a'), 0)
-        self.assertEqual(self.db.mirrored(u'\u2201'), 1)
+        self.assertEqual(self.db.mirrored('\uFFFE'), 0)
+        self.assertEqual(self.db.mirrored('a'), 0)
+        self.assertEqual(self.db.mirrored('\u2201'), 1)
 
         self.assertRaises(TypeError, self.db.mirrored)
-        self.assertRaises(TypeError, self.db.mirrored, u'xx')
+        self.assertRaises(TypeError, self.db.mirrored, 'xx')
 
     def test_combining(self):
-        self.assertEqual(self.db.combining(u'\uFFFE'), 0)
-        self.assertEqual(self.db.combining(u'a'), 0)
-        self.assertEqual(self.db.combining(u'\u20e1'), 230)
+        self.assertEqual(self.db.combining('\uFFFE'), 0)
+        self.assertEqual(self.db.combining('a'), 0)
+        self.assertEqual(self.db.combining('\u20e1'), 230)
 
         self.assertRaises(TypeError, self.db.combining)
-        self.assertRaises(TypeError, self.db.combining, u'xx')
+        self.assertRaises(TypeError, self.db.combining, 'xx')
 
     def test_normalize(self):
         self.assertRaises(TypeError, self.db.normalize)
-        self.assertRaises(ValueError, self.db.normalize, 'unknown', u'xx')
-        self.assertEqual(self.db.normalize('NFKC', u''), u'')
+        self.assertRaises(ValueError, self.db.normalize, 'unknown', 'xx')
+        self.assertEqual(self.db.normalize('NFKC', ''), '')
         # The rest can be found in test_normalization.py
         # which requires an external file.
 
     def test_east_asian_width(self):
         eaw = self.db.east_asian_width
         self.assertRaises(TypeError, eaw, 'a')
-        self.assertRaises(TypeError, eaw, u'')
-        self.assertRaises(TypeError, eaw, u'ra')
-        self.assertEqual(eaw(u'\x1e'), 'N')
-        self.assertEqual(eaw(u'\x20'), 'Na')
-        self.assertEqual(eaw(u'\uC894'), 'W')
-        self.assertEqual(eaw(u'\uFF66'), 'H')
-        self.assertEqual(eaw(u'\uFF1F'), 'F')
-        self.assertEqual(eaw(u'\u2010'), 'A')
+        self.assertRaises(TypeError, eaw, '')
+        self.assertRaises(TypeError, eaw, 'ra')
+        self.assertEqual(eaw('\x1e'), 'N')
+        self.assertEqual(eaw('\x20'), 'Na')
+        self.assertEqual(eaw('\uC894'), 'W')
+        self.assertEqual(eaw('\uFF66'), 'H')
+        self.assertEqual(eaw('\uFF1F'), 'F')
+        self.assertEqual(eaw('\u2010'), 'A')
 
 class UnicodeMiscTest(UnicodeDatabaseTest):
 
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 3a37525..a62afde 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -425,8 +425,8 @@
                          "using unquote_plus(): %s != %s" % (expect, result))
 
     def test_unquote_with_unicode(self):
-        r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
-        self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
+        r = urllib.unquote('br%C3%BCckner_sapporo_20050930.doc')
+        self.assertEqual(r, 'br\xc3\xbcckner_sapporo_20050930.doc')
 
 class urlencode_Tests(unittest.TestCase):
     """Tests for urlencode()"""
diff --git a/Lib/test/test_winreg.py b/Lib/test/test_winreg.py
index 08de67c..76ecd64 100644
--- a/Lib/test/test_winreg.py
+++ b/Lib/test/test_winreg.py
@@ -19,10 +19,10 @@
 ]
 if have_unicode:
     test_data+=[
-    (unicode("Unicode Val"),  unicode("A Unicode value"),                      REG_SZ,),
-    ("UnicodeExpand", unicode("The path is %path%"),                   REG_EXPAND_SZ),
-    ("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"), unicode("values")], REG_MULTI_SZ),
-    ("Multi-mixed",   [unicode("Unicode"), unicode("and"), "string", "values"],REG_MULTI_SZ),
+    (str("Unicode Val"),  str("A Unicode value"),                      REG_SZ,),
+    ("UnicodeExpand", str("The path is %path%"),                   REG_EXPAND_SZ),
+    ("Multi-unicode", [str("Lots"), str("of"), str("unicode"), str("values")], REG_MULTI_SZ),
+    ("Multi-mixed",   [str("Unicode"), str("and"), "string", "values"],REG_MULTI_SZ),
     ]
 
 def WriteTestData(root_key):
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
index 0798fa1..76ec018 100644
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -5,7 +5,7 @@
 from test import test_support
 
 try:
-    unicode
+    str
 except NameError:
     have_unicode = False
 else:
@@ -18,8 +18,8 @@
           'anotherlist': ['.zyx.41'],
           'abase64': xmlrpclib.Binary("my dog has fleas"),
           'boolean': xmlrpclib.False,
-          'unicode': u'\u4000\u6000\u8000',
-          u'ukey\u4000': 'regular value',
+          'unicode': '\u4000\u6000\u8000',
+          'ukey\u4000': 'regular value',
           'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
           'datetime2': xmlrpclib.DateTime(
                         (2005, 02, 10, 11, 41, 23, 0, 1, -1)),
@@ -147,11 +147,11 @@
 
         items = list(d.items())
         if have_unicode:
-            self.assertEquals(s, u"abc \x95")
-            self.assert_(isinstance(s, unicode))
-            self.assertEquals(items, [(u"def \x96", u"ghi \x97")])
-            self.assert_(isinstance(items[0][0], unicode))
-            self.assert_(isinstance(items[0][1], unicode))
+            self.assertEquals(s, "abc \x95")
+            self.assert_(isinstance(s, str))
+            self.assertEquals(items, [("def \x96", "ghi \x97")])
+            self.assert_(isinstance(items[0][0], str))
+            self.assert_(isinstance(items[0][1], str))
         else:
             self.assertEquals(s, "abc \xc2\x95")
             self.assertEquals(items, [("def \xc2\x96", "ghi \xc2\x97")])
diff --git a/Lib/test/testcodec.py b/Lib/test/testcodec.py
index 5da754d..7ac9203 100644
--- a/Lib/test/testcodec.py
+++ b/Lib/test/testcodec.py
@@ -35,10 +35,10 @@
 
 decoding_map = codecs.make_identity_dict(range(256))
 decoding_map.update({
-        0x78: u"abc", # 1-n decoding mapping
+        0x78: "abc", # 1-n decoding mapping
         "abc": 0x0078,# 1-n encoding mapping
         0x01: None,   # decoding mapping to <undefined>
-        0x79: u"",    # decoding mapping to <remove character>
+        0x79: "",    # decoding mapping to <remove character>
 })
 
 ### Encoding Map