Apply diff2.txt from SF patch http://www.python.org/sf/572113
(with one small bugfix in bgen/bgen/scantools.py)

This replaces string module functions with string methods
for the code in the Tools directory. Several uses of
string.letters etc. still remain.
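
A minimal sketch of the mechanical translation applied throughout
(illustrative only, not part of the patch; the variable names below
are placeholders, with the old string-module spellings in comments):

    value = "  Hello World  "
    stripped = value.strip()        # was: string.strip(value)
    lowered = stripped.lower()      # was: string.lower(stripped)
    parts = lowered.split()         # was: string.split(lowered)
    joined = "-".join(parts)        # was: string.join(parts, "-")
    number = int("42")              # was: string.atoi("42")
    print joined, number            # hello-world 42
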
diff --git a/Tools/webchecker/tktools.py b/Tools/webchecker/tktools.py
index 0db4d49..3a68f9a 100644
--- a/Tools/webchecker/tktools.py
+++ b/Tools/webchecker/tktools.py
@@ -1,7 +1,6 @@
 """Assorted Tk-related subroutines used in Grail."""
 
 
-import string
 from types import *
 from Tkinter import *
 
@@ -335,7 +334,7 @@
     """Turn a list or tuple into a single string -- recursively."""
     t = type(msg)
     if t in (ListType, TupleType):
-        msg = string.join(map(flatten, msg))
+        msg = ' '.join(map(flatten, msg))
     elif t is ClassType:
         msg = msg.__name__
     else:
@@ -345,7 +344,7 @@
 
 def boolean(s):
     """Test whether a string is a Tk boolean, without error checking."""
-    if string.lower(s) in ('', '0', 'no', 'off', 'false'): return 0
+    if s.lower() in ('', '0', 'no', 'off', 'false'): return 0
     else: return 1
 
 
diff --git a/Tools/webchecker/wcgui.py b/Tools/webchecker/wcgui.py
index ae012bf..e467d92 100755
--- a/Tools/webchecker/wcgui.py
+++ b/Tools/webchecker/wcgui.py
@@ -60,7 +60,6 @@
 
 import sys
 import getopt
-import string
 from Tkinter import *
 import tktools
 import webchecker
@@ -86,7 +85,7 @@
     extra_roots = []
     for o, a in opts:
         if o == '-m':
-            webchecker.maxpage = string.atoi(a)
+            webchecker.maxpage = int(a)
         if o == '-q':
             webchecker.verbose = 0
         if o == '-v':
@@ -169,7 +168,7 @@
         self.root_seed = None
         webchecker.Checker.__init__(self)
         if root:
-            root = string.strip(str(root))
+            root = str(root).strip()
             if root:
                 self.suggestroot(root)
         self.newstatus()
@@ -189,7 +188,7 @@
 
     def enterroot(self, event=None):
         root = self.__rootentry.get()
-        root = string.strip(root)
+        root = root.strip()
         if root:
             self.__checking.config(text="Adding root "+root)
             self.__checking.update_idletasks()
@@ -353,7 +352,7 @@
     def selectedindices(self):
         l = self.list.curselection()
         if not l: return []
-        return map(string.atoi, l)
+        return map(int, l)
 
     def insert(self, url):
         if url not in self.items:
diff --git a/Tools/webchecker/webchecker.py b/Tools/webchecker/webchecker.py
index fa70f65..fd7f578 100755
--- a/Tools/webchecker/webchecker.py
+++ b/Tools/webchecker/webchecker.py
@@ -109,7 +109,6 @@
 import sys
 import os
 from types import *
-import string
 import StringIO
 import getopt
 import pickle
@@ -124,7 +123,7 @@
 
 # Extract real version number if necessary
 if __version__[0] == '$':
-    _v = string.split(__version__)
+    _v = __version__.split()
     if len(_v) == 3:
         __version__ = _v[1]
 
@@ -170,13 +169,13 @@
         if o == '-d':
             dumpfile = a
         if o == '-m':
-            maxpage = string.atoi(a)
+            maxpage = int(a)
         if o == '-n':
             norun = 1
         if o == '-q':
             verbose = 0
         if o == '-r':
-            roundsize = string.atoi(a)
+            roundsize = int(a)
         if o == '-t':
             extra_roots.append(a)
         if o == '-a':
@@ -248,7 +247,7 @@
     f.close()
     if verbose > 0:
         print "Done."
-        print "Root:", string.join(c.roots, "\n      ")
+        print "Root:", "\n      ".join(c.roots)
     return c
 
 
@@ -316,7 +315,7 @@
             troot = root
             scheme, netloc, path, params, query, fragment = \
                     urlparse.urlparse(root)
-            i = string.rfind(path, "/") + 1
+            i = path.rfind("/") + 1
             if 0 < i < len(path):
                 path = path[:i]
                 troot = urlparse.urlunparse((scheme, netloc, path,
@@ -544,7 +543,7 @@
 
     def checkforhtml(self, info, url):
         if info.has_key('content-type'):
-            ctype = string.lower(cgi.parse_header(info['content-type'])[0])
+            ctype = cgi.parse_header(info['content-type'])[0].lower()
         else:
             if url[-1:] == "/":
                 return 1
@@ -809,7 +808,7 @@
     def do_link(self, attributes):
         for name, value in attributes:
             if name == "rel":
-                parts = string.split(string.lower(value))
+                parts = value.lower().split()
                 if (  parts == ["stylesheet"]
                       or parts == ["alternate", "stylesheet"]):
                     self.link_attr(attributes, "href")
@@ -836,13 +835,13 @@
     def link_attr(self, attributes, *args):
         for name, value in attributes:
             if name in args:
-                if value: value = string.strip(value)
+                if value: value = value.strip()
                 if value: self.links[value] = None
 
     def do_base(self, attributes):
         for name, value in attributes:
             if name == 'href':
-                if value: value = string.strip(value)
+                if value: value = value.strip()
                 if value:
                     if self.checker:
                         self.checker.note(1, "  Base %s", value)
diff --git a/Tools/webchecker/websucker.py b/Tools/webchecker/websucker.py
index 5f726b3..ef2fa44 100755
--- a/Tools/webchecker/websucker.py
+++ b/Tools/webchecker/websucker.py
@@ -6,7 +6,6 @@
 
 import os
 import sys
-import string
 import urllib
 import getopt
 
@@ -14,7 +13,7 @@
 
 # Extract real version number if necessary
 if __version__[0] == '$':
-    _v = string.split(__version__)
+    _v = __version__.split()
     if len(_v) == 3:
         __version__ = _v[1]
 
@@ -90,14 +89,14 @@
     def savefilename(self, url):
         type, rest = urllib.splittype(url)
         host, path = urllib.splithost(rest)
-        while path[:1] == "/": path = path[1:]
+        path = path.lstrip("/")
         user, host = urllib.splituser(host)
         host, port = urllib.splitnport(host)
-        host = string.lower(host)
+        host = host.lower()
         if not path or path[-1] == "/":
             path = path + "index.html"
         if os.sep != "/":
-            path = string.join(string.split(path, "/"), os.sep)
+            path = os.sep.join(path.split("/"))
             if os.name == "mac":
                 path = os.sep + path
         path = os.path.join(host, path)
diff --git a/Tools/webchecker/wsgui.py b/Tools/webchecker/wsgui.py
index c301c6f..e44c6cd 100755
--- a/Tools/webchecker/wsgui.py
+++ b/Tools/webchecker/wsgui.py
@@ -8,7 +8,6 @@
 
 from Tkinter import *
 import Tkinter
-import string
 import websucker
 import sys
 import os
@@ -150,13 +149,13 @@
             return
         self.url_entry.selection_range(0, END)
         url = self.url_entry.get()
-        url = string.strip(url)
+        url = url.strip()
         if not url:
             self.top.bell()
             self.message("[Error: No URL entered]")
             return
         self.rooturl = url
-        dir = string.strip(self.dir_entry.get())
+        dir = self.dir_entry.get().strip()
         if not dir:
             self.sucker.savedir = None
         else:
@@ -184,7 +183,7 @@
                 text = self.top.selection_get(selection=t)
             except TclError:
                 continue
-            text = string.strip(text)
+            text = text.strip()
             if text:
                 break
         if not text: