Fuck.  For PC support, this must be in the distribution.
diff --git a/Lib/dos_8x3/arrayio.py b/Lib/dos_8x3/arrayio.py
new file mode 100755
index 0000000..5b06f92
--- /dev/null
+++ b/Lib/dos_8x3/arrayio.py
@@ -0,0 +1,142 @@
+"""File-like objects that read/write an array buffer.
+
+This implements (nearly) all stdio methods.
+
+f = ArrayIO()       # ready for writing
+f = ArrayIO(buf)    # ready for reading
+f.close()           # explicitly release resources held
+flag = f.isatty()   # always false
+pos = f.tell()      # get current position
+f.seek(pos)         # set current position
+f.seek(pos, mode)   # mode 0: absolute; 1: relative; 2: relative to EOF
+buf = f.read()      # read until EOF
+buf = f.read(n)     # read up to n bytes
+buf = f.readline()  # read until end of line ('\n') or EOF
+list = f.readlines()# list of f.readline() results until EOF
+f.write(buf)        # write at current position
+f.writelines(list)  # for line in list: f.write(line)
+f.getvalue()        # return whole file's contents as a string
+
+Notes:
+- This is very similar to StringIO.  StringIO is faster for reading,
+  but ArrayIO is faster for writing.
+- ArrayIO uses an array object internally, but all its interfaces
+  accept and return strings.
+- Using a real file is often faster (but less convenient).
+- fileno() is left unimplemented so that code which uses it triggers
+  an exception early.
+- Seeking far beyond EOF and then writing will insert real null
+  bytes that occupy space in the buffer.
+- There's a simple test set (see end of this file).
+"""
+
+import string
+from array import array
+
+class ArrayIO:
+	def __init__(self, buf = ''):
+		self.buf = array('c', buf)
+		self.pos = 0
+		self.closed = 0
+		self.softspace = 0
+	def close(self):
+		if not self.closed:
+			self.closed = 1
+			del self.buf, self.pos
+	def isatty(self):
+		return 0
+	def seek(self, pos, mode = 0):
+		if mode == 1:
+			pos = pos + self.pos
+		elif mode == 2:
+			pos = pos + len(self.buf)
+		self.pos = max(0, pos)
+	def tell(self):
+		return self.pos
+	def read(self, n = -1):
+		if n < 0:
+			newpos = len(self.buf)
+		else:
+			newpos = min(self.pos+n, len(self.buf))
+		r = self.buf[self.pos:newpos].tostring()
+		self.pos = newpos
+		return r
+	def readline(self):
+		i = string.find(self.buf[self.pos:].tostring(), '\n')
+		if i < 0:
+			newpos = len(self.buf)
+		else:
+			newpos = self.pos+i+1
+		r = self.buf[self.pos:newpos].tostring()
+		self.pos = newpos
+		return r
+	def readlines(self):
+		lines = string.splitfields(self.read(), '\n')
+		if not lines:
+			return lines
+		for i in range(len(lines)-1):
+			lines[i] = lines[i] + '\n'
+		if not lines[-1]:
+			del lines[-1]
+		return lines
+	def write(self, s):
+		if not s: return
+		a = array('c', s)
+		n = self.pos - len(self.buf)
+		if n > 0:
+			self.buf[len(self.buf):] = array('c', '\0')*n
+		newpos = self.pos + len(a)
+		self.buf[self.pos:newpos] = a
+		self.pos = newpos
+	def writelines(self, list):
+		self.write(string.joinfields(list, ''))
+	def flush(self):
+		pass
+	def getvalue(self):
+		return self.buf.tostring()
+
+
+# A little test suite
+
+def test():
+	import sys
+	if sys.argv[1:]:
+		file = sys.argv[1]
+	else:
+		file = '/etc/passwd'
+	lines = open(file, 'r').readlines()
+	text = open(file, 'r').read()
+	f = ArrayIO()
+	for line in lines[:-2]:
+		f.write(line)
+	f.writelines(lines[-2:])
+	if f.getvalue() != text:
+		raise RuntimeError, 'write failed'
+	length = f.tell()
+	print 'File length =', length
+	f.seek(len(lines[0]))
+	f.write(lines[1])
+	f.seek(0)
+	print 'First line =', `f.readline()`
+	here = f.tell()
+	line = f.readline()
+	print 'Second line =', `line`
+	f.seek(-len(line), 1)
+	line2 = f.read(len(line))
+	if line != line2:
+		raise RuntimeError, 'bad result after seek back'
+	f.seek(len(line2), 1)
+	list = f.readlines()
+	line = list[-1]
+	f.seek(f.tell() - len(line))
+	line2 = f.read()
+	if line != line2:
+		raise RuntimeError, 'bad result after seek back from EOF'
+	print 'Read', len(list), 'more lines'
+	print 'File length =', f.tell()
+	if f.tell() != length:
+		raise RuntimeError, 'bad length'
+	f.close()
+
+if __name__ == '__main__':
+	test()
diff --git a/Lib/dos_8x3/ast.py b/Lib/dos_8x3/ast.py
new file mode 100755
index 0000000..6f92bee
--- /dev/null
+++ b/Lib/dos_8x3/ast.py
@@ -0,0 +1,224 @@
+"""Object-oriented interface to the parser module.
+
+This module exports four classes which together provide an interface
+to the parser module.  Together, they represent two ways to create
+parsed representations of Python source and the two starting data
+types (source text and tuple representations).  Each class provides
+interfaces which are identical other than the constructors.  The
+constructors are described in detail in the documentation for each
+class and the remaining, shared portion of the interface is documented
+below.  Briefly, the four classes provided are:
+
+AST
+    Defines the primary interface to the AST objects and supports creation
+    from the tuple representation of the parse tree.
+
+ExpressionAST
+    Supports creation of expression constructs from source text.
+
+SuiteAST
+    Supports creation of statement suites from source text.
+
+FileSuiteAST
+    Convenience subclass of the `SuiteAST' class; loads source text of the
+    suite from an external file.
+
+Aside from the constructors, several methods are provided to allow
+access to the various interpretations of the parse tree and to check
+conditions of the construct represented by the parse tree.
+
+ast()
+    Returns the corresponding `parser.ASTType' object.
+
+code()
+    Returns the compiled code object.
+
+filename()
+    Returns the name of the associated source file, if known.
+
+isExpression()
+    Returns a true value if the parse tree represents an expression, or
+    a false value otherwise.
+
+isSuite()
+    Returns a true value if the parse tree represents a suite of
+    statements, or a false value otherwise.
+
+text()
+    Returns the source text, or None if not available.
+
+tuple()
+    Returns the tuple representing the parse tree.
+"""
+
+__version__ = '$Revision$'
+__copyright__ = """Copyright (c) 1995, 1996 by Fred L. Drake, Jr.
+
+This software may be used and distributed freely for any purpose provided
+that this notice is included unchanged on any and all copies.  The author
+does not warrant or guarantee this software in any way.
+"""
+
+class AST:
+    """Base class for Abstract Syntax Tree objects.
+
+    Creates an Abstract Syntax Tree based on the tuple representation
+    of the parse tree.  The parse tree can represent either an
+    expression or a suite; the type is recognized automatically.
+    This base class provides all of the query methods for subclass
+    objects defined in this module.
+    """
+    _p = __import__('parser')		# import internally to avoid
+					# namespace pollution at the
+					# top level
+    _text = None
+    _code = None
+    _ast  = None
+    _type = 'unknown'
+    _tupl = None
+
+    def __init__(self, tuple):
+	"""Create an `AST' instance from a tuple-tree representation.
+
+	tuple
+	    The tuple tree to convert.
+
+	The tuple-tree may represent either an expression or a suite; the
+	type will be determined automatically.
+	"""
+	if type(tuple) is not type(()):
+	    raise TypeError, 'Base AST class requires tuple parameter.'
+
+	self._tupl = tuple
+	self._ast  = self._p.tuple2ast(tuple)
+	self._type = (self._p.isexpr(self._ast) and 'expression') or 'suite'
+
+    def tuple(self):
+	"""Returns the tuple representing the parse tree.
+	"""
+	if self._tupl is None:
+	    self._tupl = self._p.ast2tuple(self._ast)
+	return self._tupl
+
+    def code(self):
+	"""Returns the compiled code object.
+
+	The code object returned by this method may be passed to the
+	exec statement if `AST.isSuite()' is true or to the eval()
+	function if `AST.isExpression()' is true.  All the usual rules
+	regarding execution of code objects apply.
+	"""
+	if not self._code:
+	    self._code = self._p.compileast(self._ast)
+	return self._code
+
+    def ast(self):
+	"""Returns the corresponding `parser.ASTType' object.
+	"""
+	return self._ast
+
+    def filename(self):
+	"""Returns the name of the source file if known, or None.
+	"""
+	return None
+
+    def text(self):
+	"""Returns the source text, or None if not available.
+
+	If the instance is of class `AST', None is returned since no
+	source text is available.  If of class `ExpressionAST' or
+	`SuiteAST', the source text passed to the constructor is
+	returned.
+	"""
+	return self._text
+
+    def isSuite(self):
+	"""Determine if `AST' instance represents a suite of statements.
+	"""
+	return self._type == 'suite'
+
+    def isExpression(self):
+	"""Determine if `AST' instance represents an expression.
+	"""
+	return self._type == 'expression'
+
+
+
+class SuiteAST(AST):
+    """Statement suite parse tree representation.
+
+    This subclass of the `AST' base class represents statement suites
+    parsed from the source text of a Python suite.  If the source text
+    does not represent a parsable suite of statements, the appropriate
+    exception is raised by the parser.
+    """
+    _type = 'suite'
+
+    def __init__(self, text):
+	"""Initialize a `SuiteAST' from source text.
+
+	text
+	    Source text to parse.
+	"""
+	if type(text) is not type(''):
+	    raise TypeError, 'SuiteAST requires source text parameter.'
+	self._text = text
+	self._ast  = self._p.suite(text)
+
+    def isSuite(self):
+	return 1
+
+    def isExpression(self):
+	return 0
+
+
+class FileSuiteAST(SuiteAST):
+    """Representation of a python source file syntax tree.
+
+    This provides a convenience wrapper around the `SuiteAST' class to
+    load the source text from an external file.
+    """
+    def __init__(self, fileName):
+	"""Initialize a `SuiteAST' from a source file.
+
+	fileName
+	    Name of the external source file.
+	"""
+	self._fileName = fileName
+	SuiteAST.__init__(self, open(fileName).read())
+
+    def filename(self):
+	return self._fileName
+
+
+
+class ExpressionAST(AST):
+    """Expression parse tree representation.
+
+    This subclass of the `AST' base class represents expression
+    constructs parsed from the source text of a Python expression.  If
+    the source text does not represent a parsable expression, the
+    appropriate exception is raised by the Python parser.
+    """
+    _type = 'expression'
+
+    def __init__(self, text):
+	"""Initialize an expression AST from source text.
+
+	text
+	    Source text to parse.
+	"""
+	if type(text) is not type(''):
+	    raise TypeError, 'ExpressionAST requires source text parameter.'
+	self._text = text
+	self._ast  = self._p.expr(text)
+
+    def isSuite(self):
+	return 0
+
+    def isExpression(self):
+	return 1
+
+
+#
+#  end of file
diff --git a/Lib/dos_8x3/basehttp.py b/Lib/dos_8x3/basehttp.py
new file mode 100755
index 0000000..281ddf6
--- /dev/null
+++ b/Lib/dos_8x3/basehttp.py
@@ -0,0 +1,482 @@
+"""HTTP server base class.
+
+Note: the class in this module doesn't implement any HTTP request; see
+SimpleHTTPServer for simple implementations of GET, HEAD and POST
+(including CGI scripts).
+
+Contents:
+
+- BaseHTTPRequestHandler: HTTP request handler base class
+- test: test function
+
+XXX To do:
+
+- send server version
+- log requests even later (to capture byte count)
+- log user-agent header and other interesting goodies
+- send error log to separate file
+- are request names really case sensitive?
+
+"""
+
+
+# See also:
+#
+# HTTP Working Group                                        T. Berners-Lee
+# INTERNET-DRAFT                                            R. T. Fielding
+# <draft-ietf-http-v10-spec-00.txt>                     H. Frystyk Nielsen
+# Expires September 8, 1995                                  March 8, 1995
+#
+# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
+
+
+# Log files
+# ---------
+# 
+# Here's a quote from the NCSA httpd docs about log file format.
+# 
+# | The logfile format is as follows. Each line consists of: 
+# | 
+# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb 
+# | 
+# |        host: Either the DNS name or the IP number of the remote client 
+# |        rfc931: Any information returned by identd for this person,
+# |                - otherwise. 
+# |        authuser: If user sent a userid for authentication, the user name,
+# |                  - otherwise. 
+# |        DD: Day 
+# |        Mon: Month (calendar name) 
+# |        YYYY: Year 
+# |        hh: hour (24-hour format, the machine's timezone) 
+# |        mm: minutes 
+# |        ss: seconds 
+# |        request: The first line of the HTTP request as sent by the client. 
+# |        ddd: the status code returned by the server, - if not available. 
+# |        bbbb: the total number of bytes sent,
+# |              *not including the HTTP/1.0 header*, - if not available 
+# | 
+# | You can determine the name of the file accessed through request.
+# 
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+
+__version__ = "0.2"
+
+
+import sys
+import time
+import socket # For gethostbyaddr()
+import string
+import rfc822
+import mimetools
+import SocketServer
+
+# Default error message
+DEFAULT_ERROR_MESSAGE = """\
+<head>
+<title>Error response</title>
+</head>
+<body>
+<h1>Error response</h1>
+<p>Error code %(code)d.
+<p>Message: %(message)s.
+<p>Error code explanation: %(code)s = %(explain)s.
+</body>
+"""
+
+
+class HTTPServer(SocketServer.TCPServer):
+
+    def server_bind(self):
+	"""Override server_bind to store the server name."""
+	SocketServer.TCPServer.server_bind(self)
+	host, port = self.socket.getsockname()
+	if not host or host == '0.0.0.0':
+	    host = socket.gethostname()
+	hostname, hostnames, hostaddrs = socket.gethostbyaddr(host)
+	if '.' not in hostname:
+	    for host in hostnames:
+		if '.' in host:
+		    hostname = host
+		    break
+	self.server_name = hostname
+	self.server_port = port
+
+
+class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
+
+    """HTTP request handler base class.
+
+    The following explanation of HTTP serves to guide you through the
+    code as well as to expose any misunderstandings I may have about
+    HTTP (so you don't need to read the code to figure out I'm wrong
+    :-).
+
+    HTTP (HyperText Transfer Protocol) is an extensible protocol on
+    top of a reliable stream transport (e.g. TCP/IP).  The protocol
+    recognizes three parts to a request:
+
+    1. One line identifying the request type and path
+    2. An optional set of RFC-822-style headers
+    3. An optional data part
+
+    The headers and data are separated by a blank line.
+
+    The first line of the request has the form
+
+    <command> <path> <version>
+
+    where <command> is a (case-sensitive) keyword such as GET or POST,
+    <path> is a string containing path information for the request,
+    and <version> should be the string "HTTP/1.0".  <path> is encoded
+    using the URL encoding scheme (using %xx to signify the ASCII
+    character with hex code xx).
+
+    The protocol is vague about whether lines are separated by LF
+    characters or by CRLF pairs -- for compatibility with the widest
+    range of clients, both should be accepted.  Similarly, whitespace
+    in the request line should be treated sensibly (allowing multiple
+    spaces between components and allowing trailing whitespace).
+
+    Similarly, for output, lines ought to be separated by CRLF pairs
+    but most clients grok LF characters just fine.
+
+    If the first line of the request has the form
+
+    <command> <path>
+
+    (i.e. <version> is left out) then this is assumed to be an HTTP
+    0.9 request; this form has no optional headers and data part and
+    the reply consists of just the data.
+
+    The reply form of the HTTP 1.0 protocol again has three parts:
+
+    1. One line giving the response code
+    2. An optional set of RFC-822-style headers
+    3. The data
+
+    Again, the headers and data are separated by a blank line.
+
+    The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version (always "HTTP/1.0"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+    human-readable string explaining what the response code means.
+
+    This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>).  Specifically,
+    a request SPAM will be handled by a method handle_SPAM().  If no
+    such method exists the server sends an error response to the
+    client.  If it exists, it is called with no arguments:
+
+    do_SPAM()
+
+    Note that the request name is case sensitive (i.e. SPAM and spam
+    are different requests).
+
+    The various request details are stored in instance variables:
+
+    - client_address is the client IP address in the form (host,
+    port);
+
+    - command, path and version are the broken-down request line;
+
+    - headers is an instance of mimetools.Message (or a derived
+    class) containing the header information;
+
+    - rfile is a file object open for reading positioned at the
+    start of the optional input data part;
+
+    - wfile is a file object open for writing.
+
+    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+    The first thing to be written must be the response line.  Then
+    follow 0 or more header lines, then a blank line, and then the
+    actual data (if any).  The meaning of the header lines depends on
+    the command executed by the server; in most cases, when data is
+    returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+    e.g. "text/html" or "text/plain".
+
+    """
+
+    # The Python system version, truncated to its first component.
+    sys_version = "Python/" + string.split(sys.version)[0]
+
+    # The server software version.  You may want to override this.
+    # The format is multiple whitespace-separated strings,
+    # where each string is of the form name[/version].
+    server_version = "BaseHTTP/" + __version__
+
+    def handle(self):
+	"""Handle a single HTTP request.
+
+	You normally don't need to override this method; see the class
+	__doc__ string for information on how to handle specific HTTP
+	commands such as GET and POST.
+
+	"""
+
+	self.raw_requestline = self.rfile.readline()
+	self.request_version = version = "HTTP/0.9" # Default
+	requestline = self.raw_requestline
+	if requestline[-2:] == '\r\n':
+	    requestline = requestline[:-2]
+	elif requestline[-1:] == '\n':
+	    requestline = requestline[:-1]
+	self.requestline = requestline
+	words = string.split(requestline)
+	if len(words) == 3:
+	    [command, path, version] = words
+	    if version != self.protocol_version:
+		self.send_error(400, "Bad request version (%s)" % `version`)
+		return
+	elif len(words) == 2:
+	    [command, path] = words
+	    if command != 'GET':
+		self.send_error(400,
+				"Bad HTTP/0.9 request type (%s)" % `command`)
+		return
+	else:
+	    self.send_error(400, "Bad request syntax (%s)" % `requestline`)
+	    return
+	self.command, self.path, self.request_version = command, path, version
+	self.headers = self.MessageClass(self.rfile, 0)
+	mname = 'do_' + command
+	if not hasattr(self, mname):
+	    self.send_error(501, "Unsupported method (%s)" % `mname`)
+	    return
+	method = getattr(self, mname)
+	method()
+
+    def send_error(self, code, message=None):
+	"""Send and log an error reply.
+
+	Arguments are the error code, and a detailed message.
+	The detailed message defaults to the short entry matching the
+	response code.
+
+	This sends an error response (so it must be called before any
+	output has been generated), logs the error, and finally sends
+	a piece of HTML explaining the error to the user.
+
+	"""
+
+	try:
+	    short, long = self.responses[code]
+	except KeyError:
+	    short, long = '???', '???'
+	if not message:
+	    message = short
+	explain = long
+	self.log_error("code %d, message %s", code, message)
+	self.send_response(code, message)
+	self.end_headers()
+	self.wfile.write(self.error_message_format %
+			 {'code': code,
+			  'message': message,
+			  'explain': explain})
+
+    error_message_format = DEFAULT_ERROR_MESSAGE
+
+    def send_response(self, code, message=None):
+	"""Send the response header and log the response code.
+
+	Also send two standard headers with the server software
+	version and the current date.
+
+	"""
+	self.log_request(code)
+	if message is None:
+	    if self.responses.has_key(code):
+		message = self.responses[code][1]
+	    else:
+		message = ''
+	if self.request_version != 'HTTP/0.9':
+	    self.wfile.write("%s %s %s\r\n" %
+			     (self.protocol_version, str(code), message))
+	self.send_header('Server', self.version_string())
+	self.send_header('Date', self.date_time_string())
+
+    def send_header(self, keyword, value):
+	"""Send a MIME header."""
+	if self.request_version != 'HTTP/0.9':
+	    self.wfile.write("%s: %s\r\n" % (keyword, value))
+
+    def end_headers(self):
+	"""Send the blank line ending the MIME headers."""
+	if self.request_version != 'HTTP/0.9':
+	    self.wfile.write("\r\n")
+
+    def log_request(self, code='-', size='-'):
+	"""Log an accepted request.
+
+	This is called by send_response().
+
+	"""
+
+	self.log_message('"%s" %s %s',
+			 self.requestline, str(code), str(size))
+
+    def log_error(self, *args):
+	"""Log an error.
+
+	This is called when a request cannot be fulfilled.  By
+	default it passes the message on to log_message().
+
+	Arguments are the same as for log_message().
+
+	XXX This should go to the separate error log.
+
+	"""
+
+	apply(self.log_message, args)
+
+    def log_message(self, format, *args):
+	"""Log an arbitrary message.
+
+	This is used by all other logging functions.  Override
+	it if you have specific logging wishes.
+
+	The first argument, FORMAT, is a format string for the
+	message to be logged.  If the format string contains
+	any % escapes requiring parameters, they should be
+	specified as subsequent arguments (it's just like
+	printf!).
+
+	The client host and current date/time are prefixed to
+	every message.
+
+	"""
+
+	sys.stderr.write("%s - - [%s] %s\n" %
+			 (self.address_string(),
+			  self.log_date_time_string(),
+			  format%args))
+
+    def version_string(self):
+	"""Return the server software version string."""
+	return self.server_version + ' ' + self.sys_version
+
+    def date_time_string(self):
+	"""Return the current date and time formatted for a message header."""
+	now = time.time()
+	year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
+	s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+		self.weekdayname[wd],
+		day, self.monthname[month], year,
+		hh, mm, ss)
+	return s
+
+    def log_date_time_string(self):
+	"""Return the current time formatted for logging."""
+	now = time.time()
+	year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+	s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+		day, self.monthname[month], year, hh, mm, ss)
+	return s
+
+    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+    monthname = [None,
+		 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+		 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+    def address_string(self):
+	"""Return the client address formatted for logging.
+
+	This version looks up the full hostname using gethostbyaddr(),
+	and tries to find a name that contains at least one dot.
+
+	"""
+
+	(host, port) = self.client_address
+	try:
+	    name, names, addresses = socket.gethostbyaddr(host)
+	except socket.error, msg:
+	    return host
+	names.insert(0, name)
+	for name in names:
+	    if '.' in name: return name
+	return names[0]
+
+
+    # Essentially static class variables
+
+    # The version of the HTTP protocol we support.
+    # Don't override unless you know what you're doing (hint: incoming
+    # requests are required to have exactly this version string).
+    protocol_version = "HTTP/1.0"
+
+    # The Message-like class used to parse headers
+    MessageClass = mimetools.Message
+
+    # Table mapping response codes to messages; entries have the
+    # form {code: (shortmessage, longmessage)}.
+    # See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
+    responses = {
+	200: ('OK', 'Request fulfilled, document follows'),
+	201: ('Created', 'Document created, URL follows'),
+	202: ('Accepted',
+	      'Request accepted, processing continues off-line'),
+	203: ('Partial information', 'Request fulfilled from cache'),
+	204: ('No response', 'Request fulfilled, nothing follows'),
+	
+	301: ('Moved', 'Object moved permanently -- see URI list'),
+	302: ('Found', 'Object moved temporarily -- see URI list'),
+	303: ('Method', 'Object moved -- see Method and URL list'),
+	304: ('Not modified',
+	      'Document has not changed since given time'),
+	
+	400: ('Bad request',
+	      'Bad request syntax or unsupported method'),
+	401: ('Unauthorized',
+	      'No permission -- see authorization schemes'),
+	402: ('Payment required',
+	      'No payment -- see charging schemes'),
+	403: ('Forbidden',
+	      'Request forbidden -- authorization will not help'),
+	404: ('Not found', 'Nothing matches the given URI'),
+	
+	500: ('Internal error', 'Server got itself in trouble'),
+	501: ('Not implemented',
+	      'Server does not support this operation'),
+	502: ('Service temporarily overloaded',
+	      'The server cannot process the request due to a high load'),
+	503: ('Gateway timeout',
+	      'The gateway server did not receive a timely response'),
+	
+	}
+
+
+def test(HandlerClass = BaseHTTPRequestHandler,
+	 ServerClass = HTTPServer):
+    """Test the HTTP request handler class.
+
+    This runs an HTTP server on port 8000 (or the first command line
+    argument).
+
+    """
+
+    if sys.argv[1:]:
+	port = string.atoi(sys.argv[1])
+    else:
+	port = 8000
+    server_address = ('', port)
+
+    httpd = ServerClass(server_address, HandlerClass)
+
+    print "Serving HTTP on port", port, "..."
+    httpd.serve_forever()
+
+
+if __name__ == '__main__':
+    test()
diff --git a/Lib/dos_8x3/bastion.py b/Lib/dos_8x3/bastion.py
new file mode 100755
index 0000000..7ddd93e
--- /dev/null
+++ b/Lib/dos_8x3/bastion.py
@@ -0,0 +1,162 @@
+"""Bastionification utility.
+
+A bastion (for another object -- the 'original') is an object that has
+the same methods as the original but does not give access to its
+instance variables.  Bastions have a number of uses, but the most
+obvious one is to provide code executing in restricted mode with a
+safe interface to an object implemented in unrestricted mode.
+
+The bastionification routine has an optional second argument which is
+a filter function.  Only those methods for which the filter method
+(called with the method name as argument) returns true are accessible.
+The default filter method returns true unless the method name begins
+with an underscore.
+
+There are a number of possible implementations of bastions.  We use a
+'lazy' approach where the bastion's __getattr__() discipline does all
+the work for a particular method the first time it is used.  This is
+usually fastest, especially if the user doesn't call all available
+methods.  The retrieved methods are stored as instance variables of
+the bastion, so the overhead is incurred only on the first use of each
+method.
+
+Detail: the bastion class has a __repr__() discipline which includes
+the repr() of the original object.  This is precomputed when the
+bastion is created.
+
+"""
+
+__version__ = '$Revision$'
+# $Source$
+
+
+from types import MethodType
+
+
+class BastionClass:
+
+    """Helper class used by the Bastion() function.
+
+    You could subclass this and pass the subclass as the bastionclass
+    argument to the Bastion() function, as long as the constructor has
+    the same signature (a get() function and a name for the object).
+
+    """
+
+    def __init__(self, get, name):
+	"""Constructor.
+
+	Arguments:
+
+	get - a function that gets the attribute value (by name)
+	name - a human-readable name for the original object
+	       (suggestion: use repr(object))
+
+	"""
+	self._get_ = get
+	self._name_ = name
+
+    def __repr__(self):
+	"""Return a representation string.
+
+	This includes the name passed in to the constructor, so that
+	if you print the bastion during debugging, at least you have
+	some idea of what it is.
+
+	"""
+	return "<Bastion for %s>" % self._name_
+
+    def __getattr__(self, name):
+	"""Get an as-yet undefined attribute value.
+
+	This calls the get() function that was passed to the
+	constructor.  The result is stored as an instance variable so
+	that the next time the same attribute is requested,
+	__getattr__() won't be invoked.
+
+	If the get() function raises an exception, this is simply
+	passed on -- exceptions are not cached.
+
+	"""
+	attribute = self._get_(name)
+	self.__dict__[name] = attribute
+	return attribute
+
+
+def Bastion(object, filter = lambda name: name[:1] != '_',
+	    name=None, bastionclass=BastionClass):
+    """Create a bastion for an object, using an optional filter.
+
+    See the Bastion module's documentation for background.
+
+    Arguments:
+
+    object - the original object
+    filter - a predicate that decides whether a function name is OK;
+             by default all names are OK that don't start with '_'
+    name - the name of the object; default repr(object)
+    bastionclass - class used to create the bastion; default BastionClass
+
+    """
+
+    # Note: we define *two* ad-hoc functions here, get1 and get2.
+    # Both are intended to be called in the same way: get(name).
+    # It is clear that the real work (getting the attribute
+    # from the object and calling the filter) is done in get1.
+    # Why can't we pass get1 to the bastion?  Because the user
+    # would be able to override the filter argument!  With get2,
+    # overriding the default argument is no security loophole:
+    # all it does is call it.
+    # Also notice that we can't place the object and filter as
+    # instance variables on the bastion object itself, since
+    # the user has full access to all instance variables!
+
+    def get1(name, object=object, filter=filter):
+	"""Internal function for Bastion().  See source comments."""
+	if filter(name):
+	    attribute = getattr(object, name)
+	    if type(attribute) == MethodType:
+		return attribute
+	raise AttributeError, name
+
+    def get2(name, get1=get1):
+	"""Internal function for Bastion().  See source comments."""
+	return get1(name)
+
+    if name is None:
+	name = `object`
+    return bastionclass(get2, name)
+
+
+def _test():
+    """Test the Bastion() function."""
+    class Original:
+	def __init__(self):
+	    self.sum = 0
+	def add(self, n):
+	    self._add(n)
+	def _add(self, n):
+	    self.sum = self.sum + n
+	def total(self):
+	    return self.sum
+    o = Original()
+    b = Bastion(o)
+    b.add(81)
+    b.add(18)
+    print "b.total() =", b.total()
+    try:
+	print "b.sum =", b.sum,
+    except:
+	print "inaccessible"
+    else:
+	print "accessible"
+    try:
+	print "b._add =", b._add,
+    except:
+	print "inaccessible"
+    else:
+	print "accessible"
+
+
+if __name__ == '__main__':
+    _test()
diff --git a/Lib/dos_8x3/cgihttps.py b/Lib/dos_8x3/cgihttps.py
new file mode 100755
index 0000000..837f7c2
--- /dev/null
+++ b/Lib/dos_8x3/cgihttps.py
@@ -0,0 +1,203 @@
+"""CGI-savvy HTTP Server.
+
+This module builds on SimpleHTTPServer by implementing GET and POST
+requests to cgi-bin scripts.
+
+"""
+
+
+__version__ = "0.3"
+
+
+import os
+import sys
+import time
+import socket
+import string
+import urllib
+import BaseHTTPServer
+import SimpleHTTPServer
+
+
+class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+
+    """Complete HTTP server with GET, HEAD and POST commands.
+
+    GET and HEAD also support running CGI scripts.
+
+    The POST command is *only* implemented for CGI scripts.
+
+    """
+
+    def do_POST(self):
+	"""Serve a POST request.
+
+	This is only implemented for CGI scripts.
+
+	"""
+
+	if self.is_cgi():
+	    self.run_cgi()
+	else:
+	    self.send_error(501, "Can only POST to CGI scripts")
+
+    def send_head(self):
+	"""Version of send_head that support CGI scripts"""
+	if self.is_cgi():
+	    return self.run_cgi()
+	else:
+	    return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
+
+    def is_cgi(self):
+	"""test whether PATH corresponds to a CGI script.
+
+	Return a tuple (dir, rest) if PATH requires running a
+	CGI script, None if not.  Note that rest begins with a
+	slash if it is not empty.
+
+	The default implementation tests whether the path
+	begins with one of the strings in the list
+	self.cgi_directories (and the next character is a '/'
+	or the end of the string).
+
+	"""
+
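+	# Example (illustrative): the path '/cgi-bin/test.py' matches the
+	# '/cgi-bin' entry, giving cgi_info = ('/cgi-bin', 'test.py').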
+	path = self.path
+
+	for x in self.cgi_directories:
+	    i = len(x)
+	    if path[:i] == x and (not path[i:] or path[i] == '/'):
+		self.cgi_info = path[:i], path[i+1:]
+		return 1
+	return 0
+
+    cgi_directories = ['/cgi-bin', '/htbin']
+
+    def run_cgi(self):
+	"""Execute a CGI script."""
+	dir, rest = self.cgi_info
+	i = string.rfind(rest, '?')
+	if i >= 0:
+	    rest, query = rest[:i], rest[i+1:]
+	else:
+	    query = ''
+	i = string.find(rest, '/')
+	if i >= 0:
+	    script, rest = rest[:i], rest[i:]
+	else:
+	    script, rest = rest, ''
+	scriptname = dir + '/' + script
+	scriptfile = self.translate_path(scriptname)
+	if not os.path.exists(scriptfile):
+	    self.send_error(404, "No such CGI script (%s)" % `scriptname`)
+	    return
+	if not os.path.isfile(scriptfile):
+	    self.send_error(403, "CGI script is not a plain file (%s)" %
+			    `scriptname`)
+	    return
+	if not executable(scriptfile):
+	    self.send_error(403, "CGI script is not executable (%s)" %
+			    `scriptname`)
+	    return
+	nobody = nobody_uid()
+	self.send_response(200, "Script output follows")
+	self.wfile.flush() # Always flush before forking
+	pid = os.fork()
+	if pid != 0:
+	    # Parent
+	    pid, sts = os.waitpid(pid, 0)
+	    if sts:
+		self.log_error("CGI script exit status x%x" % sts)
+	    return
+	# Child
+	try:
+	    # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+	    # XXX Much of the following could be prepared ahead of time!
+	    env = {}
+	    env['SERVER_SOFTWARE'] = self.version_string()
+	    env['SERVER_NAME'] = self.server.server_name
+	    env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+	    env['SERVER_PROTOCOL'] = self.protocol_version
+	    env['SERVER_PORT'] = str(self.server.server_port)
+	    env['REQUEST_METHOD'] = self.command
+	    uqrest = urllib.unquote(rest)
+	    env['PATH_INFO'] = uqrest
+	    env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+	    env['SCRIPT_NAME'] = scriptname
+	    if query:
+		env['QUERY_STRING'] = query
+	    host = self.address_string()
+	    if host != self.client_address[0]:
+		env['REMOTE_HOST'] = host
+	    env['REMOTE_ADDR'] = self.client_address[0]
+	    # AUTH_TYPE
+	    # REMOTE_USER
+	    # REMOTE_IDENT
+	    env['CONTENT_TYPE'] = self.headers.type
+	    length = self.headers.getheader('content-length')
+	    if length:
+		env['CONTENT_LENGTH'] = length
+	    accept = []
+	    for line in self.headers.getallmatchingheaders('accept'):
+		if line[:1] in string.whitespace:
+		    accept.append(string.strip(line))
+		else:
+		    accept = accept + string.split(line[7:])
+	    env['HTTP_ACCEPT'] = string.joinfields(accept, ',')
+	    ua = self.headers.getheader('user-agent')
+	    if ua:
+		env['HTTP_USER_AGENT'] = ua
+	    # XXX Other HTTP_* headers
+	    import regsub
+	    decoded_query = regsub.gsub('+', ' ', query)
+	    try:
+		os.setuid(nobody)
+	    except os.error:
+		pass
+	    os.dup2(self.rfile.fileno(), 0)
+	    os.dup2(self.wfile.fileno(), 1)
+	    print scriptfile, script, decoded_query
+	    os.execve(scriptfile,
+		      [script, decoded_query],
+		      env)
+	except:
+	    self.server.handle_error(self.request, self.client_address)
+	    os._exit(127)
+
+
+nobody = None
+
+def nobody_uid():
+    """Internal routine to get nobody's uid"""
+    global nobody
+    if nobody:
+	return nobody
+    import pwd
+    try:
+	nobody = pwd.getpwnam('nobody')[2]
+    except pwd.error:
+	nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
+    return nobody
+
+
+def executable(path):
+    """Test for executable file."""
+    try:
+	st = os.stat(path)
+    except os.error:
+	return 0
+    return st[0] & 0111 != 0
+
+
+def test(HandlerClass = CGIHTTPRequestHandler,
+	 ServerClass = BaseHTTPServer.HTTPServer):
+    import sys
+    if sys.argv[1:2] == ['-r']:
+	db = MyArchive()
+	db.regenindices()
+	return
+    SimpleHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+    test()
diff --git a/Lib/dos_8x3/compilea.py b/Lib/dos_8x3/compilea.py
new file mode 100755
index 0000000..3120284
--- /dev/null
+++ b/Lib/dos_8x3/compilea.py
@@ -0,0 +1,70 @@
+# Routines to force "compilation" of all .py files in a directory
+# tree or on sys.path.  By default recursion is pruned at a depth of
+# 10 and the current directory, if it occurs in sys.path, is skipped.
+# When called as a script, compiles argument directories, or sys.path
+# if no arguments.
+# After a similar module by Sjoerd Mullender.
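+#
+# Example (illustrative): compile_dir('/usr/lib/python1.5') byte-compiles an
+# entire tree; compile_path() byte-compiles every directory on sys.path.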
+
+import os
+import sys
+import py_compile
+
+def compile_dir(dir, maxlevels = 10):
+	print 'Listing', dir, '...'
+	try:
+		names = os.listdir(dir)
+	except os.error:
+		print "Can't list", dir
+		names = []
+	names.sort()
+	for name in names:
+		fullname = os.path.join(dir, name)
+		if os.path.isfile(fullname):
+			head, tail = name[:-3], name[-3:]
+			if tail == '.py':
+				print 'Compiling', fullname, '...'
+				try:
+					py_compile.compile(fullname)
+				except KeyboardInterrupt:
+					del names[:]
+					print '\n[interrupt]'
+					break
+				except:
+					if type(sys.exc_type) == type(''):
+						exc_type_name = sys.exc_type
+					else: exc_type_name = sys.exc_type.__name__
+					print 'Sorry:', exc_type_name + ':',
+					print sys.exc_value
+		elif maxlevels > 0 and \
+		     name != os.curdir and name != os.pardir and \
+		     os.path.isdir(fullname) and \
+		     not os.path.islink(fullname):
+			compile_dir(fullname, maxlevels - 1)
+
+def compile_path(skip_curdir = 1):
+	for dir in sys.path:
+		if dir == os.curdir and skip_curdir:
+			print 'Skipping current directory'
+		else:
+			compile_dir(dir, 0)
+
+def main():
+	import getopt
+	try:
+		opts, args = getopt.getopt(sys.argv[1:], 'l')
+	except getopt.error, msg:
+		print msg
+		print "usage: compileall [-l] [directory ...]"
+		print "-l: don't recurse down"
+		print "if no arguments, -l sys.path is assumed"
+	maxlevels = 10
+	for o, a in opts:
+		if o == '-l': maxlevels = 0
+	if args:
+		for dir in args:
+			compile_dir(dir, maxlevels)
+	else:
+		compile_path()
+
+if __name__ == '__main__':
+	main()
diff --git a/Lib/dos_8x3/complex.py b/Lib/dos_8x3/complex.py
new file mode 100755
index 0000000..f4892f3
--- /dev/null
+++ b/Lib/dos_8x3/complex.py
@@ -0,0 +1,275 @@
+# Complex numbers
+# ---------------
+
+# This module represents complex numbers as instances of the class Complex.
+# A Complex instance z has two data attributes, z.re (the real part) and z.im
+# (the imaginary part).  In fact, z.re and z.im can have any value -- all
+# arithmetic operators work regardless of the type of z.re and z.im (as long
+# as they support numerical operations).
+#
+# The following functions exist (Complex is actually a class):
+# Complex([re [,im]]) -> creates a complex number from a real and an imaginary part
+# IsComplex(z) -> true iff z is a complex number (== has .re and .im attributes)
+# Polar([r [,phi [,fullcircle]]]) ->
+#	the complex number z for which r == z.radius() and phi == z.angle(fullcircle)
+#	(r and phi default to 0)
+#
+# Complex numbers have the following methods:
+# z.abs() -> absolute value of z
+# z.radius() == z.abs()
+# z.angle([fullcircle]) -> angle from positive X axis; fullcircle gives units
+# z.phi([fullcircle]) == z.angle(fullcircle)
+#
+# These standard functions and unary operators accept complex arguments:
+# abs(z)
+# -z
+# +z
+# not z
+# repr(z) == `z`
+# str(z)
+# hash(z) -> a combination of hash(z.re) and hash(z.im) such that if z.im is zero
+#            the result equals hash(z.re)
+# Note that hex(z) and oct(z) are not defined.
+#
+# These conversions accept complex arguments only if their imaginary part is zero:
+# int(z)
+# long(z)
+# float(z)
+#
+# The following operators accept two complex numbers, or one complex number
+# and one real number (int, long or float):
+# z1 + z2
+# z1 - z2
+# z1 * z2
+# z1 / z2
+# pow(z1, z2)
+# cmp(z1, z2)
+# Note that z1 % z2 and divmod(z1, z2) are not defined,
+# nor are shift and mask operations.
+#
+# The standard module math does not support complex numbers.
+# (I suppose it would be easy to implement a cmath module.)
+#
+# Idea:
+# add a class Polar(r, phi) and mixed-mode arithmetic which
+# chooses the most appropriate type for the result:
+# Complex for +,-,cmp
+# Polar   for *,/,pow
+
+
+import types, math, sys   # sys.maxint is used in Complex.__hash__
+
+if not hasattr(math, 'hypot'):
+	def hypot(x, y):
+		# XXX I know there's a way to compute this without possibly causing
+		# overflow, but I can't remember what it is right now...
+		return math.sqrt(x*x + y*y)
+	math.hypot = hypot
+
+twopi = math.pi*2.0
+halfpi = math.pi/2.0
+
+def IsComplex(obj):
+	return hasattr(obj, 're') and hasattr(obj, 'im')
+
+def Polar(r = 0, phi = 0, fullcircle = twopi):
+	phi = phi * (twopi / fullcircle)
+	return Complex(math.cos(phi)*r, math.sin(phi)*r)
+
+class Complex:
+
+	def __init__(self, re=0, im=0):
+		if IsComplex(re):
+			im = im + re.im
+			re = re.re
+		if IsComplex(im):
+			re = re - im.im
+			im = im.re
+		self.re = re
+		self.im = im
+
+	def __setattr__(self, name, value):
+		if hasattr(self, name):
+			raise TypeError, "Complex numbers have set-once attributes"
+		self.__dict__[name] = value
+
+	def __repr__(self):
+		if not self.im:
+			return 'Complex(%s)' % `self.re`
+		else:
+			return 'Complex(%s, %s)' % (`self.re`, `self.im`)
+
+	def __str__(self):
+		if not self.im:
+			return `self.re`
+		else:
+			return 'Complex(%s, %s)' % (`self.re`, `self.im`)
+
+	def __coerce__(self, other):
+		if IsComplex(other):
+			return self, other
+		return self, Complex(other)	# May fail
+
+	def __cmp__(self, other):
+		return cmp(self.re, other.re) or cmp(self.im, other.im)
+
+	def __hash__(self):
+		if not self.im: return hash(self.re)
+		mod = sys.maxint + 1L
+		return int((hash(self.re) + 2L*hash(self.im) + mod) % (2L*mod) - mod)
+
+	def __neg__(self):
+		return Complex(-self.re, -self.im)
+
+	def __pos__(self):
+		return self
+
+	def __abs__(self):
+		return math.hypot(self.re, self.im)
+		##return math.sqrt(self.re*self.re + self.im*self.im)
+
+
+	def __int__(self):
+		if self.im:
+			raise ValueError, "can't convert Complex with nonzero im to int"
+		return int(self.re)
+
+	def __long__(self):
+		if self.im:
+			raise ValueError, "can't convert Complex with nonzero im to long"
+		return long(self.re)
+
+	def __float__(self):
+		if self.im:
+			raise ValueError, "can't convert Complex with nonzero im to float"
+		return float(self.re)
+
+	def __nonzero__(self):
+		return not (self.re == self.im == 0)
+
+	abs = radius = __abs__
+
+	def angle(self, fullcircle = twopi):
+		return (fullcircle/twopi) * ((halfpi - math.atan2(self.re, self.im)) % twopi)
+
+	phi = angle
+
+	def __add__(self, other):
+		return Complex(self.re + other.re, self.im + other.im)
+
+	__radd__ = __add__
+
+	def __sub__(self, other):
+		return Complex(self.re - other.re, self.im - other.im)
+
+	def __rsub__(self, other):
+		return Complex(other.re - self.re, other.im - self.im)
+
+	def __mul__(self, other):
+		return Complex(self.re*other.re - self.im*other.im,
+		               self.re*other.im + self.im*other.re)
+
+	__rmul__ = __mul__
+
+	def __div__(self, other):
+		# Deviating from the general principle of not forcing re or im
+		# to be floats, we cast to float here, otherwise division
+		# of Complex numbers with integer re and im parts would use
+		# the (truncating) integer division
+		d = float(other.re*other.re + other.im*other.im)
+		if not d: raise ZeroDivisionError, 'Complex division'
+		return Complex((self.re*other.re + self.im*other.im) / d,
+		               (self.im*other.re - self.re*other.im) / d)
+
+	def __rdiv__(self, other):
+		return other / self
+
+	def __pow__(self, n, z=None):
+		if z is not None:
+			raise TypeError, 'Complex does not support ternary pow()'
+		if IsComplex(n):
+			if n.im: raise TypeError, 'Complex to the Complex power'
+			n = n.re
+		r = pow(self.abs(), n)
+		phi = n*self.angle()
+		return Complex(math.cos(phi)*r, math.sin(phi)*r)
+	
+	def __rpow__(self, base):
+		return pow(base, self)
+
+
+# Everything below this point is part of the test suite
+
+def checkop(expr, a, b, value, fuzz = 1e-6):
+	import sys
+	print '       ', a, 'and', b,
+	try:
+		result = eval(expr)
+	except:
+		result = sys.exc_type
+	print '->', result
+	if (type(result) == type('') or type(value) == type('')):
+		ok = result == value
+	else:
+		ok = abs(result - value) <= fuzz
+	if not ok:
+		print '!!\t!!\t!! should be', value, 'diff', abs(result - value)
+
+
+def test():
+	testsuite = {
+		'a+b': [
+			(1, 10, 11),
+			(1, Complex(0,10), Complex(1,10)),
+			(Complex(0,10), 1, Complex(1,10)),
+			(Complex(0,10), Complex(1), Complex(1,10)),
+			(Complex(1), Complex(0,10), Complex(1,10)),
+		],
+		'a-b': [
+			(1, 10, -9),
+			(1, Complex(0,10), Complex(1,-10)),
+			(Complex(0,10), 1, Complex(-1,10)),
+			(Complex(0,10), Complex(1), Complex(-1,10)),
+			(Complex(1), Complex(0,10), Complex(1,-10)),
+		],
+		'a*b': [
+			(1, 10, 10),
+			(1, Complex(0,10), Complex(0, 10)),
+			(Complex(0,10), 1, Complex(0,10)),
+			(Complex(0,10), Complex(1), Complex(0,10)),
+			(Complex(1), Complex(0,10), Complex(0,10)),
+		],
+		'a/b': [
+			(1., 10, 0.1),
+			(1, Complex(0,10), Complex(0, -0.1)),
+			(Complex(0, 10), 1, Complex(0, 10)),
+			(Complex(0, 10), Complex(1), Complex(0, 10)),
+			(Complex(1), Complex(0,10), Complex(0, -0.1)),
+		],
+		'pow(a,b)': [
+			(1, 10, 1),
+			(1, Complex(0,10), 'TypeError'),
+			(Complex(0,10), 1, Complex(0,10)),
+			(Complex(0,10), Complex(1), Complex(0,10)),
+			(Complex(1), Complex(0,10), 'TypeError'),
+			(2, Complex(4,0), 16),
+		],
+		'cmp(a,b)': [
+			(1, 10, -1),
+			(1, Complex(0,10), 1),
+			(Complex(0,10), 1, -1),
+			(Complex(0,10), Complex(1), -1),
+			(Complex(1), Complex(0,10), 1),
+		],
+	}
+	exprs = testsuite.keys()
+	exprs.sort()
+	for expr in exprs:
+		print expr + ':'
+		t = (expr,)
+		for item in testsuite[expr]:
+			apply(checkop, t+item)
+	
+
+if __name__ == '__main__':
+	test()
diff --git a/Lib/dos_8x3/formatte.py b/Lib/dos_8x3/formatte.py
new file mode 100755
index 0000000..0266379
--- /dev/null
+++ b/Lib/dos_8x3/formatte.py
@@ -0,0 +1,399 @@
+import regex
+import regsub
+import string
+import sys
+from types import StringType
+
+
+AS_IS = None
+
+
+class NullFormatter:
+
+    def __init__(self): pass
+    def end_paragraph(self, blankline): pass
+    def add_line_break(self): pass
+    def add_hor_rule(self, abswidth=None, percentwidth=1.0,
+		     height=None, align=None): pass
+    def add_label_data(self, format, counter): pass
+    def add_flowing_data(self, data): pass
+    def add_literal_data(self, data): pass
+    def flush_softspace(self): pass
+    def push_alignment(self, align): pass
+    def pop_alignment(self): pass
+    def push_font(self, x): pass
+    def pop_font(self): pass
+    def push_margin(self, margin): pass
+    def pop_margin(self): pass
+    def set_spacing(self, spacing): pass
+    def push_style(self, *styles): pass
+    def pop_style(self, n=1): pass
+    def assert_line_data(self, flag=1): pass
+
+
+class AbstractFormatter:
+
+    def __init__(self, writer):
+	self.writer = writer		# Output device
+	self.align = None		# Current alignment
+	self.align_stack = []		# Alignment stack
+	self.font_stack = []		# Font state
+	self.margin_stack = []		# Margin state
+	self.spacing = None		# Vertical spacing state
+	self.style_stack = []		# Other state, e.g. color
+	self.nospace = 1		# Should leading space be suppressed
+	self.softspace = 0		# Should a space be inserted
+	self.para_end = 1		# Just ended a paragraph
+	self.parskip = 0		# Skipped space between paragraphs?
+	self.hard_break = 1		# Have a hard break
+	self.have_label = 0
+
+    def end_paragraph(self, blankline):
+	if not self.hard_break:
+	    self.writer.send_line_break()
+	    self.have_label = 0
+	if self.parskip < blankline and not self.have_label:
+	    self.writer.send_paragraph(blankline - self.parskip)
+	    self.parskip = blankline
+	    self.have_label = 0
+	self.hard_break = self.nospace = self.para_end = 1
+	self.softspace = 0
+
+    def add_line_break(self):
+	if not (self.hard_break or self.para_end):
+	    self.writer.send_line_break()
+	    self.have_label = self.parskip = 0
+	self.hard_break = self.nospace = 1
+	self.softspace = 0
+
+    def add_hor_rule(self, *args, **kw):
+	if not self.hard_break:
+	    self.writer.send_line_break()
+	apply(self.writer.send_hor_rule, args, kw)
+	self.hard_break = self.nospace = 1
+	self.have_label = self.para_end = self.softspace = self.parskip = 0
+
+    def add_label_data(self, format, counter, blankline = None):
+	if self.have_label or not self.hard_break:
+	    self.writer.send_line_break()
+	if not self.para_end:
+	    self.writer.send_paragraph((blankline and 1) or 0)
+	if type(format) is StringType:
+	    self.writer.send_label_data(self.format_counter(format, counter))
+	else:
+	    self.writer.send_label_data(format)
+	self.nospace = self.have_label = self.hard_break = self.para_end = 1
+	self.softspace = self.parskip = 0
+
+    def format_counter(self, format, counter):
+        label = ''
+        for c in format:
+            try:
+                if c == '1':
+		    label = label + ('%d' % counter)
+                elif c in 'aA':
+		    if counter > 0:
+			label = label + self.format_letter(c, counter)
+                elif c in 'iI':
+		    if counter > 0:
+			label = label + self.format_roman(c, counter)
+		else:
+		    label = label + c
+            except:
+                label = label + c
+        return label
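+    # Examples (illustrative): format_counter('(1)', 3) -> '(3)';
+    # format_counter('A.', 3) -> 'C.'; format_counter('i.', 4) -> 'iv.'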
+
+    def format_letter(self, case, counter):
+	label = ''
+	while counter > 0:
+	    counter, x = divmod(counter-1, 26)
+	    s = chr(ord(case) + x)
+	    label = s + label
+	return label
+
+    def format_roman(self, case, counter):
+        ones = ['i', 'x', 'c', 'm']
+        fives = ['v', 'l', 'd']
+        label, index = '', 0
+	# This will die of IndexError when counter is too big
+        while counter > 0:
+            counter, x = divmod(counter, 10)
+            if x == 9:
+                label = ones[index] + ones[index+1] + label
+            elif x == 4:
+                label = ones[index] + fives[index] + label
+            else:
+                if x >= 5:
+                    s = fives[index]
+                    x = x-5
+                else:
+                    s = ''
+                s = s + ones[index]*x
+		label = s + label
+            index = index + 1
+        if case == 'I':
+	    return string.upper(label)
+        return label
+
+    def add_flowing_data(self, data,
+			 # These are only here to load them into locals:
+			 whitespace = string.whitespace,
+			 join = string.join, split = string.split):
+	if not data: return
+	# The following looks a bit convoluted but is a great improvement over
+	# data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
+	prespace = data[:1] in whitespace
+	postspace = data[-1:] in whitespace
+	data = join(split(data))
+	if self.nospace and not data:
+	    return
+	elif prespace or self.softspace:
+	    if not data:
+		if not self.nospace:
+		    self.softspace = 1
+		    self.parskip = 0
+		return
+	    if not self.nospace:
+		data = ' ' + data
+	self.hard_break = self.nospace = self.para_end = \
+			  self.parskip = self.have_label = 0
+	self.softspace = postspace
+	self.writer.send_flowing_data(data)
+
+    def add_literal_data(self, data):
+	if not data: return
+	#  Caller is expected to cause flush_softspace() if needed.
+	self.hard_break = data[-1:] == '\n'
+	self.nospace = self.para_end = self.softspace = \
+		       self.parskip = self.have_label = 0
+	self.writer.send_literal_data(data)
+
+    def flush_softspace(self):
+	if self.softspace:
+	    self.hard_break = self.nospace = self.para_end = self.parskip = \
+			      self.have_label = self.softspace = 0
+	    self.writer.send_flowing_data(' ')
+
+    def push_alignment(self, align):
+	if align and align != self.align:
+	    self.writer.new_alignment(align)
+	    self.align = align
+	    self.align_stack.append(align)
+	else:
+	    self.align_stack.append(self.align)
+
+    def pop_alignment(self):
+	if self.align_stack:
+	    del self.align_stack[-1]
+	if self.align_stack:
+	    self.align = align = self.align_stack[-1]
+	    self.writer.new_alignment(align)
+	else:
+	    self.align = None
+	    self.writer.new_alignment(None)
+
+    def push_font(self, (size, i, b, tt)):
+	if self.softspace:
+	    self.hard_break = self.nospace = self.para_end = self.softspace = 0
+	    self.writer.send_flowing_data(' ')
+	if self.font_stack:
+	    csize, ci, cb, ctt = self.font_stack[-1]
+	    if size is AS_IS: size = csize
+	    if i is AS_IS: i = ci
+	    if b is AS_IS: b = cb
+	    if tt is AS_IS: tt = ctt
+	font = (size, i, b, tt)
+	self.font_stack.append(font)
+	self.writer.new_font(font)
+
+    def pop_font(self):
+	if self.softspace:
+	    self.hard_break = self.nospace = self.para_end = self.softspace = 0
+	    self.writer.send_flowing_data(' ')
+	if self.font_stack:
+	    del self.font_stack[-1]
+	if self.font_stack:
+	    font = self.font_stack[-1]
+	else:
+	    font = None
+	self.writer.new_font(font)
+
+    def push_margin(self, margin):
+	self.margin_stack.append(margin)
+	fstack = filter(None, self.margin_stack)
+	if not margin and fstack:
+	    margin = fstack[-1]
+	self.writer.new_margin(margin, len(fstack))
+
+    def pop_margin(self):
+	if self.margin_stack:
+	    del self.margin_stack[-1]
+	fstack = filter(None, self.margin_stack)
+	if fstack:
+	    margin = fstack[-1]
+	else:
+	    margin = None
+	self.writer.new_margin(margin, len(fstack))
+
+    def set_spacing(self, spacing):
+	self.spacing = spacing
+	self.writer.new_spacing(spacing)
+
+    def push_style(self, *styles):
+	if self.softspace:
+	    self.hard_break = self.nospace = self.para_end = self.softspace = 0
+	    self.writer.send_flowing_data(' ')
+	for style in styles:
+	    self.style_stack.append(style)
+	self.writer.new_styles(tuple(self.style_stack))
+
+    def pop_style(self, n=1):
+	if self.softspace:
+	    self.hard_break = self.nospace = self.para_end = self.softspace = 0
+	    self.writer.send_flowing_data(' ')
+	del self.style_stack[-n:]
+	self.writer.new_styles(tuple(self.style_stack))
+
+    def assert_line_data(self, flag=1):
+	self.nospace = self.hard_break = not flag
+	self.para_end = self.have_label = 0
+
+
+class NullWriter:
+    """Minimal writer interface to use in testing.
+    """
+    def __init__(self): pass
+    def new_alignment(self, align): pass
+    def new_font(self, font): pass
+    def new_margin(self, margin, level): pass
+    def new_spacing(self, spacing): pass
+    def new_styles(self, styles): pass
+    def send_paragraph(self, blankline): pass
+    def send_line_break(self): pass
+    def send_hor_rule(self, *args, **kw): pass
+    def send_label_data(self, data): pass
+    def send_flowing_data(self, data): pass
+    def send_literal_data(self, data): pass
+
+
+class AbstractWriter(NullWriter):
+
+    def __init__(self):
+	pass
+
+    def new_alignment(self, align):
+	print "new_alignment(%s)" % `align`
+
+    def new_font(self, font):
+	print "new_font(%s)" % `font`
+
+    def new_margin(self, margin, level):
+	print "new_margin(%s, %d)" % (`margin`, level)
+
+    def new_spacing(self, spacing):
+	print "new_spacing(%s)" % `spacing`
+
+    def new_styles(self, styles):
+	print "new_styles(%s)" % `styles`
+
+    def send_paragraph(self, blankline):
+	print "send_paragraph(%s)" % `blankline`
+
+    def send_line_break(self):
+	print "send_line_break()"
+
+    def send_hor_rule(self, *args, **kw):
+	print "send_hor_rule()"
+
+    def send_label_data(self, data):
+	print "send_label_data(%s)" % `data`
+
+    def send_flowing_data(self, data):
+	print "send_flowing_data(%s)" % `data`
+
+    def send_literal_data(self, data):
+	print "send_literal_data(%s)" % `data`
+
+
+class DumbWriter(NullWriter):
+
+    def __init__(self, file=None, maxcol=72):
+	self.file = file or sys.stdout
+	self.maxcol = maxcol
+	NullWriter.__init__(self)
+	self.reset()
+
+    def reset(self):
+	self.col = 0
+	self.atbreak = 0
+
+    def send_paragraph(self, blankline):
+	self.file.write('\n' + '\n'*blankline)
+	self.col = 0
+	self.atbreak = 0
+
+    def send_line_break(self):
+	self.file.write('\n')
+	self.col = 0
+	self.atbreak = 0
+
+    def send_hor_rule(self, *args, **kw):
+	self.file.write('\n')
+	self.file.write('-'*self.maxcol)
+	self.file.write('\n')
+	self.col = 0
+	self.atbreak = 0
+
+    def send_literal_data(self, data):
+	self.file.write(data)
+	i = string.rfind(data, '\n')
+	if i >= 0:
+	    self.col = 0
+	    data = data[i+1:]
+	data = string.expandtabs(data)
+	self.col = self.col + len(data)
+	self.atbreak = 0
+
+    def send_flowing_data(self, data):
+	if not data: return
+	atbreak = self.atbreak or data[0] in string.whitespace
+	col = self.col
+	maxcol = self.maxcol
+	write = self.file.write
+	for word in string.split(data):
+	    if atbreak:
+		if col + len(word) >= maxcol:
+		    write('\n')
+		    col = 0
+		else:
+		    write(' ')
+		    col = col + 1
+	    write(word)
+	    col = col + len(word)
+	    atbreak = 1
+	self.col = col
+	self.atbreak = data[-1] in string.whitespace
+
+
+def test(file = None):
+    w = DumbWriter()
+    f = AbstractFormatter(w)
+    if file:
+	fp = open(file)
+    elif sys.argv[1:]:
+	fp = open(sys.argv[1])
+    else:
+	fp = sys.stdin
+    while 1:
+	line = fp.readline()
+	if not line:
+	    break
+	if line == '\n':
+	    f.end_paragraph(1)
+	else:
+	    f.add_flowing_data(line)
+    f.end_paragraph(0)
+
+
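+# Editor's sketch (not part of the original module): pushing a short
+# string through the DumbWriter/AbstractFormatter pair defined above,
+# much as test() below does with a file.  The sample text is made up.
+def example_format_string():
+    w = DumbWriter(maxcol = 30)
+    f = AbstractFormatter(w)
+    f.add_flowing_data('The quick brown fox jumps over the lazy dog.  ' * 3)
+    f.end_paragraph(1)
+
+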
+if __name__ == '__main__':
+    test()
diff --git a/Lib/dos_8x3/gopherli.py b/Lib/dos_8x3/gopherli.py
new file mode 100755
index 0000000..cf06e95
--- /dev/null
+++ b/Lib/dos_8x3/gopherli.py
@@ -0,0 +1,191 @@
+# Gopher protocol client interface
+
+import string
+
+# Default selector, host and port
+DEF_SELECTOR = '1/'
+DEF_HOST     = 'gopher.micro.umn.edu'
+DEF_PORT     = 70
+
+# Recognized file types
+A_TEXT       = '0'
+A_MENU       = '1'
+A_CSO        = '2'
+A_ERROR      = '3'
+A_MACBINHEX  = '4'
+A_PCBINHEX   = '5'
+A_UUENCODED  = '6'
+A_INDEX      = '7'
+A_TELNET     = '8'
+A_BINARY     = '9'
+A_DUPLICATE  = '+'
+A_SOUND      = 's'
+A_EVENT      = 'e'
+A_CALENDAR   = 'c'
+A_HTML       = 'h'
+A_TN3270     = 'T'
+A_MIME       = 'M'
+A_IMAGE      = 'I'
+A_WHOIS      = 'w'
+A_QUERY      = 'q'
+A_GIF        = 'g'
+A_HTML       = 'h'			# HTML file
+A_WWW        = 'w'			# WWW address
+A_PLUS_IMAGE = ':'
+A_PLUS_MOVIE = ';'
+A_PLUS_SOUND = '<'
+
+
+# Function mapping all file types to strings; unknown types become TYPE='x'
+_names = dir()
+_type_to_name_map = {}
+def type_to_name(gtype):
+	global _type_to_name_map
+	if not _type_to_name_map:
+		for name in _names:
+			if name[:2] == 'A_':
+				_type_to_name_map[eval(name)] = name[2:]
+	if _type_to_name_map.has_key(gtype):
+		return _type_to_name_map[gtype]
+	return 'TYPE=' + `gtype`
+
+# Names for characters and strings
+CRLF = '\r\n'
+TAB = '\t'
+
+# Send a selector to a given host and port, return a file with the reply
+def send_selector(selector, host, port = 0):
+	import socket
+	import string
+	if not port:
+		i = string.find(host, ':')
+		if i >= 0:
+			host, port = host[:i], string.atoi(host[i+1:])
+	if not port:
+		port = DEF_PORT
+	elif type(port) == type(''):
+		port = string.atoi(port)
+	s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+	s.connect(host, port)
+	s.send(selector + CRLF)
+	s.shutdown(1)
+	return s.makefile('rb')
+
+# Send a selector and a query string
+def send_query(selector, query, host, port = 0):
+	return send_selector(selector + '\t' + query, host, port)
+
+# The following functions interpret the data returned by the gopher
+# server according to the expected type, e.g. textfile or directory
+
+# Get a directory in the form of a list of entries
+def get_directory(f):
+	import string
+	list = []
+	while 1:
+		line = f.readline()
+		if not line:
+			print '(Unexpected EOF from server)'
+			break
+		if line[-2:] == CRLF:
+			line = line[:-2]
+		elif line[-1:] in CRLF:
+			line = line[:-1]
+		if line == '.':
+			break
+		if not line:
+			print '(Empty line from server)'
+			continue
+		gtype = line[0]
+		parts = string.splitfields(line[1:], TAB)
+		if len(parts) < 4:
+			print '(Bad line from server:', `line`, ')'
+			continue
+		if len(parts) > 4:
+			if parts[4:] != ['+']:
+			    print '(Extra info from server:', parts[4:], ')'
+		else:
+			parts.append('')
+		parts.insert(0, gtype)
+		list.append(parts)
+	return list
+
+# Get a text file as a list of lines, with trailing CRLF stripped
+def get_textfile(f):
+	list = []
+	get_alt_textfile(f, list.append)
+	return list
+
+# Get a text file and pass each line to a function, with trailing CRLF stripped
+def get_alt_textfile(f, func):
+	while 1:
+		line = f.readline()
+		if not line:
+			print '(Unexpected EOF from server)'
+			break
+		if line[-2:] == CRLF:
+			line = line[:-2]
+		elif line[-1:] in CRLF:
+			line = line[:-1]
+		if line == '.':
+			break
+		if line[:2] == '..':
+			line = line[1:]
+		func(line)
+
+# Get a binary file as one solid data block
+def get_binary(f):
+	data = f.read()
+	return data
+
+# Get a binary file and pass each block to a function
+def get_alt_binary(f, func, blocksize):
+	while 1:
+		data = f.read(blocksize)
+		if not data:
+			break
+		func(data)
+
+# Trivial test program
+def test():
+	import sys
+	import getopt
+	opts, args = getopt.getopt(sys.argv[1:], '')
+	selector = DEF_SELECTOR
+	type = selector[0]
+	host = DEF_HOST
+	port = DEF_PORT
+	if args:
+		host = args[0]
+		args = args[1:]
+	if args:
+		type = args[0]
+		args = args[1:]
+		if len(type) > 1:
+			type, selector = type[0], type
+		else:
+			selector = ''
+			if args:
+				selector = args[0]
+				args = args[1:]
+		query = ''
+		if args:
+			query = args[0]
+			args = args[1:]
+	if type == A_INDEX:
+		f = send_query(selector, query, host)
+	else:
+		f = send_selector(selector, host)
+	if type == A_TEXT:
+		list = get_textfile(f)
+		for item in list: print item
+	elif type in (A_MENU, A_INDEX):
+		list = get_directory(f)
+		for item in list: print item
+	else:
+		data = get_binary(f)
+		print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
+
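+# Editor's sketch (not part of the original module): a minimal helper
+# tying send_selector() and get_directory() together.  The defaults
+# reuse the constants defined above; everything else is made up.
+def example_list_menu(selector = DEF_SELECTOR, host = DEF_HOST):
+	# Fetch the menu behind the selector and print its entries.
+	f = send_selector(selector, host)
+	for entry in get_directory(f):
+		# entry is [type, display string, selector, host, port, extra]
+		print entry[0], entry[1]
+	f.close()
+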
+# Run the test when run as script
+if __name__ == '__main__':
+	test()
diff --git a/Lib/dos_8x3/htmlenti.py b/Lib/dos_8x3/htmlenti.py
new file mode 100755
index 0000000..55aefaa
--- /dev/null
+++ b/Lib/dos_8x3/htmlenti.py
@@ -0,0 +1,105 @@
+# Proposed entity definitions for HTML, taken from
+# http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_14.html
+
+entitydefs = {
+    'lt':       '<',
+    'gt':       '>',
+    'amp':      '&',
+    'quot':	'"',
+    'nbsp':	chr(160),	# no-break space
+    'iexcl':	chr(161),	# inverted exclamation mark
+    'cent':	chr(162),	# cent sign
+    'pound':	chr(163),	# pound sterling sign
+    'curren':	chr(164),	# general currency sign
+    'yen':	chr(165),	# yen sign
+    'brvbar':	chr(166),	# broken (vertical) bar
+    'sect':	chr(167),	# section sign
+    'uml':	chr(168),	# umlaut (dieresis)
+    'copy':	chr(169),	# copyright sign
+    'ordf':	chr(170),	# ordinal indicator, feminine
+    'laquo':	chr(171),	# angle quotation mark, left
+    'not':	chr(172),	# not sign
+    'shy':	chr(173),	# soft hyphen
+    'reg':	chr(174),	# registered sign
+    'macr':	chr(175),	# macron
+    'deg':	chr(176),	# degree sign
+    'plusmn':	chr(177),	# plus-or-minus sign
+    'sup2':	chr(178),	# superscript two
+    'sup3':	chr(179),	# superscript three
+    'acute':	chr(180),	# acute accent
+    'micro':	chr(181),	# micro sign
+    'para':	chr(182),	# pilcrow (paragraph sign)
+    'middot':	chr(183),	# middle dot
+    'cedil':	chr(184),	# cedilla
+    'sup1':	chr(185),	# superscript one
+    'ordm':	chr(186),	# ordinal indicator, masculine
+    'raquo':	chr(187),	# angle quotation mark, right
+    'frac14':	chr(188),	# fraction one-quarter
+    'frac12':	chr(189),	# fraction one-half
+    'frac34':	chr(190),	# fraction three-quarters
+    'iquest':	chr(191),	# inverted question mark
+    'Agrave':	chr(192),	# capital A, grave accent
+    'Aacute':	chr(193),	# capital A, acute accent
+    'Acirc':	chr(194),	# capital A, circumflex accent
+    'Atilde':	chr(195),	# capital A, tilde
+    'Auml':	chr(196),	# capital A, dieresis or umlaut mark
+    'Aring':	chr(197),	# capital A, ring
+    'AElig':	chr(198),	# capital AE diphthong (ligature)
+    'Ccedil':	chr(199),	# capital C, cedilla
+    'Egrave':	chr(200),	# capital E, grave accent
+    'Eacute':	chr(201),	# capital E, acute accent
+    'Ecirc':	chr(202),	# capital E, circumflex accent
+    'Euml':	chr(203),	# capital E, dieresis or umlaut mark
+    'Igrave':	chr(204),	# capital I, grave accent
+    'Iacute':	chr(205),	# capital I, acute accent
+    'Icirc':	chr(206),	# capital I, circumflex accent
+    'Iuml':	chr(207),	# capital I, dieresis or umlaut mark
+    'ETH':	chr(208),	# capital Eth, Icelandic
+    'Ntilde':	chr(209),	# capital N, tilde
+    'Ograve':	chr(210),	# capital O, grave accent
+    'Oacute':	chr(211),	# capital O, acute accent
+    'Ocirc':	chr(212),	# capital O, circumflex accent
+    'Otilde':	chr(213),	# capital O, tilde
+    'Ouml':	chr(214),	# capital O, dieresis or umlaut mark
+    'times':	chr(215),	# multiply sign
+    'Oslash':	chr(216),	# capital O, slash
+    'Ugrave':	chr(217),	# capital U, grave accent
+    'Uacute':	chr(218),	# capital U, acute accent
+    'Ucirc':	chr(219),	# capital U, circumflex accent
+    'Uuml':	chr(220),	# capital U, dieresis or umlaut mark
+    'Yacute':	chr(221),	# capital Y, acute accent
+    'THORN':	chr(222),	# capital THORN, Icelandic
+    'szlig':	chr(223),	# small sharp s, German (sz ligature)
+    'agrave':	chr(224),	# small a, grave accent
+    'aacute':	chr(225),	# small a, acute accent
+    'acirc':	chr(226),	# small a, circumflex accent
+    'atilde':	chr(227),	# small a, tilde
+    'auml':	chr(228),	# small a, dieresis or umlaut mark
+    'aring':	chr(229),	# small a, ring
+    'aelig':	chr(230),	# small ae diphthong (ligature)
+    'ccedil':	chr(231),	# small c, cedilla
+    'egrave':	chr(232),	# small e, grave accent
+    'eacute':	chr(233),	# small e, acute accent
+    'ecirc':	chr(234),	# small e, circumflex accent
+    'euml':	chr(235),	# small e, dieresis or umlaut mark
+    'igrave':	chr(236),	# small i, grave accent
+    'iacute':	chr(237),	# small i, acute accent
+    'icirc':	chr(238),	# small i, circumflex accent
+    'iuml':	chr(239),	# small i, dieresis or umlaut mark
+    'eth':	chr(240),	# small eth, Icelandic
+    'ntilde':	chr(241),	# small n, tilde
+    'ograve':	chr(242),	# small o, grave accent
+    'oacute':	chr(243),	# small o, acute accent
+    'ocirc':	chr(244),	# small o, circumflex accent
+    'otilde':	chr(245),	# small o, tilde
+    'ouml':	chr(246),	# small o, dieresis or umlaut mark
+    'divide':	chr(247),	# divide sign
+    'oslash':	chr(248),	# small o, slash
+    'ugrave':	chr(249),	# small u, grave accent
+    'uacute':	chr(250),	# small u, acute accent
+    'ucirc':	chr(251),	# small u, circumflex accent
+    'uuml':	chr(252),	# small u, dieresis or umlaut mark
+    'yacute':	chr(253),	# small y, acute accent
+    'thorn':	chr(254),	# small thorn, Icelandic
+    'yuml':	chr(255),	# small y, dieresis or umlaut mark
+}
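+
+# Editor's sketch (not part of the original module): one possible way to
+# expand "&name;" references using the table above.  Numeric references
+# such as "&#160;" are not handled; this is an illustration, not an API.
+def example_expand_entities(s):
+    import string
+    result = ''
+    while 1:
+        i = string.find(s, '&')
+        if i < 0:
+            return result + s
+        j = string.find(s, ';', i)
+        if j < 0:
+            return result + s
+        name = s[i+1:j]
+        if entitydefs.has_key(name):
+            result = result + s[:i] + entitydefs[name]
+        else:
+            result = result + s[:j+1]
+        s = s[j+1:]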
diff --git a/Lib/dos_8x3/importal.py b/Lib/dos_8x3/importal.py
new file mode 100755
index 0000000..780862c
--- /dev/null
+++ b/Lib/dos_8x3/importal.py
@@ -0,0 +1,36 @@
+# THIS IS OBSOLETE -- USE MODULE 'compileall' INSTEAD!
+
+# Utility module to import all modules in the path, in the hope
+# that this will update their ".pyc" files.
+
+import os
+import sys
+
+# Sabotage 'gl' and 'stdwin' to prevent windows popping up...
+for m in 'gl', 'stdwin', 'fl', 'fm':
+	sys.modules[m] = sys
+
+exceptions = ['importall']
+
+for dir in sys.path:
+	print 'Listing', dir
+	try:
+		names = os.listdir(dir)
+	except os.error:
+		print 'Can\'t list', dir
+		names = []
+	names.sort()
+	for name in names:
+		head, tail = name[:-3], name[-3:]
+		if tail == '.py' and head not in exceptions:
+			s = 'import ' + head
+			print s
+			try:
+				exec s + '\n'
+			except KeyboardInterrupt:
+				del names[:]
+				print '\n[interrupt]'
+				break
+			except:
+				print 'Sorry:', sys.exc_type + ':',
+				print sys.exc_value
diff --git a/Lib/dos_8x3/linecach.py b/Lib/dos_8x3/linecach.py
new file mode 100755
index 0000000..7de373f
--- /dev/null
+++ b/Lib/dos_8x3/linecach.py
@@ -0,0 +1,90 @@
+# Cache lines from files.
+# This is intended to read lines from modules imported -- hence if a filename
+# is not found, it will look down the module search path for a file by
+# that name.
+
+import sys
+import os
+from stat import *
+
+def getline(filename, lineno):
+	lines = getlines(filename)
+	if 1 <= lineno <= len(lines):
+		return lines[lineno-1]
+	else:
+		return ''
+
+
+# The cache
+
+cache = {} # The cache
+
+
+# Clear the cache entirely
+
+def clearcache():
+	global cache
+	cache = {}
+
+
+# Get the lines for a file from the cache.
+# Update the cache if it doesn't contain an entry for this file already.
+
+def getlines(filename):
+	if cache.has_key(filename):
+		return cache[filename][2]
+	else:
+		return updatecache(filename)
+
+
+# Discard cache entries that are out of date.
+# (This is not checked upon each call!)
+
+def checkcache():
+	for filename in cache.keys():
+		size, mtime, lines, fullname = cache[filename]
+		try:
+			stat = os.stat(fullname)
+		except os.error:
+			del cache[filename]
+			continue
+		if size <> stat[ST_SIZE] or mtime <> stat[ST_MTIME]:
+			del cache[filename]
+
+
+# Update a cache entry and return its list of lines.
+# If something's wrong, print a message, discard the cache entry,
+# and return an empty list.
+
+def updatecache(filename):
+	if cache.has_key(filename):
+		del cache[filename]
+	if not filename or filename[0] + filename[-1] == '<>':
+		return []
+	fullname = filename
+	try:
+		stat = os.stat(fullname)
+	except os.error, msg:
+		# Try looking through the module search path
+		basename = os.path.split(filename)[1]
+		for dirname in sys.path:
+			fullname = os.path.join(dirname, basename)
+			try:
+				stat = os.stat(fullname)
+				break
+			except os.error:
+				pass
+		else:
+			# No luck
+##			print '*** Cannot stat', filename, ':', msg
+			return []
+	try:
+		fp = open(fullname, 'r')
+		lines = fp.readlines()
+		fp.close()
+	except IOError, msg:
+##		print '*** Cannot open', fullname, ':', msg
+		return []
+	size, mtime = stat[ST_SIZE], stat[ST_MTIME]
+	cache[filename] = size, mtime, lines, fullname
+	return lines
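+
+# Editor's sketch (not part of the original module): how a debugger or
+# traceback printer might use getline().  The filename and line number
+# are made up.
+def example_show_line(filename = 'spam.py', lineno = 1):
+	line = getline(filename, lineno)
+	if line:
+		if line[-1:] == '\n': line = line[:-1]
+		print '%s(%d): %s' % (filename, lineno, line)
+	else:
+		print '%s(%d): <line not available>' % (filename, lineno)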
diff --git a/Lib/dos_8x3/macurl2p.py b/Lib/dos_8x3/macurl2p.py
new file mode 100755
index 0000000..65afe69
--- /dev/null
+++ b/Lib/dos_8x3/macurl2p.py
@@ -0,0 +1,76 @@
+"""Mac specific module for conversion between pathnames and URLs."""
+
+import string
+import urllib
+import os
+
+def url2pathname(pathname):
+    "Convert /-delimited pathname to mac pathname"
+    #
+    # XXXX The .. handling should be fixed...
+    #
+    tp = urllib.splittype(pathname)[0]
+    if tp and tp <> 'file':
+	raise RuntimeError, 'Cannot convert non-local URL to pathname'
+    components = string.split(pathname, '/')
+    # Remove . and embedded ..
+    i = 0
+    while i < len(components):
+	if components[i] == '.':
+	    del components[i]
+	elif components[i] == '..' and i > 0 and \
+				  components[i-1] not in ('', '..'):
+	    del components[i-1:i+1]
+	    i = i-1
+	elif components[i] == '' and i > 0 and components[i-1] <> '':
+	    del components[i]
+	else:
+	    i = i+1
+    if not components[0]:
+	# Absolute unix path, don't start with colon
+	return string.join(components[1:], ':')
+    else:
+	# relative unix path, start with colon. First replace
+	# leading .. by empty strings (giving ::file)
+	i = 0
+	while i < len(components) and components[i] == '..':
+	    components[i] = ''
+	    i = i + 1
+	return ':' + string.join(components, ':')
+
+def pathname2url(pathname):
+    "convert mac pathname to /-delimited pathname"
+    if '/' in pathname:
+	raise RuntimeError, "Cannot convert pathname containing slashes"
+    components = string.split(pathname, ':')
+    # Replace empty string ('::') by .. (will result in '/../' later)
+    for i in range(1, len(components)):
+	if components[i] == '':
+	    components[i] = '..'
+    # Truncate names longer than 31 bytes
+    components = map(lambda x: x[:31], components)
+
+    if os.path.isabs(pathname):
+	return '/' + string.join(components, '/')
+    else:
+	return string.join(components, '/')
+
+def test():
+    for url in ["index.html",
+		"bar/index.html",
+		"/foo/bar/index.html",
+		"/foo/bar/",
+		"/"]:
+	print `url`, '->', `url2pathname(url)`
+    for path in ["drive:",
+		 "drive:dir:",
+		 "drive:dir:file",
+		 "drive:file",
+		 "file",
+		 ":file",
+		 ":dir:",
+		 ":dir:file"]:
+	print `path`, '->', `pathname2url(path)`
+
+if __name__ == '__main__':
+    test()
diff --git a/Lib/dos_8x3/mimetool.py b/Lib/dos_8x3/mimetool.py
new file mode 100755
index 0000000..da33a77
--- /dev/null
+++ b/Lib/dos_8x3/mimetool.py
@@ -0,0 +1,187 @@
+# Various tools used by MIME-reading or MIME-writing programs.
+
+
+import os
+import rfc822
+import string
+import tempfile
+
+
+# A derived class of rfc822.Message that knows about MIME headers and
+# contains some hooks for decoding encoded and multipart messages.
+
+class Message(rfc822.Message):
+
+	def __init__(self, fp, seekable = 1):
+		rfc822.Message.__init__(self, fp, seekable)
+		self.encodingheader = \
+			self.getheader('content-transfer-encoding')
+		self.typeheader = \
+			self.getheader('content-type')
+		self.parsetype()
+		self.parseplist()
+
+	def parsetype(self):
+		str = self.typeheader
+		if str == None:
+			str = 'text/plain'
+		if ';' in str:
+			i = string.index(str, ';')
+			self.plisttext = str[i:]
+			str = str[:i]
+		else:
+			self.plisttext = ''
+		fields = string.splitfields(str, '/')
+		for i in range(len(fields)):
+			fields[i] = string.lower(string.strip(fields[i]))
+		self.type = string.joinfields(fields, '/')
+		self.maintype = fields[0]
+		self.subtype = string.joinfields(fields[1:], '/')
+
+	def parseplist(self):
+		str = self.plisttext
+		self.plist = []
+		while str[:1] == ';':
+			str = str[1:]
+			if ';' in str:
+				# XXX Should parse quotes!
+				end = string.index(str, ';')
+			else:
+				end = len(str)
+			f = str[:end]
+			if '=' in f:
+				i = string.index(f, '=')
+				f = string.lower(string.strip(f[:i])) + \
+					'=' + string.strip(f[i+1:])
+			self.plist.append(string.strip(f))
+			str = str[end:]
+
+	def getplist(self):
+		return self.plist
+
+	def getparam(self, name):
+		name = string.lower(name) + '='
+		n = len(name)
+		for p in self.plist:
+			if p[:n] == name:
+				return rfc822.unquote(p[n:])
+		return None
+
+	def getencoding(self):
+		if self.encodingheader == None:
+			return '7bit'
+		return string.lower(self.encodingheader)
+
+	def gettype(self):
+		return self.type
+
+	def getmaintype(self):
+		return self.maintype
+
+	def getsubtype(self):
+		return self.subtype
+
+
+
+
+# Utility functions
+# -----------------
+
+
+# Return a random string usable as a multipart boundary.
+# The method used is so that it is *very* unlikely that the same
+# string of characters will ever occur again in the Universe,
+# so the caller needn't check the data it is packing for the
+# occurrence of the boundary.
+#
+# The boundary contains dots so you have to quote it in the header.
+
+_prefix = None
+
+def choose_boundary():
+	global _prefix
+	import time
+	import rand
+	if _prefix == None:
+		import socket
+		import os
+		hostid = socket.gethostbyname(socket.gethostname())
+		uid = `os.getuid()`
+		pid = `os.getpid()`
+		seed = `rand.rand()`
+		_prefix = hostid + '.' + uid + '.' + pid
+	timestamp = `int(time.time())`
+	seed = `rand.rand()`
+	return _prefix + '.' + timestamp + '.' + seed
+
+
+# Subroutines for decoding some common content-transfer-types
+
+# XXX This requires that uudecode and mmencode are in $PATH
+
+def decode(input, output, encoding):
+	if decodetab.has_key(encoding):
+		pipethrough(input, decodetab[encoding], output)
+	else:
+		raise ValueError, \
+		      'unknown Content-Transfer-Encoding: %s' % encoding
+
+def encode(input, output, encoding):
+	if encodetab.has_key(encoding):
+		pipethrough(input, encodetab[encoding], output)
+	else:
+		raise ValueError, \
+		      'unknown Content-Transfer-Encoding: %s' % encoding
+
+uudecode_pipe = '''(
+TEMP=/tmp/@uu.$$
+sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
+cat $TEMP
+rm $TEMP
+)'''
+
+decodetab = {
+	'uuencode':		uudecode_pipe,
+	'x-uuencode':		uudecode_pipe,
+	'quoted-printable':	'mmencode -u -q',
+	'base64':		'mmencode -u -b',
+}
+
+encodetab = {
+	'x-uuencode':		'uuencode tempfile',
+	'uuencode':		'uuencode tempfile',
+	'quoted-printable':	'mmencode -q',
+	'base64':		'mmencode -b',
+}
+
+def pipeto(input, command):
+	pipe = os.popen(command, 'w')
+	copyliteral(input, pipe)
+	pipe.close()
+
+def pipethrough(input, command, output):
+	tempname = tempfile.mktemp()
+	try:
+		temp = open(tempname, 'w')
+	except IOError:
+		print '*** Cannot create temp file', `tempname`
+		return
+	copyliteral(input, temp)
+	temp.close()
+	pipe = os.popen(command + ' <' + tempname, 'r')
+	copybinary(pipe, output)
+	pipe.close()
+	os.unlink(tempname)
+
+def copyliteral(input, output):
+	while 1:
+		line = input.readline()
+		if not line: break
+		output.write(line)
+
+def copybinary(input, output):
+	BUFSIZE = 8192
+	while 1:
+		line = input.read(BUFSIZE)
+		if not line: break
+		output.write(line)
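+
+# Editor's sketch (not part of the original module): parsing a small
+# header block with the Message class above.  The standard StringIO
+# module is assumed to be available; the headers are made up.
+def example_parse_headers():
+	import StringIO
+	headers = 'Content-Type: text/plain; charset=us-ascii\n'
+	headers = headers + 'Content-Transfer-Encoding: 7bit\n\n'
+	m = Message(StringIO.StringIO(headers))
+	print m.gettype()		# text/plain
+	print m.getparam('charset')	# us-ascii
+	print m.getencoding()		# 7bit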
diff --git a/Lib/dos_8x3/multifil.py b/Lib/dos_8x3/multifil.py
new file mode 100755
index 0000000..71e0dd0
--- /dev/null
+++ b/Lib/dos_8x3/multifil.py
@@ -0,0 +1,128 @@
+# A class that makes each part of a multipart message "feel" like an
+# ordinary file, as long as you use fp.readline().  Allows recursive
+# use, for nested multipart messages.  Probably best used together
+# with module mimetools.
+#
+# Suggested use:
+#
+# real_fp = open(...)
+# fp = MultiFile(real_fp)
+#
+# "read some lines from fp"
+# fp.push(separator)
+# while 1:
+#	"read lines from fp until it returns an empty string" (A)
+#	if not fp.next(): break
+# fp.pop()
+# "read remaining lines from fp until it returns an empty string"
+#
+# The latter sequence may be used recursively at (A).
+# It is also allowed to use multiple push()...pop() sequences.
+# Note that if a nested multipart message is terminated by a separator
+# for an outer message, this is not reported, even though it is really
+# illegal input.
+
+import sys
+import string
+
+err = sys.stderr.write
+
+Error = 'multifile.Error'
+
+class MultiFile:
+	#
+	def __init__(self, fp):
+		self.fp = fp
+		self.stack = [] # Grows down
+		self.level = 0
+		self.last = 0
+		self.start = self.fp.tell()
+		self.posstack = [] # Grows down
+	#
+	def tell(self):
+		if self.level > 0:
+			return self.lastpos
+		return self.fp.tell() - self.start
+	#
+	def seek(self, pos):
+		if not 0 <= pos <= self.tell() or \
+				self.level > 0 and pos > self.lastpos:
+			raise Error, 'bad MultiFile.seek() call'
+		self.fp.seek(pos + self.start)
+		self.level = 0
+		self.last = 0
+	#
+	def readline(self):
+		if self.level > 0: return ''
+		line = self.fp.readline()
+		if not line:
+			self.level = len(self.stack)
+			self.last = (self.level > 0)
+			if self.last:
+				err('*** Sudden EOF in MultiFile.readline()\n')
+			return ''
+		if line[:2] <> '--': return line
+		n = len(line)
+		k = n
+		while k > 0 and line[k-1] in string.whitespace: k = k-1
+		mark = line[2:k]
+		if mark[-2:] == '--': mark1 = mark[:-2]
+		else: mark1 = None
+		for i in range(len(self.stack)):
+			sep = self.stack[i]
+			if sep == mark:
+				self.last = 0
+				break
+			elif mark1 <> None and sep == mark1:
+				self.last = 1
+				break
+		else:
+			return line
+		# Get here after break out of loop
+		self.lastpos = self.tell() - len(line)
+		self.level = i+1
+		if self.level > 1:
+			err('*** Missing endmarker in MultiFile.readline()\n')
+		return ''
+	#
+	def readlines(self):
+		list = []
+		while 1:
+			line = self.readline()
+			if not line: break
+			list.append(line)
+		return list
+	#
+	def read(self): # Note: no size argument -- read until EOF only!
+		return string.joinfields(self.readlines(), '')
+	#
+	def next(self):
+		while self.readline(): pass
+		if self.level > 1 or self.last:
+			return 0
+		self.level = 0
+		self.last = 0
+		self.start = self.fp.tell()
+		return 1
+	#
+	def push(self, sep):
+		if self.level > 0:
+			raise Error, 'bad MultiFile.push() call'
+		self.stack.insert(0, sep)
+		self.posstack.insert(0, self.start)
+		self.start = self.fp.tell()
+	#
+	def pop(self):
+		if self.stack == []:
+			raise Error, 'bad MultiFile.pop() call'
+		if self.level <= 1:
+			self.last = 0
+		else:
+			abslastpos = self.lastpos + self.start
+		self.level = max(0, self.level - 1)
+		del self.stack[0]
+		self.start = self.posstack[0]
+		del self.posstack[0]
+		if self.level > 0:
+			self.lastpos = abslastpos - self.start
+	#
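+
+# Editor's sketch (not part of the original class): the push()/next()/
+# pop() sequence suggested in the comment at the top of this file, fed
+# from a StringIO object (assumed available) instead of a real file.
+def example_walk_parts():
+	import StringIO
+	raw = 'preamble\n' + '--SEP\n' + 'first part\n' \
+	      + '--SEP\n' + 'second part\n' + '--SEP--\n'
+	fp = MultiFile(StringIO.StringIO(raw))
+	fp.push('SEP')
+	print 'preamble:', `fp.read()`	# text before the first separator
+	while fp.next():
+		print 'part:', `fp.read()`
+	fp.pop()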
diff --git a/Lib/dos_8x3/nturl2pa.py b/Lib/dos_8x3/nturl2pa.py
new file mode 100755
index 0000000..17f01f5
--- /dev/null
+++ b/Lib/dos_8x3/nturl2pa.py
@@ -0,0 +1,52 @@
+#
+# nturl2path converts an NT pathname to a file URL and vice versa.
+
+def url2pathname(url):
+	""" Convert a URL to a DOS path...
+	Currently only works for absolute paths
+
+		///C|/foo/bar/spam.foo
+
+			becomes
+
+		C:\foo\bar\spam.foo
+	"""
+	import string
+	comp = string.splitfields(url, '|')
+	if len(comp) != 2 or comp[0][-1] not in string.letters:
+		error = 'Bad URL: ' + url
+		raise IOError, error
+	drive = string.upper(comp[0][-1])
+	components = string.splitfields(comp[1], '/')
+	path = drive + ':'
+	for  comp in components:
+		if comp:
+			path = path + '\\' + comp
+	return path
+
+def pathname2url(p):
+
+	""" Convert a DOS path name to a file url...
+	Currently only works for absolute paths
+
+		C:\foo\bar\spam.foo
+
+			becomes
+
+		///C|/foo/bar/spam.foo
+	"""
+
+	import string
+	comp = string.splitfields(p, ':')
+	if len(comp) != 2 or len(comp[0]) > 1:
+		error = 'Bad path: ' + p
+		raise IOError, error
+
+	drive = string.upper(comp[0])
+	components = string.splitfields(comp[1], '\\')
+	path = '///' + drive + '|'
+	for comp in components:
+		if comp:
+			path = path + '/' + comp
+	return path
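+
+# Editor's sketch (not part of the original module): the two functions
+# above are inverses of one another for absolute DOS paths.
+def example_roundtrip():
+	path = 'C:\\foo\\bar\\spam.foo'
+	url = pathname2url(path)
+	print url			# ///C|/foo/bar/spam.foo
+	print url2pathname(url)		# C:\foo\bar\spam.foo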
diff --git a/Lib/dos_8x3/para.py b/Lib/dos_8x3/para.py
new file mode 100755
index 0000000..c9b3bdd
--- /dev/null
+++ b/Lib/dos_8x3/para.py
@@ -0,0 +1,409 @@
+# Text formatting abstractions
+# Note -- this module is obsolete, it's too slow anyway
+
+
+# Oft-used type object
+Int = type(0)
+
+
+# Represent a paragraph.  This is a list of words with associated
+# font and size information, plus indents and justification for the
+# entire paragraph.
+# Once the words have been added to a paragraph, it can be laid out
+# for different line widths.  Once laid out, it can be rendered at
+# different screen locations.  Once rendered, it can be queried
+# for mouse hits, and parts of the text can be highlighted
+class Para:
+	#
+	def __init__(self):
+		self.words = [] # The words
+		self.just = 'l' # Justification: 'l', 'r', 'lr' or 'c'
+		self.indent_left = self.indent_right = self.indent_hang = 0
+		# Final lay-out parameters, may change
+		self.left = self.top = self.right = self.bottom = \
+			self.width = self.height = self.lines = None
+	#
+	# Add a word, computing size information for it.
+	# Words may also be added manually by appending to self.words
+	# Each word should be a 7-tuple:
+	# (font, text, width, space, stretch, ascent, descent)
+	def addword(self, d, font, text, space, stretch):
+		if font <> None:
+			d.setfont(font)
+		width = d.textwidth(text)
+		ascent = d.baseline()
+		descent = d.lineheight() - ascent
+		spw = d.textwidth(' ')
+		space = space * spw
+		stretch = stretch * spw
+		tuple = (font, text, width, space, stretch, ascent, descent)
+		self.words.append(tuple)
+	#
+	# Hooks to begin and end anchors -- insert numbers in the word list!
+	def bgn_anchor(self, id):
+		self.words.append(id)
+	#
+	def end_anchor(self, id):
+		self.words.append(0)
+	#
+	# Return the total length (width) of the text added so far, in pixels
+	def getlength(self):
+		total = 0
+		for word in self.words:
+			if type(word) <> Int:
+				total = total + word[2] + word[3]
+		return total
+	#
+	# Tab to a given position (relative to the current left indent):
+	# remove all stretch, add fixed space up to the new indent.
+	# If the current position is already beyond the tab stop,
+	# don't add any new space (but still remove the stretch)
+	def tabto(self, tab):
+		total = 0
+		as, de = 1, 0
+		for i in range(len(self.words)):
+			word = self.words[i]
+			if type(word) == Int: continue
+			fo, te, wi, sp, st, as, de = word
+			self.words[i] = fo, te, wi, sp, 0, as, de
+			total = total + wi + sp
+		if total < tab:
+			self.words.append((None, '', 0, tab-total, 0, as, de))
+	#
+	# Make a hanging tag: tab to hang, increment indent_left by hang,
+	# and reset indent_hang to -hang
+	def makehangingtag(self, hang):
+		self.tabto(hang)
+		self.indent_left = self.indent_left + hang
+		self.indent_hang = -hang
+	#
+	# Decide where the line breaks will be given some screen width
+	def layout(self, linewidth):
+		self.width = linewidth
+		height = 0
+		self.lines = lines = []
+		avail1 = self.width - self.indent_left - self.indent_right
+		avail = avail1 - self.indent_hang
+		words = self.words
+		i = 0
+		n = len(words)
+		lastfont = None
+		while i < n:
+			firstfont = lastfont
+			charcount = 0
+			width = 0
+			stretch = 0
+			ascent = 0
+			descent = 0
+			lsp = 0
+			j = i
+			while i < n:
+				word = words[i]
+				if type(word) == Int:
+					if word > 0 and width >= avail:
+						break
+					i = i+1
+					continue
+				fo, te, wi, sp, st, as, de = word
+				if width + wi > avail and width > 0 and wi > 0:
+					break
+				if fo <> None:
+					lastfont = fo
+					if width == 0:
+						firstfont = fo
+				charcount = charcount + len(te) + (sp > 0)
+				width = width + wi + sp
+				lsp = sp
+				stretch = stretch + st
+				lst = st
+				ascent = max(ascent, as)
+				descent = max(descent, de)
+				i = i+1
+			while i > j and type(words[i-1]) == Int and \
+				words[i-1] > 0: i = i-1
+			width = width - lsp
+			if i < n:
+				stretch = stretch - lst
+			else:
+				stretch = 0
+			tuple = i-j, firstfont, charcount, width, stretch, \
+				ascent, descent
+			lines.append(tuple)
+			height = height + ascent + descent
+			avail = avail1
+		self.height = height
+	#
+	# Call a function for all words in a line
+	def visit(self, wordfunc, anchorfunc):
+		avail1 = self.width - self.indent_left - self.indent_right
+		avail = avail1 - self.indent_hang
+		v = self.top
+		i = 0
+		for tuple in self.lines:
+			wordcount, firstfont, charcount, width, stretch, \
+				ascent, descent = tuple
+			h = self.left + self.indent_left
+			if i == 0: h = h + self.indent_hang
+			extra = 0
+			if self.just == 'r': h = h + avail - width
+			elif self.just == 'c': h = h + (avail - width) / 2
+			elif self.just == 'lr' and stretch > 0:
+				extra = avail - width
+			v2 = v + ascent + descent
+			for j in range(i, i+wordcount):
+				word = self.words[j]
+				if type(word) == Int:
+					ok = anchorfunc(self, tuple, word, \
+							h, v)
+					if ok <> None: return ok
+					continue
+				fo, te, wi, sp, st, as, de = word
+				if extra > 0 and stretch > 0:
+					ex = extra * st / stretch
+					extra = extra - ex
+					stretch = stretch - st
+				else:
+					ex = 0
+				h2 = h + wi + sp + ex
+				ok = wordfunc(self, tuple, word, h, v, \
+					h2, v2, (j==i), (j==i+wordcount-1))
+				if ok <> None: return ok
+				h = h2
+			v = v2
+			i = i + wordcount
+			avail = avail1
+	#
+	# Render a paragraph in "drawing object" d, using the rectangle
+	# given by (left, top, right) with an unspecified bottom.
+	# Return the computed bottom of the text.
+	def render(self, d, left, top, right):
+		if self.width <> right-left:
+			self.layout(right-left)
+		self.left = left
+		self.top = top
+		self.right = right
+		self.bottom = self.top + self.height
+		self.anchorid = 0
+		try:
+			self.d = d
+			self.visit(self.__class__._renderword, \
+				   self.__class__._renderanchor)
+		finally:
+			self.d = None
+		return self.bottom
+	#
+	def _renderword(self, tuple, word, h, v, h2, v2, isfirst, islast):
+		if word[0] <> None: self.d.setfont(word[0])
+		baseline = v + tuple[5]
+		self.d.text((h, baseline - word[5]), word[1])
+		if self.anchorid > 0:
+			self.d.line((h, baseline+2), (h2, baseline+2))
+	#
+	def _renderanchor(self, tuple, word, h, v):
+		self.anchorid = word
+	#
+	# Return which anchor(s) was hit by the mouse
+	def hitcheck(self, mouseh, mousev):
+		self.mouseh = mouseh
+		self.mousev = mousev
+		self.anchorid = 0
+		self.hits = []
+		self.visit(self.__class__._hitcheckword, \
+			   self.__class__._hitcheckanchor)
+		return self.hits
+	#
+	def _hitcheckword(self, tuple, word, h, v, h2, v2, isfirst, islast):
+		if self.anchorid > 0 and h <= self.mouseh <= h2 and \
+			v <= self.mousev <= v2:
+			self.hits.append(self.anchorid)
+	#
+	def _hitcheckanchor(self, tuple, word, h, v):
+		self.anchorid = word
+	#
+	# Return whether the given anchor id is present
+	def hasanchor(self, id):
+		return id in self.words or -id in self.words
+	#
+	# Extract the raw text from the word list, substituting one space
+	# for non-empty inter-word space, and terminating with '\n'
+	def extract(self):
+		text = ''
+		for w in self.words:
+			if type(w) <> Int:
+				word = w[1]
+				if w[3]: word = word + ' '
+				text = text + word
+		return text + '\n'
+	#
+	# Return which character position was hit by the mouse, as
+	# an offset in the entire text as returned by extract().
+	# Return None if the mouse was not in this paragraph
+	def whereis(self, d, mouseh, mousev):
+		if mousev < self.top or mousev > self.bottom:
+			return None
+		self.mouseh = mouseh
+		self.mousev = mousev
+		self.lastfont = None
+		self.charcount = 0
+		try:
+			self.d = d
+			return self.visit(self.__class__._whereisword, \
+					  self.__class__._whereisanchor)
+		finally:
+			self.d = None
+	#
+	def _whereisword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
+		fo, te, wi, sp, st, as, de = word
+		if fo <> None: self.lastfont = fo
+		h = h1
+		if isfirst: h1 = 0
+		if islast: h2 = 999999
+		if not (v1 <= self.mousev <= v2 and h1 <= self.mouseh <= h2):
+			self.charcount = self.charcount + len(te) + (sp > 0)
+			return
+		if self.lastfont <> None:
+			self.d.setfont(self.lastfont)
+		cc = 0
+		for c in te:
+			cw = self.d.textwidth(c)
+			if self.mouseh <= h + cw/2:
+				return self.charcount + cc
+			cc = cc+1
+			h = h+cw
+		self.charcount = self.charcount + cc
+		if self.mouseh <= (h+h2) / 2:
+			return self.charcount
+		else:
+			return self.charcount + 1
+	#
+	def _whereisanchor(self, tuple, word, h, v):
+		pass
+	#
+	# Return screen position corresponding to position in paragraph.
+	# Return tuple (h, vtop, vbaseline, vbottom).
+	# This is more or less the inverse of whereis()
+	def screenpos(self, d, pos):
+		if pos < 0:
+			ascent, descent = self.lines[0][5:7]
+			return self.left, self.top, self.top + ascent, \
+				self.top + ascent + descent
+		self.pos = pos
+		self.lastfont = None
+		try:
+			self.d = d
+			ok = self.visit(self.__class__._screenposword, \
+					self.__class__._screenposanchor)
+		finally:
+			self.d = None
+		if ok == None:
+			ascent, descent = self.lines[-1][5:7]
+			ok = self.right, self.bottom - ascent - descent, \
+				self.bottom - descent, self.bottom
+		return ok
+	#
+	def _screenposword(self, tuple, word, h1, v1, h2, v2, isfirst, islast):
+		fo, te, wi, sp, st, as, de = word
+		if fo <> None: self.lastfont = fo
+		cc = len(te) + (sp > 0)
+		if self.pos > cc:
+			self.pos = self.pos - cc
+			return
+		if self.pos < cc:
+			self.d.setfont(self.lastfont)
+			h = h1 + self.d.textwidth(te[:self.pos])
+		else:
+			h = h2
+		ascent, descent = tuple[5:7]
+		return h, v1, v1+ascent, v2
+	#
+	def _screenposanchor(self, tuple, word, h, v):
+		pass
+	#
+	# Invert the stretch of text between pos1 and pos2.
+	# If pos1 is None, the beginning is implied;
+	# if pos2 is None, the end is implied.
+	# Undoes its own effect when called again with the same arguments
+	def invert(self, d, pos1, pos2):
+		if pos1 == None:
+			pos1 = self.left, self.top, self.top, self.top
+		else:
+			pos1 = self.screenpos(d, pos1)
+		if pos2 == None:
+			pos2 = self.right, self.bottom,self.bottom,self.bottom
+		else:
+			pos2 = self.screenpos(d, pos2)
+		h1, top1, baseline1, bottom1 = pos1
+		h2, top2, baseline2, bottom2 = pos2
+		if bottom1 <= top2:
+			d.invert((h1, top1), (self.right, bottom1))
+			h1 = self.left
+			if bottom1 < top2:
+				d.invert((h1, bottom1), (self.right, top2))
+			top1, bottom1 = top2, bottom2
+		d.invert((h1, top1), (h2, bottom2))
+
+
+# Test class Para
+# XXX This was last used on the Mac, hence the weird fonts...
+def test():
+	import stdwin
+	from stdwinevents import *
+	words = 'The', 'quick', 'brown', 'fox', 'jumps', 'over', \
+		'the', 'lazy', 'dog.'
+	paralist = []
+	for just in 'l', 'r', 'lr', 'c':
+		p = Para()
+		p.just = just
+		p.addword(stdwin, ('New York', 'p', 12), words[0], 1, 1)
+		for word in words[1:-1]:
+			p.addword(stdwin, None, word, 1, 1)
+		p.addword(stdwin, None, words[-1], 2, 4)
+		p.addword(stdwin, ('New York', 'b', 18), 'Bye!', 0, 0)
+		p.addword(stdwin, ('New York', 'p', 10), 'Bye!', 0, 0)
+		paralist.append(p)
+	window = stdwin.open('Para.test()')
+	start = stop = selpara = None
+	while 1:
+		etype, win, detail = stdwin.getevent()
+		if etype == WE_CLOSE:
+			break
+		if etype == WE_SIZE:
+			window.change((0, 0), (1000, 1000))
+		if etype == WE_DRAW:
+			width, height = window.getwinsize()
+			d = None
+			try:
+				d = window.begindrawing()
+				d.cliprect(detail)
+				d.erase(detail)
+				v = 0
+				for p in paralist:
+					v = p.render(d, 0, v, width)
+					if p == selpara and \
+					   start <> None and stop <> None:
+						p.invert(d, start, stop)
+			finally:
+				if d: d.close()
+		if etype == WE_MOUSE_DOWN:
+			if selpara and start <> None and stop <> None:
+				d = window.begindrawing()
+				selpara.invert(d, start, stop)
+				d.close()
+			start = stop = selpara = None
+			mouseh, mousev = detail[0]
+			for p in paralist:
+				start = p.whereis(stdwin, mouseh, mousev)
+				if start <> None:
+					selpara = p
+					break
+		if etype == WE_MOUSE_UP and start <> None and selpara:
+			mouseh, mousev = detail[0]
+			stop = selpara.whereis(stdwin, mouseh, mousev)
+			if stop == None: start = selpara = None
+			else:
+				if start > stop:
+					start, stop = stop, start
+				d = window.begindrawing()
+				selpara.invert(d, start, stop)
+				d.close()
+	window.close()
diff --git a/Lib/dos_8x3/posixfil.py b/Lib/dos_8x3/posixfil.py
new file mode 100755
index 0000000..459cd52
--- /dev/null
+++ b/Lib/dos_8x3/posixfil.py
@@ -0,0 +1,207 @@
+#
+# Start of posixfile.py
+#
+
+#
+# Extended file operations
+#
+# f = posixfile.open(filename, [mode, [bufsize]])
+#	will create a new posixfile object
+#
+# f = posixfile.fileopen(fileobject)
+#	will create a posixfile object from a builtin file object
+#
+# f.file()
+#	will return the original builtin file object
+#
+# f.dup()
+#	will return a new file object based on a new filedescriptor
+#
+# f.dup2(fd)
+#	will return a new file object based on the given filedescriptor
+#
+# f.flags(mode)
+#	will turn on the associated flag (merge)
+#	mode can contain the following characters:
+#
+#   (character representing a flag)
+#	a	append only flag
+#	c	close on exec flag
+#	n	no delay flag
+#	s	synchronization flag
+#   (modifiers)
+#	!	turn flags 'off' instead of default 'on'
+#	=	copy flags 'as is' instead of default 'merge'
+#	?	return a string in which the characters represent the flags
+#		that are set
+#
+#	note: - the '!' and '=' modifiers are mutually exclusive.
+#	      - the '?' modifier will return the status of the flags after they
+#		have been changed by other characters in the mode string
+#
+# f.lock(mode [, len [, start [, whence]]])
+#	will (un)lock a region
+#	mode can contain the following characters:
+#
+#   (character representing type of lock)
+#	u	unlock
+#	r	read lock
+#	w	write lock
+#   (modifiers)
+#	|	wait until the lock can be granted
+#	?	return the first lock conflicting with the requested lock
+#		or 'None' if there is no conflict. The lock returned is in the
+#		format (mode, len, start, whence, pid) where mode is a
+#		character representing the type of lock ('r' or 'w')
+#
+#	note: - the '?' modifier prevents a region from being locked; it is
+#		query only
+#
+
+class _posixfile_:
+    states = ['open', 'closed']
+
+    #
+    # Internal routines
+    #
+    def __repr__(self):
+	file = self._file_
+	return "<%s posixfile '%s', mode '%s' at %s>" % \
+		(self.states[file.closed], file.name, file.mode, \
+		 hex(id(self))[2:])
+
+    def __del__(self):
+	self._file_.close()
+
+    #
+    # Initialization routines
+    #
+    def open(self, name, mode='r', bufsize=-1):
+	import __builtin__
+	return self.fileopen(__builtin__.open(name, mode, bufsize))
+
+    def fileopen(self, file):
+	if `type(file)` != "<type 'file'>":
+	    raise TypeError, 'posixfile.fileopen() arg must be file object'
+	self._file_  = file
+	# Copy basic file methods
+	for method in file.__methods__:
+	    setattr(self, method, getattr(file, method))
+	return self
+
+    #
+    # New methods
+    #
+    def file(self):
+	return self._file_
+
+    def dup(self):
+	import posix
+
+	try: ignore = posix.fdopen
+	except: raise AttributeError, 'dup() method unavailable'
+
+	return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
+
+    def dup2(self, fd):
+	import posix
+
+	try: ignore = posix.fdopen
+	except: raise AttributeError, 'dup() method unavailable'
+
+	posix.dup2(self._file_.fileno(), fd)
+	return posix.fdopen(fd, self._file_.mode)
+
+    def flags(self, *which):
+	import fcntl, FCNTL
+
+	if which:
+	    if len(which) > 1:
+		raise TypeError, 'Too many arguments'
+	    which = which[0]
+	else: which = '?'
+
+	l_flags = 0
+	if 'n' in which: l_flags = l_flags | FCNTL.O_NDELAY
+	if 'a' in which: l_flags = l_flags | FCNTL.O_APPEND
+	if 's' in which: l_flags = l_flags | FCNTL.O_SYNC
+
+	file = self._file_
+
+	if '=' not in which:
+	    cur_fl = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
+	    if '!' in which: l_flags = cur_fl & ~ l_flags
+	    else: l_flags = cur_fl | l_flags
+
+	l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFL, l_flags)
+
+	if 'c' in which:	
+	    arg = ('!' not in which)	# 0 is don't, 1 is do close on exec
+	    l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFD, arg)
+
+	if '?' in which:
+	    which = ''			# Return current flags
+	    l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
+	    if FCNTL.O_APPEND & l_flags: which = which + 'a'
+	    if fcntl.fcntl(file.fileno(), FCNTL.F_GETFD, 0) & 1:
+		which = which + 'c'
+	    if FCNTL.O_NDELAY & l_flags: which = which + 'n'
+	    if FCNTL.O_SYNC & l_flags: which = which + 's'
+	    return which
+	
+    def lock(self, how, *args):
+	import struct, fcntl, FCNTL
+
+	if 'w' in how: l_type = FCNTL.F_WRLCK
+	elif 'r' in how: l_type = FCNTL.F_RDLCK
+	elif 'u' in how: l_type = FCNTL.F_UNLCK
+	else: raise TypeError, 'no type of lock specified'
+
+	if '|' in how: cmd = FCNTL.F_SETLKW
+	elif '?' in how: cmd = FCNTL.F_GETLK
+	else: cmd = FCNTL.F_SETLK
+
+	l_whence = 0
+	l_start = 0
+	l_len = 0
+
+	if len(args) == 1:
+	    l_len = args[0]
+	elif len(args) == 2:
+	    l_len, l_start = args
+	elif len(args) == 3:
+	    l_len, l_start, l_whence = args
+	elif len(args) > 3:
+	    raise TypeError, 'too many arguments'
+
+	flock = struct.pack('hhllhh', l_type, l_whence, l_start, l_len, 0, 0)
+	flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
+
+	if '?' in how:
+	    l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
+		struct.unpack('hhllhh', flock)
+	    if l_type != FCNTL.F_UNLCK:
+		if l_type == FCNTL.F_RDLCK:
+		    return 'r', l_len, l_start, l_whence, l_pid
+		else:
+		    return 'w', l_len, l_start, l_whence, l_pid
+
+#
+# Public routine to obtain a posixfile object
+#
+def open(name, mode='r', bufsize=-1):
+    return _posixfile_().open(name, mode, bufsize)
+
+def fileopen(file):
+    return _posixfile_().fileopen(file)
+
+#
+# Constants
+#
+SEEK_SET = 0
+SEEK_CUR = 1
+SEEK_END = 2
+
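+# Editor's sketch (not part of the original module): write-locking the
+# first 10 bytes of a scratch file with the lock() mode strings
+# documented at the top of this file.  Only meaningful on a system with
+# fcntl; the filename is made up.
+def example_lock():
+    f = open('/tmp/posixfile-demo', 'w+')
+    f.lock('w|', 10)	# write lock on bytes 0-9, wait if already locked
+    f.write('0123456789')
+    f.lock('u', 10)	# release the lock on the same region
+    f.close()
+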
+#
+# End of posixfile.py
+#
diff --git a/Lib/dos_8x3/posixpat.py b/Lib/dos_8x3/posixpat.py
new file mode 100755
index 0000000..d284834
--- /dev/null
+++ b/Lib/dos_8x3/posixpat.py
@@ -0,0 +1,307 @@
+# Module 'posixpath' -- common operations on POSIX pathnames
+
+import posix
+import stat
+
+
+# Normalize the case of a pathname.  Trivial in Posix, string.lower on Mac.
+# On MS-DOS this may also turn slashes into backslashes; however, other
+# normalizations (such as optimizing '../' away) are not allowed
+# (another function should be defined to do that).
+
+def normcase(s):
+	return s
+
+
+# Return whether a path is absolute.
+# Trivial in Posix, harder on the Mac or MS-DOS.
+
+def isabs(s):
+	return s[:1] == '/'
+
+
+# Join two pathnames.
+# Ignore the first part if the second part is absolute.
+# Insert a '/' unless the first part is empty or already ends in '/'.
+
+def join(a, b):
+	if b[:1] == '/': return b
+	if a == '' or a[-1:] == '/': return a + b
+	# Note: join('x', '') returns 'x/'; is this what we want?
+	return a + '/' + b
+
+
+# Split a path in head (everything up to the last '/') and tail (the
+# rest).  If the path ends in '/', tail will be empty.  If there is no
+# '/' in the path, head  will be empty.
+# Trailing '/'es are stripped from head unless it is the root.
+
+def split(p):
+	import string
+	i = string.rfind(p, '/') + 1
+	head, tail = p[:i], p[i:]
+	if head and head <> '/'*len(head):
+		while head[-1] == '/':
+			head = head[:-1]
+	return head, tail
+
+
+# Split a path in root and extension.
+# The extension is everything starting at the last dot in the last
+# pathname component; the root is everything before that.
+# It is always true that root + ext == p.
+
+def splitext(p):
+	root, ext = '', ''
+	for c in p:
+		if c == '/':
+			root, ext = root + ext + c, ''
+		elif c == '.':
+			if ext:
+				root, ext = root + ext, c
+			else:
+				ext = c
+		elif ext:
+			ext = ext + c
+		else:
+			root = root + c
+	return root, ext
+
+
+# Split a pathname into a drive specification and the rest of the
+# path.  Useful on DOS/Windows/NT; on Unix, the drive is always empty.
+
+def splitdrive(p):
+	return '', p
+
+
+# Return the tail (basename) part of a path.
+
+def basename(p):
+	return split(p)[1]
+
+
+# Return the head (dirname) part of a path.
+
+def dirname(p):
+	return split(p)[0]
+
+
+# Return the longest prefix of all list elements.
+
+def commonprefix(m):
+	if not m: return ''
+	prefix = m[0]
+	for item in m:
+		for i in range(len(prefix)):
+			if prefix[:i+1] <> item[:i+1]:
+				prefix = prefix[:i]
+				if i == 0: return ''
+				break
+	return prefix
+
+
+# Is a path a symbolic link?
+# This will always return false on systems where posix.lstat doesn't exist.
+
+def islink(path):
+	try:
+		st = posix.lstat(path)
+	except (posix.error, AttributeError):
+		return 0
+	return stat.S_ISLNK(st[stat.ST_MODE])
+
+
+# Does a path exist?
+# This is false for dangling symbolic links.
+
+def exists(path):
+	try:
+		st = posix.stat(path)
+	except posix.error:
+		return 0
+	return 1
+
+
+# Is a path a posix directory?
+# This follows symbolic links, so both islink() and isdir() can be true
+# for the same path.
+
+def isdir(path):
+	try:
+		st = posix.stat(path)
+	except posix.error:
+		return 0
+	return stat.S_ISDIR(st[stat.ST_MODE])
+
+
+# Is a path a regular file?
+# This follows symbolic links, so both islink() and isfile() can be true
+# for the same path.
+
+def isfile(path):
+	try:
+		st = posix.stat(path)
+	except posix.error:
+		return 0
+	return stat.S_ISREG(st[stat.ST_MODE])
+
+
+# Are two filenames really pointing to the same file?
+
+def samefile(f1, f2):
+	s1 = posix.stat(f1)
+	s2 = posix.stat(f2)
+	return samestat(s1, s2)
+
+
+# Are two open files really referencing the same file?
+# (Not necessarily the same file descriptor!)
+# XXX Oops, posix.fstat() doesn't exist yet!
+
+def sameopenfile(fp1, fp2):
+	s1 = posix.fstat(fp1)
+	s2 = posix.fstat(fp2)
+	return samestat(s1, s2)
+
+
+# Are two stat buffers (obtained from stat, fstat or lstat)
+# describing the same file?
+
+def samestat(s1, s2):
+	return s1[stat.ST_INO] == s2[stat.ST_INO] and \
+		s1[stat.ST_DEV] == s2[stat.ST_DEV]
+
+
+# Is a path a mount point?
+# (Does this work for all UNIXes?  Is it even guaranteed to work by POSIX?)
+
+def ismount(path):
+	try:
+		s1 = posix.stat(path)
+		s2 = posix.stat(join(path, '..'))
+	except posix.error:
+		return 0 # It doesn't exist -- so not a mount point :-)
+	dev1 = s1[stat.ST_DEV]
+	dev2 = s2[stat.ST_DEV]
+	if dev1 != dev2:
+		return 1		# path/.. is on a different device than path
+	ino1 = s1[stat.ST_INO]
+	ino2 = s2[stat.ST_INO]
+	if ino1 == ino2:
+		return 1		# path/.. is the same i-node as path
+	return 0
+
+
+# Directory tree walk.
+# For each directory under top (including top itself, but excluding
+# '.' and '..'), func(arg, dirname, filenames) is called, where
+# dirname is the name of the directory and filenames is the list of
+# files (and subdirectories etc.) in the directory.
+# The func may modify the filenames list, to implement a filter,
+# or to impose a different order of visiting.
+
+def walk(top, func, arg):
+	try:
+		names = posix.listdir(top)
+	except posix.error:
+		return
+	func(arg, top, names)
+	exceptions = ('.', '..')
+	for name in names:
+		if name not in exceptions:
+			name = join(top, name)
+			if isdir(name) and not islink(name):
+				walk(name, func, arg)
+
+
+# Expand paths beginning with '~' or '~user'.
+# '~' means $HOME; '~user' means that user's home directory.
+# If the path doesn't begin with '~', or if the user or $HOME is unknown,
+# the path is returned unchanged (leaving error reporting to whatever
+# function is called with the expanded path as argument).
+# See also module 'glob' for expansion of *, ? and [...] in pathnames.
+# (A function should also be defined to do full *sh-style environment
+# variable expansion.)
+
+def expanduser(path):
+	if path[:1] <> '~':
+		return path
+	i, n = 1, len(path)
+	while i < n and path[i] <> '/':
+		i = i+1
+	if i == 1:
+		if not posix.environ.has_key('HOME'):
+			return path
+		userhome = posix.environ['HOME']
+	else:
+		import pwd
+		try:
+			pwent = pwd.getpwnam(path[1:i])
+		except KeyError:
+			return path
+		userhome = pwent[5]
+	if userhome[-1:] == '/': i = i+1
+	return userhome + path[i:]
+
+
+# Expand paths containing shell variable substitutions.
+# This expands the forms $variable and ${variable} only.
+# Non-existent variables are left unchanged.
+
+_varprog = None
+
+def expandvars(path):
+	global _varprog
+	if '$' not in path:
+		return path
+	if not _varprog:
+		import regex
+		_varprog = regex.compile('$\([a-zA-Z0-9_]+\|{[^}]*}\)')
+	i = 0
+	while 1:
+		i = _varprog.search(path, i)
+		if i < 0:
+			break
+		name = _varprog.group(1)
+		j = i + len(_varprog.group(0))
+		if name[:1] == '{' and name[-1:] == '}':
+			name = name[1:-1]
+		if posix.environ.has_key(name):
+			tail = path[j:]
+			path = path[:i] + posix.environ[name]
+			i = len(path)
+			path = path + tail
+		else:
+			i = j
+	return path
+
+
+# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
+# It should be understood that this may change the meaning of the path
+# if it contains symbolic links!
+
+def normpath(path):
+	import string
+	# Treat initial slashes specially
+	slashes = ''
+	while path[:1] == '/':
+		slashes = slashes + '/'
+		path = path[1:]
+	comps = string.splitfields(path, '/')
+	i = 0
+	while i < len(comps):
+		if comps[i] == '.':
+			del comps[i]
+		elif comps[i] == '..' and i > 0 and \
+					  comps[i-1] not in ('', '..'):
+			del comps[i-1:i+1]
+			i = i-1
+		elif comps[i] == '' and i > 0 and comps[i-1] <> '':
+			del comps[i]
+		else:
+			i = i+1
+	# If the path is now empty, substitute '.'
+	if not comps and not slashes:
+		comps.append('.')
+	return slashes + string.joinfields(comps, '/')
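+
+# Editor's sketch (not part of the original module): a few of the
+# helpers above in action; the paths are made up.
+def example_paths():
+	print split('/usr/local/lib/python')	# ('/usr/local/lib', 'python')
+	print splitext('/tmp/archive.gz')	# ('/tmp/archive', '.gz')
+	print join('/usr', 'local')		# /usr/local
+	print normpath('/usr/./local/../lib')	# /usr/lib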
diff --git a/Lib/dos_8x3/py_compi.py b/Lib/dos_8x3/py_compi.py
new file mode 100755
index 0000000..2e68ba8
--- /dev/null
+++ b/Lib/dos_8x3/py_compi.py
@@ -0,0 +1,31 @@
+# Routine to "compile" a .py file to a .pyc file.
+# This has intimate knowledge of how Python/import.c does it.
+# By Sjoerd Mullender (I forced him to write it :-).
+
+import imp
+MAGIC = imp.get_magic()
+
+def wr_long(f, x):
+	f.write(chr( x        & 0xff))
+	f.write(chr((x >> 8)  & 0xff))
+	f.write(chr((x >> 16) & 0xff))
+	f.write(chr((x >> 24) & 0xff))
+
+def compile(file, cfile = None):
+	import os, marshal, __builtin__
+	f = open(file)
+	codestring = f.read()
+	f.close()
+	timestamp = os.stat(file)[8]
+	codeobject = __builtin__.compile(codestring, file, 'exec')
+	if not cfile:
+		cfile = file + 'c'
+	fc = open(cfile, 'wb')
+	fc.write(MAGIC)
+	wr_long(fc, timestamp)
+	marshal.dump(codeobject, fc)
+	fc.close()
+	if os.name == 'mac':
+		import macfs
+		macfs.FSSpec(cfile).SetCreatorType('Pyth', 'PYC ')
+		macfs.FSSpec(file).SetCreatorType('Pyth', 'TEXT')
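+
+# Editor's sketch (not part of the original module): compiling a single
+# source file by hand.  The filenames are made up.
+def example_compile():
+	compile('spam.py')			# writes spam.pyc next to it
+	compile('spam.py', '/tmp/spam.pyc')	# or to an explicit target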
diff --git a/Lib/dos_8x3/queue.py b/Lib/dos_8x3/queue.py
new file mode 100755
index 0000000..5125fd5
--- /dev/null
+++ b/Lib/dos_8x3/queue.py
@@ -0,0 +1,121 @@
+# A multi-producer, multi-consumer queue.
+
+Empty = 'Queue.Empty' # Exception raised by get_nowait()
+
+class Queue:
+
+	# Initialize a queue object with a given maximum size
+	# (If maxsize is <= 0, the maximum size is infinite)
+	def __init__(self, maxsize):
+		import thread
+		self._init(maxsize)
+		self.mutex = thread.allocate_lock()
+		self.esema = thread.allocate_lock()
+		self.esema.acquire_lock()
+		self.fsema = thread.allocate_lock()
+
+	# Get an approximation of the queue size (not reliable!)
+	def qsize(self):
+		self.mutex.acquire_lock()
+		n = self._qsize()
+		self.mutex.release_lock()
+		return n
+
+	# Check if the queue is empty (not reliable!)
+	def empty(self):
+		self.mutex.acquire_lock()
+		n = self._empty()
+		self.mutex.release_lock()
+		return n
+
+	# Check if the queue is full (not reliable!)
+	def full(self):
+		self.mutex.acquire_lock()
+		n = self._full()
+		self.mutex.release_lock()
+		return n
+
+	# Put a new item into the queue
+	def put(self, item):
+		self.fsema.acquire_lock()
+		self.mutex.acquire_lock()
+		was_empty = self._empty()
+		self._put(item)
+		if was_empty:
+			self.esema.release_lock()
+		if not self._full():
+			self.fsema.release_lock()
+		self.mutex.release_lock()
+
+	# Get an item from the queue,
+	# blocking if necessary until one is available
+	def get(self):
+		self.esema.acquire_lock()
+		self.mutex.acquire_lock()
+		was_full = self._full()
+		item = self._get()
+		if was_full:
+			self.fsema.release_lock()
+		if not self._empty():
+			self.esema.release_lock()
+		self.mutex.release_lock()
+		return item
+
+	# Get an item from the queue if one is immediately available,
+	# raise Empty if the queue is empty or temporarily unavailable
+	def get_nowait(self):
+		locked = self.esema.acquire_lock(0)
+		self.mutex.acquire_lock()
+		if self._empty():
+			# The queue is empty -- we can't have esema
+			self.mutex.release_lock()
+			raise Empty
+		if not locked:
+			locked = self.esema.acquire_lock(0)
+			if not locked:
+				# Somebody else has esema
+				# but we have mutex --
+				# get out of their way
+				self.mutex.release_lock()
+				raise Empty
+		was_full = self._full()
+		item = self._get()
+		if was_full:
+			self.fsema.release_lock()
+		if not self._empty():
+			self.esema.release_lock()
+		self.mutex.release_lock()
+		return item
+
+	# XXX Need to define put_nowait() as well.
+		
+
+	# Override these methods to implement other queue organizations
+	# (e.g. stack or priority queue).
+	# These will only be called with appropriate locks held
+
+	# Initialize the queue representation
+	def _init(self, maxsize):
+		self.maxsize = maxsize
+		self.queue = []
+
+	def _qsize(self):
+		return len(self.queue)
+
+	# Check whether the queue is empty
+	def _empty(self):
+		return not self.queue
+
+	# Check whether the queue is full
+	def _full(self):
+		return self.maxsize > 0 and len(self.queue) == self.maxsize
+
+	# Put a new item in the queue
+	def _put(self, item):
+		self.queue.append(item)
+
+	# Get an item from the queue
+	def _get(self):
+		item = self.queue[0]
+		del self.queue[0]
+		return item
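+
+# Editor's sketch (not part of the original class): one producer thread
+# feeding one consumer through a bounded Queue.  Assumes the interpreter
+# was built with thread support.
+def example_producer_consumer():
+	import thread
+	q = Queue(10)
+	def produce(q = q):		# default arg: no closures in old Python
+		for i in range(5):
+			q.put(i)
+	thread.start_new_thread(produce, ())
+	for i in range(5):
+		print 'got', q.get()	# blocks until an item is available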
diff --git a/Lib/dos_8x3/regex_sy.py b/Lib/dos_8x3/regex_sy.py
new file mode 100755
index 0000000..bb80f4e
--- /dev/null
+++ b/Lib/dos_8x3/regex_sy.py
@@ -0,0 +1,41 @@
+# These bits are passed to regex.set_syntax() to choose among
+# alternative regexp syntaxes.
+
+# 1 means plain parentheses serve as grouping, and backslash
+#   parentheses are needed for literal searching.
+# 0 means backslash-parentheses are grouping, and plain parentheses
+#   are for literal searching.
+RE_NO_BK_PARENS = 1
+
+# 1 means plain | serves as the "or"-operator, and \| is a literal.
+# 0 means \| serves as the "or"-operator, and | is a literal.
+RE_NO_BK_VBAR = 2
+
+# 0 means plain + or ? serves as an operator, and \+, \? are literals.
+# 1 means \+, \? are operators and plain +, ? are literals.
+RE_BK_PLUS_QM = 4
+
+# 1 means | binds tighter than ^ or $.
+# 0 means the contrary.
+RE_TIGHT_VBAR = 8
+
+# 1 means treat \n as an _OR operator
+# 0 means treat it as a normal character
+RE_NEWLINE_OR = 16
+
+# 0 means that special characters (such as *, ^, and $) always have
+#   their special meaning regardless of the surrounding context.
+# 1 means that special characters may act as normal characters in some
+#   contexts.  Specifically, this applies to:
+#	^ - only special at the beginning, or after ( or |
+#	$ - only special at the end, or before ) or |
+#	*, +, ? - only special when not after the beginning, (, or |
+RE_CONTEXT_INDEP_OPS = 32
+
+# Now define combinations of bits for the standard possibilities.
+RE_SYNTAX_AWK = (RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS)
+RE_SYNTAX_EGREP = (RE_SYNTAX_AWK | RE_NEWLINE_OR)
+RE_SYNTAX_GREP = (RE_BK_PLUS_QM | RE_NEWLINE_OR)
+RE_SYNTAX_EMACS = 0
+
+# (Python's obsolete "regexp" module used a syntax similar to awk.)
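+
+# A sketch of how these flags are meant to be used with the regex
+# module (set_syntax() is assumed to return the previous setting so
+# that it can be restored afterwards):
+#
+#	import regex
+#	old = regex.set_syntax(RE_SYNTAX_AWK)	# i.e. 1 | 2 | 32 == 35
+#	try:
+#		pat = regex.compile('(ab|cd)+')	# plain parens group, | is "or"
+#	finally:
+#		regex.set_syntax(old)		# restore the previous syntax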
diff --git a/Lib/dos_8x3/simpleht.py b/Lib/dos_8x3/simpleht.py
new file mode 100755
index 0000000..dd3107a
--- /dev/null
+++ b/Lib/dos_8x3/simpleht.py
@@ -0,0 +1,168 @@
+"""Simple HTTP Server.
+
+This module builds on BaseHTTPServer by implementing the standard GET
+and HEAD requests in a fairly straightforward manner.
+
+"""
+
+
+__version__ = "0.3"
+
+
+import os
+import pwd
+import sys
+import time
+import socket
+import string
+import posixpath
+import SocketServer
+import BaseHTTPServer
+
+
+def nobody_uid():
+    """Internal routine to get nobody's uid"""
+    try:
+	nobody = pwd.getpwnam('nobody')[2]
+    except pwd.error:
+	nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
+    return nobody
+
+nobody = nobody_uid()
+
+
+class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+    """Simple HTTP request handler with GET and HEAD commands.
+
+    This serves files from the current directory and any of its
+    subdirectories.  It assumes that all files are plain text files
+    unless they have the extension ".html" in which case it assumes
+    they are HTML files.
+
+    The GET and HEAD requests are identical except that the HEAD
+    request omits the actual contents of the file.
+
+    """
+
+    server_version = "SimpleHTTP/" + __version__
+
+    def do_GET(self):
+	"""Serve a GET request."""
+	f = self.send_head()
+	if f:
+	    self.copyfile(f, self.wfile)
+	    f.close()
+
+    def do_HEAD(self):
+	"""Serve a HEAD request."""
+	f = self.send_head()
+	if f:
+	    f.close()
+
+    def send_head(self):
+	"""Common code for GET and HEAD commands.
+
+	This sends the response code and MIME headers.
+
+	Return value is either a file object (which has to be copied
+	to the outputfile by the caller unless the command was HEAD,
+	and must be closed by the caller under all circumstances), or
+	None, in which case the caller has nothing further to do.
+
+	"""
+	path = self.translate_path(self.path)
+	if os.path.isdir(path):
+	    self.send_error(403, "Directory listing not supported")
+	    return None
+	try:
+	    f = open(path)
+	except IOError:
+	    self.send_error(404, "File not found")
+	    return None
+	self.send_response(200)
+	self.send_header("Content-type", self.guess_type(path))
+	self.end_headers()
+	return f
+
+    def translate_path(self, path):
+	"""Translate a /-separated PATH to the local filename syntax.
+
+	Components that mean special things to the local file system
+	(e.g. drive or directory names) are ignored.  (XXX They should
+	probably be diagnosed.)
+
+	"""
+	path = posixpath.normpath(path)
+	words = string.splitfields(path, '/')
+	words = filter(None, words)
+	path = os.getcwd()
+	for word in words:
+	    drive, word = os.path.splitdrive(word)
+	    head, word = os.path.split(word)
+	    if word in (os.curdir, os.pardir): continue
+	    path = os.path.join(path, word)
+	return path
+
+    def copyfile(self, source, outputfile):
+	"""Copy all data between two file objects.
+
+	The SOURCE argument is a file object open for reading
+	(or anything with a read() method) and the DESTINATION
+	argument is a file object open for writing (or
+	anything with a write() method).
+
+	The only reason for overriding this would be to change
+	the block size or perhaps to replace newlines by CRLF
+	-- note however that the default server uses this
+	to copy binary data as well.
+
+	"""
+
+	BLOCKSIZE = 8192
+	while 1:
+	    data = source.read(BLOCKSIZE)
+	    if not data: break
+	    outputfile.write(data)
+
+    def guess_type(self, path):
+	"""Guess the type of a file.
+
+	Argument is a PATH (a filename).
+
+	Return value is a string of the form type/subtype,
+	usable for a MIME Content-type header.
+
+	The default implementation looks the file's extension
+	up in the table self.extensions_map, using text/plain
+	as a default; however it would be permissible (if
+	slow) to look inside the data to make a better guess.
+
+	"""
+
+	base, ext = posixpath.splitext(path)
+	if self.extensions_map.has_key(ext):
+	    return self.extensions_map[ext]
+	ext = string.lower(ext)
+	if self.extensions_map.has_key(ext):
+	    return self.extensions_map[ext]
+	else:
+	    return self.extensions_map['']
+
+    extensions_map = {
+	    '': 'text/plain',	# Default, *must* be present
+	    '.html': 'text/html',
+	    '.htm': 'text/html',
+	    '.gif': 'image/gif',
+	    '.jpg': 'image/jpeg',
+	    '.jpeg': 'image/jpeg',
+	    }
+
+
+def test(HandlerClass = SimpleHTTPRequestHandler,
+	 ServerClass = SocketServer.TCPServer):
+    BaseHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+    test()
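+
+# A rough usage sketch: serve the current directory on a port of your
+# choosing (the port number is illustrative, and the module is assumed
+# to be importable under its long name, SimpleHTTPServer):
+#
+#	import SocketServer
+#	from SimpleHTTPServer import SimpleHTTPRequestHandler
+#
+#	httpd = SocketServer.TCPServer(('', 8000), SimpleHTTPRequestHandler)
+#	httpd.serve_forever()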
diff --git a/Lib/dos_8x3/socketse.py b/Lib/dos_8x3/socketse.py
new file mode 100755
index 0000000..9a646ab
--- /dev/null
+++ b/Lib/dos_8x3/socketse.py
@@ -0,0 +1,413 @@
+"""Generic socket server classes.
+
+This module tries to capture the various aspects of defining a server:
+
+- address family:
+	- AF_INET: IP (Internet Protocol) sockets (default)
+	- AF_UNIX: Unix domain sockets
+	- others, e.g. AF_DECNET, are conceivable (see <socket.h>)
+- socket type:
+	- SOCK_STREAM (reliable stream, e.g. TCP)
+	- SOCK_DGRAM (datagrams, e.g. UDP)
+- client address verification before further looking at the request
+	(This is actually a hook for any processing that needs to look
+	 at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+	- synchronous (one request is handled at a time)
+	- forking (each request is handled by a new process)
+	- threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server.  This is bad class design, but
+saves some typing.  (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are four classes in an inheritance diagram that represent
+synchronous servers of four types:
+
+	+-----------+        +------------------+
+	| TCPServer |------->| UnixStreamServer |
+	+-----------+        +------------------+
+	      |
+	      v
+	+-----------+        +--------------------+
+	| UDPServer |------->| UnixDatagramServer |
+	+-----------+        +--------------------+
+
+(Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.)
+
+Forking and threading versions of each type of server can be created
+using the ForkingServer and ThreadingServer mix-in classes.  For
+instance, a threading UDP server class is created as follows:
+
+	class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+(The Mix-in class must come first, since it overrides a method defined
+in UDPServer!)
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method.  You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services.  This can be hidden by using the mix-in request handler
+classes StreamRequestHandler or DatagramRequestHandler.
+
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child).  In this case,
+you can use a threading server, but you will probably have to use
+locks to avoid two requests that come in nearly simultaneously from
+applying conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested.  Here a threading or forking
+server is appropriate.
+
+In some cases, it may be appropriate to process part of a request
+synchronously, but to finish processing in a forked child depending on
+the request data.  This can be implemented by using a synchronous
+server and doing an explicit fork in the request handler class's
+handle() method.
+
+Another approach to handling multiple simultaneous requests in an
+environment that supports neither threads nor fork (or where these are
+too expensive or inappropriate for the service) is to maintain an
+explicit table of partially finished requests and to use select() to
+decide which request to work on next (or whether to handle a new
+incoming request).  This is particularly important for stream services
+where each client can potentially be connected for a long time (if
+threads or subprocesses can't be used).
+
+Future work:
+- Standard classes for Sun RPC (which uses either UDP or TCP)
+- Standard mix-in classes to implement various authentication
+  and encryption schemes
+- Standard framework for select-based multiplexing
+
+XXX Open problems:
+- What to do with out-of-band data?
+
+"""
+
+
+__version__ = "0.2"
+
+
+import socket
+import sys
+import os
+
+
+class TCPServer:
+
+    """Base class for various socket-based server classes.
+
+    Defaults to synchronous IP stream (i.e., TCP).
+
+    Methods for the caller:
+
+    - __init__(server_address, RequestHandlerClass)
+    - serve_forever()
+    - handle_request()	# if you don't use serve_forever()
+    - fileno() -> int	# for select()
+
+    Methods that may be overridden:
+
+    - server_bind()
+    - server_activate()
+    - get_request() -> request, client_address
+    - verify_request(request, client_address)
+    - process_request(request, client_address)
+    - handle_error()
+
+    Methods for derived classes:
+
+    - finish_request(request, client_address)
+
+    Class variables that may be overridden by derived classes or
+    instances:
+
+    - address_family
+    - socket_type
+    - request_queue_size (only for stream sockets)
+
+    Instance variables:
+
+    - server_address
+    - RequestHandlerClass
+    - socket
+
+    """
+
+    address_family = socket.AF_INET
+
+    socket_type = socket.SOCK_STREAM
+
+    request_queue_size = 5
+
+    def __init__(self, server_address, RequestHandlerClass):
+	"""Constructor.  May be extended, do not override."""
+	self.server_address = server_address
+	self.RequestHandlerClass = RequestHandlerClass
+	self.socket = socket.socket(self.address_family,
+				    self.socket_type)
+	self.server_bind()
+	self.server_activate()
+
+    def server_bind(self):
+	"""Called by constructor to bind the socket.
+
+	May be overridden.
+
+	"""
+	self.socket.bind(self.server_address)
+
+    def server_activate(self):
+	"""Called by constructor to activate the server.
+
+	May be overridden.
+
+	"""
+	self.socket.listen(self.request_queue_size)
+
+    def fileno(self):
+	"""Return socket file number.
+
+	Interface required by select().
+
+	"""
+	return self.socket.fileno()
+
+    def serve_forever(self):
+	"""Handle one request at a time until doomsday."""
+	while 1:
+	    self.handle_request()
+
+    # The distinction between handling, getting, processing and
+    # finishing a request is fairly arbitrary.  Remember:
+    #
+    # - handle_request() is the top-level call.  It calls
+    #   get_request(), verify_request() and process_request()
+    # - get_request() is different for stream or datagram sockets
+    # - process_request() is the place that may fork a new process
+    #   or create a new thread to finish the request
+    # - finish_request() instantiates the request handler class;
+    #   this constructor will handle the request all by itself
+
+    def handle_request(self):
+	"""Handle one request, possibly blocking."""
+	request, client_address = self.get_request()
+	if self.verify_request(request, client_address):
+	    try:
+		self.process_request(request, client_address)
+	    except:
+		self.handle_error(request, client_address)
+
+    def get_request(self):
+	"""Get the request and client address from the socket.
+
+	May be overridden.
+
+	"""
+	return self.socket.accept()
+
+    def verify_request(self, request, client_address):
+	"""Verify the request.  May be overridden.
+
+	Return true if we should proceed with this request.
+
+	"""
+	return 1
+
+    def process_request(self, request, client_address):
+	"""Call finish_request.
+
+	Overridden by ForkingMixIn and ThreadingMixIn.
+
+	"""
+	self.finish_request(request, client_address)
+
+    def finish_request(self, request, client_address):
+	"""Finish one request by instantiating RequestHandlerClass."""
+	self.RequestHandlerClass(request, client_address, self)
+
+    def handle_error(self, request, client_address):
+	"""Handle an error gracefully.  May be overridden.
+
+	The default is to print a traceback and continue.
+
+	"""
+	exc, value, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
+	print '-'*40
+	print 'Exception happened during processing of request from',
+	print client_address
+	import traceback
+	traceback.print_exception(exc, value, tb)
+	print '-'*40
+
+
+class UDPServer(TCPServer):
+
+    """UDP server class."""
+
+    socket_type = socket.SOCK_DGRAM
+
+    max_packet_size = 8192
+
+    def get_request(self):
+	return self.socket.recvfrom(self.max_packet_size)
+
+
+if hasattr(socket, 'AF_UNIX'):
+
+    class UnixStreamServer(TCPServer):
+
+	address_family = socket.AF_UNIX
+
+
+    class UnixDatagramServer(UDPServer):
+
+	address_family = socket.AF_UNIX
+
+
+class ForkingMixIn:
+
+    """Mix-in class to handle each request in a new process."""
+
+    active_children = None
+
+    def collect_children(self):
+	"""Internal routine to wait for children that have died."""
+	while self.active_children:
+	    pid, status = os.waitpid(0, os.WNOHANG)
+	    if not pid: break
+	    self.active_children.remove(pid)
+
+    def process_request(self, request, client_address):
+	"""Fork a new subprocess to process the request."""
+	self.collect_children()
+	pid = os.fork()
+	if pid:
+	    # Parent process
+	    if self.active_children is None:
+		self.active_children = []
+	    self.active_children.append(pid)
+	    return
+	else:
+	    # Child process.
+	    # This must never return, hence os._exit()!
+	    try:
+		self.finish_request(request, client_address)
+		os._exit(0)
+	    except:
+		try:
+		    self.handle_error(request,
+				      client_address)
+		finally:
+		    os._exit(1)
+
+
+class ThreadingMixIn:
+
+    """Mix-in class to handle each request in a new thread."""
+
+    def process_request(self, request, client_address):
+	"""Start a new thread to process the request."""
+	import thread
+	thread.start_new_thread(self.finish_request,
+				(request, client_address))
+
+
+class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+
+class BaseRequestHandler:
+
+    """Base class for request handler classes.
+
+    This class is instantiated for each request to be handled.  The
+    constructor sets the instance variables request, client_address
+    and server, and then calls the handle() method.  To implement a
+    specific service, all you need to do is to derive a class which
+    defines a handle() method.
+
+    The handle() method can find the request as self.request, the
+    client address as self.client_address, and the server (in case it
+    needs access to per-server information) as self.server.  Since a
+    separate instance is created for each request, the handle() method
+    can define arbitrary other instance variables.
+
+    """
+
+    def __init__(self, request, client_address, server):
+	self.request = request
+	self.client_address = client_address
+	self.server = server
+	try:
+	    self.setup()
+	    self.handle()
+	    self.finish()
+	finally:
+	    sys.exc_traceback = None	# Help garbage collection
+
+    def setup(self):
+	pass
+
+    def __del__(self):
+	pass
+
+    def handle(self):
+	pass
+
+    def finish(self):
+	pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+    """Define self.rfile and self.wfile for stream sockets."""
+
+    def setup(self):
+	self.connection = self.request
+	self.rfile = self.connection.makefile('rb')
+	self.wfile = self.connection.makefile('wb', 0)
+
+    def finish(self):
+	self.wfile.flush()
+
+
+class DatagramRequestHandler(BaseRequestHandler):
+
+    """Define self.rfile and self.wfile for datagram sockets."""
+
+    def setup(self):
+	import StringIO
+	self.packet, self.socket = self.request
+	self.rfile = StringIO.StringIO(self.packet)
+	self.wfile = StringIO.StringIO()	# start empty; finish() sends what handle() wrote
+
+    def finish(self):
+	self.socket.send(self.wfile.getvalue())
diff --git a/Lib/dos_8x3/statcach.py b/Lib/dos_8x3/statcach.py
new file mode 100755
index 0000000..770aef0
--- /dev/null
+++ b/Lib/dos_8x3/statcach.py
@@ -0,0 +1,82 @@
+# Module 'statcache'
+#
+# Maintain a cache of file stats.
+# There are functions to reset the cache or to selectively remove items.
+
+import os
+from stat import *
+
+# The cache.
+# Keys are pathnames, values are `os.stat' outcomes.
+#
+cache = {}
+
+
+# Stat a file, possibly out of the cache.
+#
+def stat(path):
+	if cache.has_key(path):
+		return cache[path]
+	cache[path] = ret = os.stat(path)
+	return ret
+
+
+# Reset the cache completely.
+#
+def reset():
+	global cache
+	cache = {}
+
+
+# Remove a given item from the cache, if it exists.
+#
+def forget(path):
+	if cache.has_key(path):
+		del cache[path]
+
+
+# Remove all pathnames with a given prefix.
+#
+def forget_prefix(prefix):
+	n = len(prefix)
+	for path in cache.keys():
+		if path[:n] == prefix:
+			del cache[path]
+
+
+# Forget about a directory and all entries in it, but not about
+# entries in subdirectories.
+#
+def forget_dir(prefix):
+	if prefix[-1:] == '/' and prefix <> '/':
+		prefix = prefix[:-1]
+	forget(prefix)
+	if prefix[-1:] <> '/':
+		prefix = prefix + '/'
+	n = len(prefix)
+	for path in cache.keys():
+		if path[:n] == prefix:
+			rest = path[n:]
+			if rest[-1:] == '/': rest = rest[:-1]
+			if '/' not in rest:
+				del cache[path]
+
+
+# Remove all pathnames except with a given prefix.
+# Normally used with prefix = '/' after a chdir().
+#
+def forget_except_prefix(prefix):
+	n = len(prefix)
+	for path in cache.keys():
+		if path[:n] <> prefix:
+			del cache[path]
+
+
+# Check for directory.
+#
+def isdir(path):
+	try:
+		st = stat(path)
+	except os.error:
+		return 0
+	return S_ISDIR(st[ST_MODE])
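+
+
+# A small usage sketch (the path name is illustrative only):
+#
+#	import statcache
+#	from stat import ST_MTIME
+#
+#	st = statcache.stat('/etc/passwd')	# hits os.stat, fills the cache
+#	st = statcache.stat('/etc/passwd')	# answered from the cache
+#	print 'mtime =', st[ST_MTIME]
+#
+#	statcache.forget('/etc/passwd')		# drop one entry
+#	statcache.reset()			# or drop everything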
diff --git a/Lib/dos_8x3/stringio.py b/Lib/dos_8x3/stringio.py
new file mode 100755
index 0000000..bbd9428
--- /dev/null
+++ b/Lib/dos_8x3/stringio.py
@@ -0,0 +1,156 @@
+# class StringIO implements  file-like objects that read/write a
+# string buffer (a.k.a. "memory files").
+#
+# This implements (nearly) all stdio methods.
+#
+# f = StringIO()      # ready for writing
+# f = StringIO(buf)   # ready for reading
+# f.close()           # explicitly release resources held
+# flag = f.isatty()   # always false
+# pos = f.tell()      # get current position
+# f.seek(pos)         # set current position
+# f.seek(pos, mode)   # mode 0: absolute; 1: relative; 2: relative to EOF
+# buf = f.read()      # read until EOF
+# buf = f.read(n)     # read up to n bytes
+# buf = f.readline()  # read until end of line ('\n') or EOF
+# list = f.readlines()# list of f.readline() results until EOF
+# f.write(buf)        # write at current position
+# f.writelines(list)  # for line in list: f.write(line)
+# f.getvalue()        # return whole file's contents as a string
+#
+# Notes:
+# - Using a real file is often faster (but less convenient).
+# - fileno() is left unimplemented so that code which uses it triggers
+#   an exception early.
+# - Seeking far beyond EOF and then writing will insert real null
+#   bytes that occupy space in the buffer.
+# - There's a simple test set (see end of this file).
+
+import string
+
+class StringIO:
+	def __init__(self, buf = ''):
+		self.buf = buf
+		self.len = len(buf)
+		self.buflist = []
+		self.pos = 0
+		self.closed = 0
+		self.softspace = 0
+	def close(self):
+		if not self.closed:
+			self.closed = 1
+			del self.buf, self.pos
+	def isatty(self):
+		return 0
+	def seek(self, pos, mode = 0):
+		if self.buflist:
+			self.buf = self.buf + string.joinfields(self.buflist, '')
+			self.buflist = []
+		if mode == 1:
+			pos = pos + self.pos
+		elif mode == 2:
+			pos = pos + self.len
+		self.pos = max(0, pos)
+	def tell(self):
+		return self.pos
+	def read(self, n = -1):
+		if self.buflist:
+			self.buf = self.buf + string.joinfields(self.buflist, '')
+			self.buflist = []
+		if n < 0:
+			newpos = self.len
+		else:
+			newpos = min(self.pos+n, self.len)
+		r = self.buf[self.pos:newpos]
+		self.pos = newpos
+		return r
+	def readline(self):
+		if self.buflist:
+			self.buf = self.buf + string.joinfields(self.buflist, '')
+			self.buflist = []
+		i = string.find(self.buf, '\n', self.pos)
+		if i < 0:
+			newpos = self.len
+		else:
+			newpos = i+1
+		r = self.buf[self.pos:newpos]
+		self.pos = newpos
+		return r
+	def readlines(self):
+		lines = []
+		line = self.readline()
+		while line:
+			lines.append(line)
+			line = self.readline()
+		return lines
+	def write(self, s):
+		if not s: return
+		if self.pos > self.len:
+			self.buflist.append('\0'*(self.pos - self.len))
+			self.len = self.pos
+		newpos = self.pos + len(s)
+		if self.pos < self.len:
+			if self.buflist:
+				self.buf = self.buf + string.joinfields(self.buflist, '')
+				self.buflist = []
+			self.buflist = [self.buf[:self.pos], s, self.buf[newpos:]]
+			self.buf = ''
+		else:
+			self.buflist.append(s)
+			self.len = newpos
+		self.pos = newpos
+	def writelines(self, list):
+		self.write(string.joinfields(list, ''))
+	def flush(self):
+		pass
+	def getvalue(self):
+		if self.buflist:
+			self.buf = self.buf + string.joinfields(self.buflist, '')
+			self.buflist = []
+		return self.buf
+
+
+# A little test suite
+
+def test():
+	import sys
+	if sys.argv[1:]:
+		file = sys.argv[1]
+	else:
+		file = '/etc/passwd'
+	lines = open(file, 'r').readlines()
+	text = open(file, 'r').read()
+	f = StringIO()
+	for line in lines[:-2]:
+		f.write(line)
+	f.writelines(lines[-2:])
+	if f.getvalue() != text:
+		raise RuntimeError, 'write failed'
+	length = f.tell()
+	print 'File length =', length
+	f.seek(len(lines[0]))
+	f.write(lines[1])
+	f.seek(0)
+	print 'First line =', `f.readline()`
+	here = f.tell()
+	line = f.readline()
+	print 'Second line =', `line`
+	f.seek(-len(line), 1)
+	line2 = f.read(len(line))
+	if line != line2:
+		raise RuntimeError, 'bad result after seek back'
+	f.seek(len(line2), 1)
+	list = f.readlines()
+	line = list[-1]
+	f.seek(f.tell() - len(line))
+	line2 = f.read()
+	if line != line2:
+		raise RuntimeError, 'bad result after seek back from EOF'
+	print 'Read', len(list), 'more lines'
+	print 'File length =', f.tell()
+	if f.tell() != length:
+		raise RuntimeError, 'bad length'
+	f.close()
+
+if __name__ == '__main__':
+	test()
diff --git a/Lib/dos_8x3/test_aud.py b/Lib/dos_8x3/test_aud.py
new file mode 100755
index 0000000..3acf1bf
--- /dev/null
+++ b/Lib/dos_8x3/test_aud.py
@@ -0,0 +1,202 @@
+# Test audioop.
+import audioop
+
+def gendata1():
+	return '\0\1\2'
+
+def gendata2():
+	if audioop.getsample('\0\1', 2, 0) == 1:
+		return '\0\0\0\1\0\2'
+	else:
+		return '\0\0\1\0\2\0'
+
+def gendata4():
+	if audioop.getsample('\0\0\0\1', 4, 0) == 1:
+		return '\0\0\0\0\0\0\0\1\0\0\0\2'
+	else:
+		return '\0\0\0\0\1\0\0\0\2\0\0\0'
+
+def testmax(data):
+	if audioop.max(data[0], 1) <> 2 or \
+		  audioop.max(data[1], 2) <> 2 or \
+		  audioop.max(data[2], 4) <> 2:
+		return 0
+	return 1
+
+def testmaxpp(data):
+	if audioop.maxpp(data[0], 1) <> 0 or \
+		  audioop.maxpp(data[1], 2) <> 0 or \
+		  audioop.maxpp(data[2], 4) <> 0:
+		return 0
+	return 1
+
+def testavg(data):
+	if audioop.avg(data[0], 1) <> 1 or \
+		  audioop.avg(data[1], 2) <> 1 or \
+		  audioop.avg(data[2], 4) <> 1:
+		return 0
+	return 1
+
+def testavgpp(data):
+	if audioop.avgpp(data[0], 1) <> 0 or \
+		  audioop.avgpp(data[1], 2) <> 0 or \
+		  audioop.avgpp(data[2], 4) <> 0:
+		return 0
+	return 1
+
+def testrms(data):
+	if audioop.rms(data[0], 1) <> 1 or \
+		  audioop.rms(data[1], 2) <> 1 or \
+		  audioop.rms(data[2], 4) <> 1:
+		return 0
+	return 1
+
+def testcross(data):
+	if audioop.cross(data[0], 1) <> 0 or \
+		  audioop.cross(data[1], 2) <> 0 or \
+		  audioop.cross(data[2], 4) <> 0:
+		return 0
+	return 1
+
+def testadd(data):
+	data2 = []
+	for d in data:
+		str = ''
+		for s in d:
+			str = str + chr(ord(s)*2)
+		data2.append(str)
+	if audioop.add(data[0], data[0], 1) <> data2[0] or \
+		  audioop.add(data[1], data[1], 2) <> data2[1] or \
+		  audioop.add(data[2], data[2], 4) <> data2[2]:
+		return 0
+	return 1
+
+def testbias(data):
+	# Note: this test assumes that avg() works
+	d1 = audioop.bias(data[0], 1, 100)
+	d2 = audioop.bias(data[1], 2, 100)
+	d4 = audioop.bias(data[2], 4, 100)
+	if audioop.avg(d1, 1) <> 101 or \
+		  audioop.avg(d2, 2) <> 101 or \
+		  audioop.avg(d4, 4) <> 101:
+		return 0
+	return 1
+
+def testlin2lin(data):
+	# too simple: we test only the size
+	for d1 in data:
+		for d2 in data:
+			got = len(d1)/3
+			wtd = len(d2)/3
+			if len(audioop.lin2lin(d1, got, wtd)) <> len(d2):
+				return 0
+	return 1
+
+def testadpcm2lin(data):
+	# Very cursory test
+	if audioop.adpcm2lin('\0\0', 1, None) <> ('\0\0\0\0', (0,0)):
+		return 0
+	return 1
+
+def testlin2adpcm(data):
+	# Very cursory test
+	if audioop.lin2adpcm('\0\0\0\0', 1, None) <> ('\0\0', (0,0)):
+		return 0
+	return 1
+
+def testlin2ulaw(data):
+	if audioop.lin2ulaw(data[0], 1) <> '\377\347\333' or \
+		  audioop.lin2ulaw(data[1], 2) <> '\377\377\377' or \
+		  audioop.lin2ulaw(data[2], 4) <> '\377\377\377':
+		return 0
+	return 1
+
+def testulaw2lin(data):
+	# Cursory
+	d = audioop.lin2ulaw(data[0], 1)
+	if audioop.ulaw2lin(d, 1) <> data[0]:
+		return 0
+	return 1
+
+def testmul(data):
+	data2 = []
+	for d in data:
+		str = ''
+		for s in d:
+			str = str + chr(ord(s)*2)
+		data2.append(str)
+	if audioop.mul(data[0], 1, 2) <> data2[0] or \
+		  audioop.mul(data[1],2, 2) <> data2[1] or \
+		  audioop.mul(data[2], 4, 2) <> data2[2]:
+		return 0
+	return 1
+
+def testreverse(data):
+	if audioop.reverse(data[0], 1) <> '\2\1\0':
+		return 0
+	return 1
+
+def testtomono(data):
+	data2 = ''
+	for d in data[0]:
+		data2 = data2 + d + d
+	if audioop.tomono(data2, 1, 0.5, 0.5) <> data[0]:
+		return 0
+	return 1
+
+def testtostereo(data):
+	data2 = ''
+	for d in data[0]:
+		data2 = data2 + d + d
+	if audioop.tostereo(data[0], 1, 1, 1) <> data2:
+		return 0
+	return 1
+
+def testfindfactor(data):
+	if audioop.findfactor(data[1], data[1]) <> 1.0:
+		return 0
+	return 1
+
+def testfindfit(data):
+	if audioop.findfit(data[1], data[1]) <> (0, 1.0):
+		return 0
+	return 1
+
+def testfindmax(data):
+	if audioop.findmax(data[1], 1) <> 2:
+		return 0
+	return 1
+
+def testgetsample(data):
+	for i in range(3):
+		if audioop.getsample(data[0], 1, i) <> i or \
+			  audioop.getsample(data[1], 2, i) <> i or \
+			  audioop.getsample(data[2], 4, i) <> i:
+			return 0
+	return 1
+
+def testone(name, data):
+	try:
+		func = eval('test'+name)
+	except NameError:
+		print 'No test found for audioop.'+name+'()'
+		return
+	try:
+		rv = func(data)
+	except 'xx':
+		print 'Test FAILED for audioop.'+name+'() (with an exception)'
+		return
+	if not rv:
+		print 'Test FAILED for audioop.'+name+'()'
+
+def testall():
+	data = [gendata1(), gendata2(), gendata4()]
+	names = dir(audioop)
+	# We know there is a routine 'add'
+	routines = []
+	for n in names:
+		if type(eval('audioop.'+n)) == type(audioop.add):
+			routines.append(n)
+	for n in routines:
+		testone(n, data)
+testall()
diff --git a/Lib/dos_8x3/test_bui.py b/Lib/dos_8x3/test_bui.py
new file mode 100755
index 0000000..33fef8d
--- /dev/null
+++ b/Lib/dos_8x3/test_bui.py
@@ -0,0 +1,13 @@
+# Python test set -- part 4, built-in functions
+
+from test_support import *
+
+print '4. Built-in functions'
+
+print 'test_b1'
+unload('test_b1')
+import test_b1
+
+print 'test_b2'
+unload('test_b2')
+import test_b2
diff --git a/Lib/dos_8x3/test_exc.py b/Lib/dos_8x3/test_exc.py
new file mode 100755
index 0000000..4fbee3e
--- /dev/null
+++ b/Lib/dos_8x3/test_exc.py
@@ -0,0 +1,92 @@
+# Python test set -- part 5, built-in exceptions
+
+from test_support import *
+
+print '5. Built-in exceptions'
+# XXX This is not really enough, each *operation* should be tested!
+
+def r(name): print name
+
+r(AttributeError)
+import sys
+try: x = sys.undefined_attribute
+except AttributeError: pass
+
+r(EOFError)
+import sys
+fp = open(TESTFN, 'w')
+fp.close()
+fp = open(TESTFN, 'r')
+savestdin = sys.stdin
+try:
+	try:
+		sys.stdin = fp
+		x = raw_input()
+	except EOFError:
+		pass
+finally:
+	sys.stdin = savestdin
+	fp.close()
+
+r(IOError)
+try: open('this file does not exist', 'r')
+except IOError: pass
+
+r(ImportError)
+try: import undefined_module
+except ImportError: pass
+
+r(IndexError)
+x = []
+try: a = x[10]
+except IndexError: pass
+
+r(KeyError)
+x = {}
+try: a = x['key']
+except KeyError: pass
+
+r(KeyboardInterrupt)
+print '(not testable in a script)'
+
+r(MemoryError)
+print '(not safe to test)'
+
+r(NameError)
+try: x = undefined_variable
+except NameError: pass
+
+r(OverflowError)
+x = 1
+try:
+	while 1: x = x+x
+except OverflowError: pass
+
+r(RuntimeError)
+print '(not used any more?)'
+
+r(SyntaxError)
+try: exec '/\n'
+except SyntaxError: pass
+
+r(SystemError)
+print '(hard to reproduce)'
+
+r(SystemExit)
+import sys
+try: sys.exit(0)
+except SystemExit: pass
+
+r(TypeError)
+try: [] + ()
+except TypeError: pass
+
+r(ValueError)
+try: x = chr(10000)
+except ValueError: pass
+
+r(ZeroDivisionError)
+try: x = 1/0
+except ZeroDivisionError: pass
+
+unlink(TESTFN)
diff --git a/Lib/dos_8x3/test_gra.py b/Lib/dos_8x3/test_gra.py
new file mode 100755
index 0000000..b9607ac
--- /dev/null
+++ b/Lib/dos_8x3/test_gra.py
@@ -0,0 +1,513 @@
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+from test_support import *
+
+print '1. Parser'
+
+print '1.1 Tokens'
+
+print '1.1.1 Backslashes'
+
+# Backslash means line continuation:
+x = 1 \
++ 1
+if x <> 2: raise TestFailed, 'backslash for line continuation'
+
+# Backslash does not mean continuation in comments :\
+x = 0
+if x <> 0: raise TestFailed, 'backslash ending comment'
+
+print '1.1.2 Numeric literals'
+
+print '1.1.2.1 Plain integers'
+if 0xff <> 255: raise TestFailed, 'hex int'
+if 0377 <> 255: raise TestFailed, 'octal int'
+if  2147483647   != 017777777777: raise TestFailed, 'large positive int'
+try:
+	from sys import maxint
+except ImportError:
+	maxint = 2147483647
+if maxint == 2147483647:
+	if -2147483647-1 != 020000000000: raise TestFailed, 'max negative int'
+	# XXX -2147483648
+	if 037777777777 != -1: raise TestFailed, 'oct -1'
+	if 0xffffffff != -1: raise TestFailed, 'hex -1'
+	for s in '2147483648', '040000000000', '0x100000000':
+		try:
+			x = eval(s)
+		except OverflowError:
+			continue
+##		raise TestFailed, \
+		print \
+			  'No OverflowError on huge integer literal ' + `s`
+elif eval('maxint == 9223372036854775807'):
+	if eval('-9223372036854775807-1 != 01000000000000000000000'):
+		raise TestFailed, 'max negative int'
+	if eval('01777777777777777777777') != -1: raise TestFailed, 'oct -1'
+	if eval('0xffffffffffffffff') != -1: raise TestFailed, 'hex -1'
+	for s in '9223372036854775808', '02000000000000000000000', \
+		 '0x10000000000000000':
+		try:
+			x = eval(s)
+		except OverflowError:
+			continue
+		raise TestFailed, \
+			  'No OverflowError on huge integer literal ' + `s`
+else:
+	print 'Weird maxint value', maxint
+
+print '1.1.2.2 Long integers'
+x = 0L
+x = 0l
+x = 0xffffffffffffffffL
+x = 0xffffffffffffffffl
+x = 077777777777777777L
+x = 077777777777777777l
+x = 123456789012345678901234567890L
+x = 123456789012345678901234567890l
+
+print '1.1.2.3 Floating point'
+x = 3.14
+x = 314.
+x = 0.314
+# XXX x = 000.314
+x = .314
+x = 3e14
+x = 3E14
+x = 3e-14
+x = 3e+14
+x = 3.e14
+x = .3e14
+x = 3.1e4
+
+print '1.1.3 String literals'
+
+def assert(s):
+	if not s: raise TestFailed, 'see traceback'
+
+x = ''; y = ""; assert(len(x) == 0 and x == y)
+x = '\''; y = "'"; assert(len(x) == 1 and x == y and ord(x) == 39)
+x = '"'; y = "\""; assert(len(x) == 1 and x == y and ord(x) == 34)
+x = "doesn't \"shrink\" does it"
+y = 'doesn\'t "shrink" does it'
+assert(len(x) == 24 and x == y)
+x = "does \"shrink\" doesn't it"
+y = 'does "shrink" doesn\'t it'
+assert(len(x) == 24 and x == y)
+x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+assert(x == y)
+y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''; assert(x == y)
+y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"; assert(x == y)
+y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'; assert(x == y)
+
+
+print '1.2 Grammar'
+
+print 'single_input' # NEWLINE | simple_stmt | compound_stmt NEWLINE
+# XXX can't test in a script -- this rule is only used when interactive
+
+print 'file_input' # (NEWLINE | stmt)* ENDMARKER
+# Being tested as this very moment this very module
+
+print 'expr_input' # testlist NEWLINE
+# XXX Hard to test -- used only in calls to input()
+
+print 'eval_input' # testlist ENDMARKER
+x = eval('1, 0 or 1')
+
+print 'funcdef'
+### 'def' NAME parameters ':' suite
+### parameters: '(' [varargslist] ')'
+### varargslist: (fpdef ['=' test] ',')* '*' NAME
+###            | fpdef ['=' test] (',' fpdef ['=' test])* [',']
+### fpdef: NAME | '(' fplist ')'
+### fplist: fpdef (',' fpdef)* [',']
+def f1(): pass
+def f2(one_argument): pass
+def f3(two, arguments): pass
+def f4(two, (compound, (argument, list))): pass
+def a1(one_arg,): pass
+def a2(two, args,): pass
+def v0(*rest): pass
+def v1(a, *rest): pass
+def v2(a, b, *rest): pass
+def v3(a, (b, c), *rest): pass
+def d01(a=1): pass
+d01()
+d01(1)
+def d11(a, b=1): pass
+d11(1)
+d11(1, 2)
+def d21(a, b, c=1): pass
+d21(1, 2)
+d21(1, 2, 3)
+def d02(a=1, b=2): pass
+d02()
+d02(1)
+d02(1, 2)
+def d12(a, b=1, c=2): pass
+d12(1)
+d12(1, 2)
+d12(1, 2, 3)
+def d22(a, b, c=1, d=2): pass
+d22(1, 2)
+d22(1, 2, 3)
+d22(1, 2, 3, 4)
+def d01v(a=1, *rest): pass
+d01v()
+d01v(1)
+d01v(1, 2)
+def d11v(a, b=1, *rest): pass
+d11v(1)
+d11v(1, 2)
+d11v(1, 2, 3)
+def d21v(a, b, c=1, *rest): pass
+d21v(1, 2)
+d21v(1, 2, 3)
+d21v(1, 2, 3, 4)
+def d02v(a=1, b=2, *rest): pass
+d02v()
+d02v(1)
+d02v(1, 2)
+d02v(1, 2, 3)
+def d12v(a, b=1, c=2, *rest): pass
+d12v(1)
+d12v(1, 2)
+d12v(1, 2, 3)
+d12v(1, 2, 3, 4)
+def d22v(a, b, c=1, d=2, *rest): pass
+d22v(1, 2)
+d22v(1, 2, 3)
+d22v(1, 2, 3, 4)
+d22v(1, 2, 3, 4, 5)
+
+### stmt: simple_stmt | compound_stmt
+# Tested below
+
+### simple_stmt: small_stmt (';' small_stmt)* [';']
+print 'simple_stmt'
+x = 1; pass; del x
+
+### small_stmt: expr_stmt | print_stmt  | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
+# Tested below
+
+print 'expr_stmt' # (exprlist '=')* exprlist
+1
+1, 2, 3
+x = 1
+x = 1, 2, 3
+x = y = z = 1, 2, 3
+x, y, z = 1, 2, 3
+abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+# NB these variables are deleted below
+
+print 'print_stmt' # 'print' (test ',')* [test]
+print 1, 2, 3
+print 1, 2, 3,
+print
+print 0 or 1, 0 or 1,
+print 0 or 1
+
+print 'del_stmt' # 'del' exprlist
+del abc
+del x, y, (z, xyz)
+
+print 'pass_stmt' # 'pass'
+pass
+
+print 'flow_stmt' # break_stmt | continue_stmt | return_stmt | raise_stmt
+# Tested below
+
+print 'break_stmt' # 'break'
+while 1: break
+
+print 'continue_stmt' # 'continue'
+i = 1
+while i: i = 0; continue
+
+print 'return_stmt' # 'return' [testlist]
+def g1(): return
+def g2(): return 1
+g1()
+x = g2()
+
+print 'raise_stmt' # 'raise' test [',' test]
+try: raise RuntimeError, 'just testing'
+except RuntimeError: pass
+try: raise KeyboardInterrupt
+except KeyboardInterrupt: pass
+
+print 'import_stmt' # 'import' NAME (',' NAME)* | 'from' NAME 'import' ('*' | NAME (',' NAME)*)
+import sys
+import time, sys
+from time import time
+from sys import *
+from sys import path, argv
+
+print 'global_stmt' # 'global' NAME (',' NAME)*
+def f():
+	global a
+	global a, b
+	global one, two, three, four, five, six, seven, eight, nine, ten
+
+print 'exec_stmt' # 'exec' expr ['in' expr [',' expr]]
+def f():
+	z = None
+	del z
+	exec 'z=1+1\n'
+	if z <> 2: raise TestFailed, 'exec \'z=1+1\'\\n'
+	del z
+	exec 'z=1+1'
+	if z <> 2: raise TestFailed, 'exec \'z=1+1\''
+f()
+g = {}
+exec 'z = 1' in g
+if g.has_key('__builtins__'): del g['__builtins__']
+if g <> {'z': 1}: raise TestFailed, 'exec \'z = 1\' in g'
+g = {}
+l = {}
+exec 'global a; a = 1; b = 2' in g, l
+if g.has_key('__builtins__'): del g['__builtins__']
+if l.has_key('__builtins__'): del l['__builtins__']
+if (g, l) <> ({'a':1}, {'b':2}): raise TestFailed, 'exec ... in g, l'
+
+
+### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+# Tested below
+
+print 'if_stmt' # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+if 1: pass
+if 1: pass
+else: pass
+if 0: pass
+elif 0: pass
+if 0: pass
+elif 0: pass
+elif 0: pass
+elif 0: pass
+else: pass
+
+print 'while_stmt' # 'while' test ':' suite ['else' ':' suite]
+while 0: pass
+while 0: pass
+else: pass
+
+print 'for_stmt' # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+for i in 1, 2, 3: pass
+for i, j, k in (): pass
+else: pass
+class Squares:
+	def __init__(self, max):
+		self.max = max
+		self.sofar = []
+	def __len__(self): return len(self.sofar)
+	def __getitem__(self, i):
+		if not 0 <= i < self.max: raise IndexError
+		n = len(self.sofar)
+		while n <= i:
+			self.sofar.append(n*n)
+			n = n+1
+		return self.sofar[i]
+n = 0
+for x in Squares(10): n = n+x
+if n != 285: raise TestFailed, 'for over growing sequence'
+
+print 'try_stmt'
+### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+###         | 'try' ':' suite 'finally' ':' suite
+### except_clause: 'except' [expr [',' expr]]
+try:
+	1/0
+except ZeroDivisionError:
+	pass
+else:
+	pass
+try: 1/0
+except EOFError: pass
+except TypeError, msg: pass
+except RuntimeError, msg: pass
+except: pass
+else: pass
+try: 1/0
+except (EOFError, TypeError, ZeroDivisionError): pass
+try: 1/0
+except (EOFError, TypeError, ZeroDivisionError), msg: pass
+try: pass
+finally: pass
+
+print 'suite' # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+if 1: pass
+if 1:
+	pass
+if 1:
+	#
+	#
+	#
+	pass
+	pass
+	#
+	pass
+	#
+
+print 'test'
+### and_test ('or' and_test)*
+### and_test: not_test ('and' not_test)*
+### not_test: 'not' not_test | comparison
+if not 1: pass
+if 1 and 1: pass
+if 1 or 1: pass
+if not not not 1: pass
+if not 1 and 1 and 1: pass
+if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+print 'comparison'
+### comparison: expr (comp_op expr)*
+### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+if 1: pass
+x = (1 == 1)
+if 1 == 1: pass
+if 1 != 1: pass
+if 1 <> 1: pass
+if 1 < 1: pass
+if 1 > 1: pass
+if 1 <= 1: pass
+if 1 >= 1: pass
+if 1 is 1: pass
+if 1 is not 1: pass
+if 1 in (): pass
+if 1 not in (): pass
+if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+print 'binary mask ops'
+x = 1 & 1
+x = 1 ^ 1
+x = 1 | 1
+
+print 'shift ops'
+x = 1 << 1
+x = 1 >> 1
+x = 1 << 1 >> 1
+
+print 'additive ops'
+x = 1
+x = 1 + 1
+x = 1 - 1 - 1
+x = 1 - 1 + 1 - 1 + 1
+
+print 'multiplicative ops'
+x = 1 * 1
+x = 1 / 1
+x = 1 % 1
+x = 1 / 1 * 1 % 1
+
+print 'unary ops'
+x = +1
+x = -1
+x = ~1
+x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+x = -1*1/1 + 1*1 - ---1*1
+
+print 'selectors'
+### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+### subscript: expr | [expr] ':' [expr]
+f1()
+f2(1)
+f2(1,)
+f3(1, 2)
+f3(1, 2,)
+f4(1, (2, (3, 4)))
+v0()
+v0(1)
+v0(1,)
+v0(1,2)
+v0(1,2,3,4,5,6,7,8,9,0)
+v1(1)
+v1(1,)
+v1(1,2)
+v1(1,2,3)
+v1(1,2,3,4,5,6,7,8,9,0)
+v2(1,2)
+v2(1,2,3)
+v2(1,2,3,4)
+v2(1,2,3,4,5,6,7,8,9,0)
+v3(1,(2,3))
+v3(1,(2,3),4)
+v3(1,(2,3),4,5,6,7,8,9,0)
+import sys, time
+c = sys.path[0]
+x = time.time()
+x = sys.modules['time'].time()
+a = '01234'
+c = a[0]
+c = a[-1]
+s = a[0:5]
+s = a[:5]
+s = a[0:]
+s = a[:]
+s = a[-5:]
+s = a[:-1]
+s = a[-4:-3]
+
+print 'atoms'
+### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
+### dictmaker: test ':' test (',' test ':' test)* [',']
+
+x = (1)
+x = (1 or 2 or 3)
+x = (1 or 2 or 3, 2, 3)
+
+x = []
+x = [1]
+x = [1 or 2 or 3]
+x = [1 or 2 or 3, 2, 3]
+x = []
+
+x = {}
+x = {'one': 1}
+x = {'one': 1,}
+x = {'one' or 'two': 1 or 2}
+x = {'one': 1, 'two': 2}
+x = {'one': 1, 'two': 2,}
+x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+x = `x`
+x = `1 or 2 or 3`
+x = x
+x = 'x'
+x = 123
+
+### exprlist: expr (',' expr)* [',']
+### testlist: test (',' test)* [',']
+# These have been exercised enough above
+
+print 'classdef' # 'class' NAME ['(' testlist ')'] ':' suite
+class B: pass
+class C1(B): pass
+class C2(B): pass
+class D(C1, C2, B): pass
+class C:
+	def meth1(self): pass
+	def meth2(self, arg): pass
+	def meth3(self, a1, a2): pass
diff --git a/Lib/dos_8x3/test_opc.py b/Lib/dos_8x3/test_opc.py
new file mode 100755
index 0000000..b1e944b
--- /dev/null
+++ b/Lib/dos_8x3/test_opc.py
@@ -0,0 +1,59 @@
+# Python test set -- part 2, opcodes
+
+from test_support import *
+
+
+print '2. Opcodes'
+print 'XXX Not yet fully implemented'
+
+print '2.1 try inside for loop'
+n = 0
+for i in range(10):
+	n = n+i
+	try: 1/0
+	except NameError: pass
+	except ZeroDivisionError: pass
+	except TypeError: pass
+	try: pass
+	except: pass
+	try: pass
+	finally: pass
+	n = n+i
+if n <> 90:
+	raise TestFailed, 'try inside for'
+
+
+print '2.2 raise class exceptions'
+
+class AClass: pass
+class BClass(AClass): pass
+class CClass: pass
+
+try: raise AClass()
+except: pass
+
+try: raise AClass()
+except AClass: pass
+
+try: raise BClass()
+except AClass: pass
+
+try: raise BClass()
+except CClass: raise TestFailed
+except: pass
+
+a = AClass()
+b = BClass()
+
+try: raise AClass, b
+except BClass, v: raise TestFailed
+except AClass, v:
+	if v != b: raise TestFailed
+
+
+try: raise b
+except AClass, v:
+	if v != b: raise TestFailed
+
+try:  raise BClass, a
+except TypeError: pass
diff --git a/Lib/dos_8x3/test_ope.py b/Lib/dos_8x3/test_ope.py
new file mode 100755
index 0000000..1a75065
--- /dev/null
+++ b/Lib/dos_8x3/test_ope.py
@@ -0,0 +1,5 @@
+# Python test set -- part 3, built-in operations.
+
+
+print '3. Operations'
+print 'XXX Not yet implemented'
diff --git a/Lib/dos_8x3/test_rgb.py b/Lib/dos_8x3/test_rgb.py
new file mode 100755
index 0000000..cdadc66
--- /dev/null
+++ b/Lib/dos_8x3/test_rgb.py
@@ -0,0 +1,49 @@
+# Testing rgbimg module
+
+import rgbimg, os
+
+error = 'test_rgbimg.error'
+
+print 'RGBimg test suite:'
+
+def findfile(file):
+	if os.path.isabs(file): return file
+	import sys
+	for dn in sys.path:
+		fn = os.path.join(dn, file)
+		if os.path.exists(fn): return fn
+	return file
+
+def testimg(rgb_file, raw_file):
+	rgb_file = findfile(rgb_file)
+	raw_file = findfile(raw_file)
+	width, height = rgbimg.sizeofimage(rgb_file)
+	rgb = rgbimg.longimagedata(rgb_file)
+	if len(rgb) != width * height * 4:
+		raise error, 'bad image length'
+	raw = open(raw_file, 'r').read()
+	if rgb != raw:
+		raise error, 'images don\'t match for '+rgb_file+' and '+raw_file
+	for depth in [1, 3, 4]:
+		rgbimg.longstoimage(rgb, width, height, depth, '@.rgb')
+	os.unlink('@.rgb')
+
+ttob = rgbimg.ttob(0)
+if ttob != 0:
+	raise error, 'ttob should start out as zero'
+
+testimg('test.rgb', 'test.rawimg')
+
+ttob = rgbimg.ttob(1)
+if ttob != 0:
+	raise error, 'ttob should be zero'
+
+testimg('test.rgb', 'test.rawimg.rev')
+
+ttob = rgbimg.ttob(0)
+if ttob != 1:
+	raise error, 'ttob should be one'
+
+ttob = rgbimg.ttob(0)
+if ttob != 0:
+	raise error, 'ttob should be zero'
diff --git a/Lib/dos_8x3/test_sel.py b/Lib/dos_8x3/test_sel.py
new file mode 100755
index 0000000..f185308
--- /dev/null
+++ b/Lib/dos_8x3/test_sel.py
@@ -0,0 +1,23 @@
+# Testing select module
+
+def test():
+	import select
+	import os
+	cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do date; sleep 3; done'
+	p = os.popen(cmd, 'r')
+	for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
+		print 'timeout =', tout
+		rfd, wfd, xfd = select.select([p], [], [], tout)
+		print rfd, wfd, xfd
+		if (rfd, wfd, xfd) == ([], [], []):
+			continue
+		if (rfd, wfd, xfd) == ([p], [], []):
+			line = p.readline()
+			print `line`
+			if not line:
+				print 'EOF'
+				break
+			continue
+		print 'Heh?'
+
+test()
diff --git a/Lib/dos_8x3/test_sig.py b/Lib/dos_8x3/test_sig.py
new file mode 100755
index 0000000..bfcf517
--- /dev/null
+++ b/Lib/dos_8x3/test_sig.py
@@ -0,0 +1,50 @@
+# Test the signal module
+
+import signal
+import os
+
+
+pid = os.getpid()
+
+# Shell script that will send us asynchronous signals
+script = """
+(
+	set -x
+	sleep 2
+	kill -5 %(pid)d
+	sleep 2
+	kill -2 %(pid)d
+	sleep 2
+	kill -3 %(pid)d
+) &
+""" % vars()
+
+def handlerA(*args):
+	print "handlerA", args
+
+HandlerBCalled = "HandlerBCalled"	# Exception
+
+def handlerB(*args):
+	print "handlerB", args
+	raise HandlerBCalled, args
+
+signal.alarm(20)			# Entire test lasts at most 20 sec.
+signal.signal(5, handlerA)
+signal.signal(2, handlerB)
+signal.signal(3, signal.SIG_IGN)
+signal.signal(signal.SIGALRM, signal.default_int_handler)
+
+os.system(script)
+
+print "starting pause() loop..."
+
+try:
+	while 1:
+		print "call pause()..."
+		try:
+			signal.pause()
+			print "pause() returned"
+		except HandlerBCalled:
+			print "HandlerBCalled exception caught"
+except KeyboardInterrupt:
+	print "KeyboardInterrupt (assume the alarm() went off)"
diff --git a/Lib/dos_8x3/test_sup.py b/Lib/dos_8x3/test_sup.py
new file mode 100755
index 0000000..7a76664
--- /dev/null
+++ b/Lib/dos_8x3/test_sup.py
@@ -0,0 +1,41 @@
+# Python test set -- supporting definitions.
+
+TestFailed = 'test_support -- test failed'	# Exception
+
+def unload(name):
+	import sys
+	try:
+		del sys.modules[name]
+	except KeyError:
+		pass
+
+def forget(modname):
+	unload(modname)
+	import sys, os
+	for dirname in sys.path:
+		try:
+			os.unlink(os.path.join(dirname, modname + '.pyc'))
+		except os.error:
+			pass
+
+FUZZ = 1e-6
+
+def fcmp(x, y): # fuzzy comparison function
+	if type(x) == type(0.0) or type(y) == type(0.0):
+		try:
+			x, y = coerce(x, y)
+			fuzz = (abs(x) + abs(y)) * FUZZ
+			if abs(x-y) <= fuzz:
+				return 0
+		except:
+			pass
+	elif type(x) == type(y) and type(x) in (type(()), type([])):
+		for i in range(min(len(x), len(y))):
+			outcome = fcmp(x[i], y[i])
+			if outcome <> 0:
+				return outcome
+		return cmp(len(x), len(y))
+	return cmp(x, y)
+
+TESTFN = '@test' # Filename used for testing
+from os import unlink
diff --git a/Lib/dos_8x3/test_thr.py b/Lib/dos_8x3/test_thr.py
new file mode 100755
index 0000000..4e0eb70
--- /dev/null
+++ b/Lib/dos_8x3/test_thr.py
@@ -0,0 +1,106 @@
+# Very rudimentary test of thread module
+
+# Create a bunch of threads, let each do some work, wait until all are done
+
+import whrandom
+import thread
+import time
+
+mutex = thread.allocate_lock()
+whmutex = thread.allocate_lock() # for calls to whrandom
+running = 0
+done = thread.allocate_lock()
+done.acquire()
+
+numtasks = 10
+
+def task(ident):
+	global running
+	whmutex.acquire()
+	delay = whrandom.random() * numtasks
+	whmutex.release()
+	print 'task', ident, 'will run for', delay, 'sec'
+	time.sleep(delay)
+	print 'task', ident, 'done'
+	mutex.acquire()
+	running = running - 1
+	if running == 0:
+		done.release()
+	mutex.release()
+
+next_ident = 0
+def newtask():
+	global next_ident, running
+	mutex.acquire()
+	next_ident = next_ident + 1
+	print 'creating task', next_ident
+	thread.start_new_thread(task, (next_ident,))
+	running = running + 1
+	mutex.release()
+
+for i in range(numtasks):
+	newtask()
+
+print 'waiting for all tasks to complete'
+done.acquire()
+print 'all tasks done'
+
+class barrier:
+	def __init__(self, n):
+		self.n = n
+		self.waiting = 0
+		self.checkin  = thread.allocate_lock()
+		self.checkout = thread.allocate_lock()
+		self.checkout.acquire()
+
+	def enter(self):
+		checkin, checkout = self.checkin, self.checkout
+
+		checkin.acquire()
+		self.waiting = self.waiting + 1
+		if self.waiting == self.n:
+			self.waiting = self.n - 1
+			checkout.release()
+			return
+		checkin.release()
+
+		checkout.acquire()
+		self.waiting = self.waiting - 1
+		if self.waiting == 0:
+			checkin.release()
+			return
+		checkout.release()
+
+numtrips = 3
+def task2(ident):
+	global running
+	for i in range(numtrips):
+		if ident == 0:
+			# give it a good chance to enter the next
+			# barrier before the others are all out
+			# of the current one
+			delay = 0.001
+		else:
+			whmutex.acquire()
+			delay = whrandom.random() * numtasks
+			whmutex.release()
+		print 'task', ident, 'will run for', delay, 'sec'
+		time.sleep(delay)
+		print 'task', ident, 'entering barrier', i
+		bar.enter()
+		print 'task', ident, 'leaving barrier', i
+	mutex.acquire()
+	running = running - 1
+	if running == 0:
+		done.release()
+	mutex.release()
+
+print '\n*** Barrier Test ***'
+if done.acquire(0):
+	raise ValueError, "'done' should have remained acquired"
+bar = barrier(numtasks)
+running = numtasks
+for i in range(numtasks):
+	thread.start_new_thread(task2, (i,))
+done.acquire()
+print 'all tasks done'
diff --git a/Lib/dos_8x3/test_typ.py b/Lib/dos_8x3/test_typ.py
new file mode 100755
index 0000000..51c76dc
--- /dev/null
+++ b/Lib/dos_8x3/test_typ.py
@@ -0,0 +1,182 @@
+# Python test set -- part 6, built-in types
+
+from test_support import *
+
+print '6. Built-in types'
+
+print '6.1 Truth value testing'
+if None: raise TestFailed, 'None is true instead of false'
+if 0: raise TestFailed, '0 is true instead of false'
+if 0L: raise TestFailed, '0L is true instead of false'
+if 0.0: raise TestFailed, '0.0 is true instead of false'
+if '': raise TestFailed, '\'\' is true instead of false'
+if (): raise TestFailed, '() is true instead of false'
+if []: raise TestFailed, '[] is true instead of false'
+if {}: raise TestFailed, '{} is true instead of false'
+if not 1: raise TestFailed, '1 is false instead of true'
+if not 1L: raise TestFailed, '1L is false instead of true'
+if not 1.0: raise TestFailed, '1.0 is false instead of true'
+if not 'x': raise TestFailed, '\'x\' is false instead of true'
+if not (1, 1): raise TestFailed, '(1, 1) is false instead of true'
+if not [1]: raise TestFailed, '[1] is false instead of true'
+if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
+def f(): pass
+class C: pass
+import sys
+x = C()
+if not f: raise TestFailed, 'f is false instead of true'
+if not C: raise TestFailed, 'C is false instead of true'
+if not sys: raise TestFailed, 'sys is false instead of true'
+if not x: raise TestFailed, 'x is false instead of true'
+
+print '6.2 Boolean operations'
+if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
+if 1 and 1: pass
+else: raise TestFailed, '1 and 1 is false instead of false'
+if not 1: raise TestFailed, 'not 1 is true instead of false'
+
+print '6.3 Comparisons'
+if 0 < 1 <= 1 == 1 >= 1 > 0 <> 1: pass
+else: raise TestFailed, 'int comparisons failed'
+if 0L < 1L <= 1L == 1L >= 1L > 0L <> 1L: pass
+else: raise TestFailed, 'long int comparisons failed'
+if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 <> 1.0: pass
+else: raise TestFailed, 'float comparisons failed'
+if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
+else: raise TestFailed, 'string comparisons failed'
+if 0 in [0] and 0 not in [1]: pass
+else: raise TestFailed, 'membership test failed'
+if None is None and [] is not []: pass
+else: raise TestFailed, 'identity test failed'
+
+print '6.4 Numeric types (mostly conversions)'
+if 0 <> 0L or 0 <> 0.0 or 0L <> 0.0: raise TestFailed, 'mixed comparisons'
+if 1 <> 1L or 1 <> 1.0 or 1L <> 1.0: raise TestFailed, 'mixed comparisons'
+if -1 <> -1L or -1 <> -1.0 or -1L <> -1.0:
+	raise TestFailed, 'int/long/float value not equal'
+if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
+else: raise TestFailed, 'int() does not round properly'
+if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
+else: raise TestFailed, 'long() does not round properly'
+if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
+else: raise TestFailed, 'float() does not work properly'
+print '6.4.1 32-bit integers'
+if 12 + 24 <> 36: raise TestFailed, 'int op'
+if 12 + (-24) <> -12: raise TestFailed, 'int op'
+if (-12) + 24 <> 12: raise TestFailed, 'int op'
+if (-12) + (-24) <> -36: raise TestFailed, 'int op'
+if not 12 < 24: raise TestFailed, 'int op'
+if not -24 < -12: raise TestFailed, 'int op'
+# Test for a particular bug in integer multiply
+xsize, ysize, zsize = 238, 356, 4
+if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
+	raise TestFailed, 'int mul commutativity'
+print '6.4.2 Long integers'
+if 12L + 24L <> 36L: raise TestFailed, 'long op'
+if 12L + (-24L) <> -12L: raise TestFailed, 'long op'
+if (-12L) + 24L <> 12L: raise TestFailed, 'long op'
+if (-12L) + (-24L) <> -36L: raise TestFailed, 'long op'
+if not 12L < 24L: raise TestFailed, 'long op'
+if not -24L < -12L: raise TestFailed, 'long op'
+print '6.4.3 Floating point numbers'
+if 12.0 + 24.0 <> 36.0: raise TestFailed, 'float op'
+if 12.0 + (-24.0) <> -12.0: raise TestFailed, 'float op'
+if (-12.0) + 24.0 <> 12.0: raise TestFailed, 'float op'
+if (-12.0) + (-24.0) <> -36.0: raise TestFailed, 'float op'
+if not 12.0 < 24.0: raise TestFailed, 'float op'
+if not -24.0 < -12.0: raise TestFailed, 'float op'
+
+print '6.5 Sequence types'
+
+print '6.5.1 Strings'
+if len('') <> 0: raise TestFailed, 'len(\'\')'
+if len('a') <> 1: raise TestFailed, 'len(\'a\')'
+if len('abcdef') <> 6: raise TestFailed, 'len(\'abcdef\')'
+if 'xyz' + 'abcde' <> 'xyzabcde': raise TestFailed, 'string concatenation'
+if 'xyz'*3 <> 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
+if 0*'abcde' <> '': raise TestFailed, 'string repetition 0*'
+if min('abc') <> 'a' or max('abc') <> 'c': raise TestFailed, 'min/max string'
+if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
+else: raise TestFailed, 'in/not in string'
+x = 'x'*103
+if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
+
+print '6.5.2 Tuples'
+if len(()) <> 0: raise TestFailed, 'len(())'
+if len((1,)) <> 1: raise TestFailed, 'len((1,))'
+if len((1,2,3,4,5,6)) <> 6: raise TestFailed, 'len((1,2,3,4,5,6))'
+if (1,2)+(3,4) <> (1,2,3,4): raise TestFailed, 'tuple concatenation'
+if (1,2)*3 <> (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
+if 0*(1,2,3) <> (): raise TestFailed, 'tuple repetition 0*'
+if min((1,2)) <> 1 or max((1,2)) <> 2: raise TestFailed, 'min/max tuple'
+if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
+else: raise TestFailed, 'in/not in tuple'
+
+print '6.5.3 Lists'
+if len([]) <> 0: raise TestFailed, 'len([])'
+if len([1,]) <> 1: raise TestFailed, 'len([1,])'
+if len([1,2,3,4,5,6]) <> 6: raise TestFailed, 'len([1,2,3,4,5,6])'
+if [1,2]+[3,4] <> [1,2,3,4]: raise TestFailed, 'list concatenation'
+if [1,2]*3 <> [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
+if 0*[1,2,3] <> []: raise TestFailed, 'list repetition 0*'
+if min([1,2]) <> 1 or max([1,2]) <> 2: raise TestFailed, 'min/max list'
+if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
+else: raise TestFailed, 'in/not in list'
+
+print '6.5.3a Additional list operations'
+a = [0,1,2,3,4]
+a[0] = 5
+a[1] = 6
+a[2] = 7
+if a <> [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
+a[-2] = 8
+a[-1] = 9
+if a <> [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
+a[:2] = [0,4]
+a[-3:] = []
+a[1:1] = [1,2,3]
+if a <> [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
+del a[1:4]
+if a <> [0,4]: raise TestFailed, 'list slice deletion'
+del a[0]
+if a <> [4]: raise TestFailed, 'list item deletion [0]'
+del a[-1]
+if a <> []: raise TestFailed, 'list item deletion [-1]'
+a.append(0)
+a.append(1)
+a.append(2)
+if a <> [0,1,2]: raise TestFailed, 'list append'
+a.insert(0, -2)
+a.insert(1, -1)
+a.insert(2, 0)
+if a <> [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
+if a.count(0) <> 2: raise TestFailed, 'list count'
+if a.index(0) <> 2: raise TestFailed, 'list index'
+a.remove(0)
+if a <> [-2,-1,0,1,2]: raise TestFailed, 'list remove'
+a.reverse()
+if a <> [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
+a.sort()
+if a <> [-2,-1,0,1,2]: raise TestFailed, 'list sort'
+def revcmp(a, b): return cmp(b, a)
+a.sort(revcmp)
+if a <> [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
+
+print '6.6 Mappings == Dictionaries'
+d = {}
+if d.keys() <> []: raise TestFailed, '{}.keys()'
+if d.has_key('a') <> 0: raise TestFailed, '{}.has_key(\'a\')'
+if len(d) <> 0: raise TestFailed, 'len({})'
+d = {'a': 1, 'b': 2}
+if len(d) <> 2: raise TestFailed, 'len(dict)'
+k = d.keys()
+k.sort()
+if k <> ['a', 'b']: raise TestFailed, 'dict keys()'
+if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
+else: raise TestFailed, 'dict has_key()'
+if d['a'] <> 1 or d['b'] <> 2: raise TestFailed, 'dict item'
+d['c'] = 3
+d['a'] = 4
+if d['c'] <> 3 or d['a'] <> 4: raise TestFailed, 'dict item assignment'
+del d['b']
+if d <> {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
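
A side note on the sort-with-comparison-function test above: list.sort()
accepts a function of two arguments that behaves like the built-in cmp(),
returning a negative, zero or positive number.  A minimal sketch (the helper
name lencmp is made up for illustration and is not part of the test suite):

	def lencmp(a, b):
		# order strings by length, shortest first
		return cmp(len(a), len(b))

	words = ['pear', 'fig', 'banana']
	words.sort(lencmp)
	print words			# ['fig', 'pear', 'banana']
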
diff --git a/Lib/dos_8x3/tracebac.py b/Lib/dos_8x3/tracebac.py
new file mode 100755
index 0000000..8d2dfdc
--- /dev/null
+++ b/Lib/dos_8x3/tracebac.py
@@ -0,0 +1,125 @@
+# Format and print Python stack traces
+
+import linecache
+import string
+import sys
+import types
+
+def _print(file, str='', terminator='\n'):
+	file.write(str+terminator)
+
+
+def print_tb(tb, limit=None, file=None):
+	if not file:
+		file = sys.stderr
+	if limit is None:
+		if hasattr(sys, 'tracebacklimit'):
+			limit = sys.tracebacklimit
+	n = 0
+	while tb is not None and (limit is None or n < limit):
+		f = tb.tb_frame
+		lineno = tb.tb_lineno
+		co = f.f_code
+		filename = co.co_filename
+		name = co.co_name
+		_print(file,
+		       '  File "%s", line %d, in %s' % (filename,lineno,name))
+		line = linecache.getline(filename, lineno)
+		if line: _print(file, '    ' + string.strip(line))
+		tb = tb.tb_next
+		n = n+1
+
+def format_tb(tb, limit = None):
+	list = []
+	for filename, lineno, name, line in extract_tb(tb, limit):
+		item = '  File "%s", line %d, in %s\n' % (filename,lineno,name)
+		if line:
+			item = item + '    %s\n' % string.strip(line)
+		list.append(item)
+	return list
+
+def extract_tb(tb, limit = None):
+	if limit is None:
+		if hasattr(sys, 'tracebacklimit'):
+			limit = sys.tracebacklimit
+	list = []
+	n = 0
+	while tb is not None and (limit is None or n < limit):
+		f = tb.tb_frame
+		lineno = tb.tb_lineno
+		co = f.f_code
+		filename = co.co_filename
+		name = co.co_name
+		line = linecache.getline(filename, lineno)
+		if line: line = string.strip(line)
+		else: line = None
+		list.append((filename, lineno, name, line))
+		tb = tb.tb_next
+		n = n+1
+	return list
+
+
+def print_exception(etype, value, tb, limit=None, file=None):
+	if not file:
+		file = sys.stderr
+	if tb:
+		_print(file, 'Traceback (innermost last):')
+		print_tb(tb, limit, file)
+	lines = format_exception_only(etype, value)
+	for line in lines[:-1]:
+		_print(file, line, ' ')
+	_print(file, lines[-1], '')
+
+def format_exception(etype, value, tb, limit = None):
+	list = []
+	if tb:
+		list = ['Traceback (innermost last):\n'] + format_tb(tb, limit)
+	list = list + format_exception_only(etype, value)
+	return list
+
+def format_exception_only(etype, value):
+	list = []
+	if type(etype) == types.ClassType:
+		stype = etype.__name__
+	else:
+		stype = etype
+	if value is None:
+		list.append(str(stype) + '\n')
+	else:
+		if etype is SyntaxError:
+			try:
+				msg, (filename, lineno, offset, line) = value
+			except:
+				pass
+			else:
+				if not filename: filename = "<string>"
+				list.append('  File "%s", line %d\n' %
+					    (filename, lineno))
+				i = 0
+				while i < len(line) and \
+				      line[i] in string.whitespace:
+					i = i+1
+				list.append('    %s\n' % string.strip(line))
+				s = '    '
+				for c in line[i:offset-1]:
+					if c in string.whitespace:
+						s = s + c
+					else:
+						s = s + ' '
+				list.append('%s^\n' % s)
+				value = msg
+		list.append('%s: %s\n' % (str(stype), str(value)))
+	return list
+
+
+def print_exc(limit=None, file=None):
+	if not file:
+		file = sys.stderr
+	print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback,
+			limit, file)
+
+def print_last(limit=None, file=None):
+	if not file:
+		file = sys.stderr
+	print_exception(sys.last_type, sys.last_value, sys.last_traceback,
+			limit, file)
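
For reference, the formatting helpers above can be exercised like this -- a
minimal sketch, assuming the module is importable under its 8.3 name
"tracebac" and using a made-up function boom() to force an error:

	import sys
	import tracebac

	def boom():
		return 1/0		# force a ZeroDivisionError

	try:
		boom()
	except:
		# print_exc() writes the familiar interpreter-style report to stderr
		tracebac.print_exc()
		# format_exception() returns the same report as a list of strings
		for line in tracebac.format_exception(sys.exc_type, sys.exc_value,
						      sys.exc_traceback):
			sys.stderr.write(line)
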
diff --git a/Lib/dos_8x3/userdict.py b/Lib/dos_8x3/userdict.py
new file mode 100755
index 0000000..f6b2f82
--- /dev/null
+++ b/Lib/dos_8x3/userdict.py
@@ -0,0 +1,18 @@
+# A more or less complete user-defined wrapper around dictionary objects
+
+class UserDict:
+	def __init__(self): self.data = {}
+	def __repr__(self): return repr(self.data)
+	def __cmp__(self, dict):
+		if type(dict) == type(self.data):
+			return cmp(self.data, dict)
+		else:
+			return cmp(self.data, dict.data)
+	def __len__(self): return len(self.data)
+	def __getitem__(self, key): return self.data[key]
+	def __setitem__(self, key, item): self.data[key] = item
+	def __delitem__(self, key): del self.data[key]
+	def keys(self): return self.data.keys()
+	def items(self): return self.data.items()
+	def values(self): return self.data.values()
+	def has_key(self, key): return self.data.has_key(key)
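
The point of the wrapper is that, unlike the built-in dictionary type, it can
be subclassed.  A minimal sketch, assuming the module is importable as
"userdict" (the class name CaseInsensitiveDict is made up for illustration):

	import string
	from userdict import UserDict

	class CaseInsensitiveDict(UserDict):
		# store and look up every key in lower case
		def __setitem__(self, key, item):
			self.data[string.lower(key)] = item
		def __getitem__(self, key):
			return self.data[string.lower(key)]
		def has_key(self, key):
			return self.data.has_key(string.lower(key))

	d = CaseInsensitiveDict()
	d['Spam'] = 1
	print d['SPAM']			# prints 1
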
diff --git a/Lib/dos_8x3/userlist.py b/Lib/dos_8x3/userlist.py
new file mode 100755
index 0000000..1f19ad9
--- /dev/null
+++ b/Lib/dos_8x3/userlist.py
@@ -0,0 +1,50 @@
+# A more or less complete user-defined wrapper around list objects
+
+class UserList:
+	def __init__(self, list = None):
+		self.data = []
+		if list is not None:
+			if type(list) == type(self.data):
+				self.data[:] = list
+			else:
+				self.data[:] = list.data[:]
+	def __repr__(self): return repr(self.data)
+	def __cmp__(self, list):
+		if type(list) == type(self.data):
+			return cmp(self.data, list)
+		else:
+			return cmp(self.data, list.data)
+	def __len__(self): return len(self.data)
+	def __getitem__(self, i): return self.data[i]
+	def __setitem__(self, i, item): self.data[i] = item
+	def __delitem__(self, i): del self.data[i]
+	def __getslice__(self, i, j):
+		userlist = UserList()
+		userlist.data[:] = self.data[i:j]
+		return userlist
+	def __setslice__(self, i, j, list):
+		if type(list) == type(self.data):
+			self.data[i:j] = list
+		else:
+			self.data[i:j] = list.data
+	def __delslice__(self, i, j): del self.data[i:j]
+	def __add__(self, list):
+		if type(list) == type(self.data):
+			return self.__class__(self.data + list)
+		else:
+			return self.__class__(self.data + list.data)
+	def __radd__(self, list):
+		if type(list) == type(self.data):
+			return self.__class__(list + self.data)
+		else:
+			return self.__class__(list.data + self.data)
+	def __mul__(self, n):
+		return self.__class__(self.data*n)
+	__rmul__ = __mul__
+	def append(self, item): self.data.append(item)
+	def insert(self, i, item): self.data.insert(i, item)
+	def remove(self, item): self.data.remove(item)
+	def count(self, item): return self.data.count(item)
+	def index(self, item): return self.data.index(item)
+	def reverse(self): self.data.reverse()
+	def sort(self, *args): apply(self.data.sort, args)
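
As with UserDict, the value of this class is that it can be subclassed.  A
minimal sketch, assuming the module is importable as "userlist" (the class
name IntList is made up for illustration):

	from userlist import UserList

	class IntList(UserList):
		# a list that only accepts plain integers
		def append(self, item):
			if type(item) <> type(0):
				raise TypeError, 'IntList.append() wants an integer'
			self.data.append(item)

	a = IntList()
	a.append(3)
	a.append(1)
	a.sort()
	print a				# prints [1, 3]
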
diff --git a/Lib/dos_8x3/whatsoun.py b/Lib/dos_8x3/whatsoun.py
new file mode 100755
index 0000000..b7b349c
--- /dev/null
+++ b/Lib/dos_8x3/whatsoun.py
@@ -0,0 +1,270 @@
+# This module contains several routines that help recognize sound
+# files.
+#
+# Function whathdr() recognizes various types of sound file headers.
+# It understands almost all headers that SOX can decode.
+#
+# The return tuple contains the following items, in this order:
+# - file type (as SOX understands it)
+# - sampling rate (0 if unknown or hard to decode)
+# - number of channels (0 if unknown or hard to decode)
+# - number of frames in the file (-1 if unknown or hard to decode)
+# - number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
+#
+# If the file doesn't have a recognizable type, it returns None.
+# If the file can't be opened, IOError is raised.
+#
+# To compute the total time, divide the number of frames by the
+# sampling rate (a frame contains a sample for each channel).
+#
+# Function whatraw() calls the "whatsound" program and interprets its
+# output.  You'll have to guess the sampling rate by listening though!
+#
+# Function what() calls whathdr() and if it doesn't recognize the file
+# then calls whatraw().
+#
+# Finally, the function test() is a simple main program that calls
+# what() for all files mentioned on the argument list.  For directory
+# arguments it calls what() for all files in that directory.  Default
+# argument is "." (testing all files in the current directory).  The
+# option -r tells it to recurse down directories found inside
+# explicitly given directories.
+#
+# The file structure is top-down except that the test program and its
+# subroutine come last.
+
+
+#------------------------------------------------------#
+# Guess the type of any sound file, raw or with header #
+#------------------------------------------------------#
+
+def what(filename):
+	res = whathdr(filename)
+	if not res:
+		res = whatraw(filename)
+	return res
+
+
+#-----------------------------#
+# Guess the type of raw sound #
+#-----------------------------#
+
+def whatraw(filename):
+	# Assume it's always 1 channel, byte-sized samples
+	# Don't assume anything about the rate
+	import os
+	from stat import ST_SIZE
+	# XXX "whatsound" should be part of the distribution somehow...
+	cmd = 'whatsound ' + filename + ' 2>/dev/null'
+	cmd = 'PATH=$PATH:/ufs/guido/bin/sgi\n' + cmd
+	pipe = os.popen(cmd, 'r')
+	data = pipe.read()
+	sts = pipe.close()
+	if sts:
+		return None
+	if data[:13] == '-t raw -b -s ':
+		type = 'sb'
+		sample_size = 8
+	elif data[:13] == '-t raw -b -u ':
+		type = 'ub'
+		sample_size = 8
+	elif data[:13] == '-t raw -b -U ':
+		type = 'ul'
+		sample_size = 'U'
+	else:
+		return None
+	try:
+		frame_count = os.stat(filename)[ST_SIZE]
+	except os.error:
+		frame_count = -1
+	return type, 0, 1, frame_count, sample_size
+
+
+#-------------------------#
+# Recognize sound headers #
+#-------------------------#
+
+def whathdr(filename):
+	f = open(filename, 'rb')	# binary mode; text mode would mangle the header on DOS
+	h = f.read(512)
+	for tf in tests:
+		res = tf(h, f)
+		if res:
+			return res
+	return None
+
+
+#-----------------------------------#
+# Subroutines per sound header type #
+#-----------------------------------#
+
+tests = []
+
+def test_aifc(h, f):
+	import aifc
+	if h[:4] <> 'FORM':
+		return None
+	if h[8:12] == 'AIFC':
+		fmt = 'aifc'
+	elif h[8:12] == 'AIFF':
+		fmt = 'aiff'
+	else:
+		return None
+	f.seek(0)
+	try:
+		a = aifc.openfp(f, 'r')
+	except (EOFError, aifc.Error):
+		return None
+	return (fmt, a.getframerate(), a.getnchannels(), \
+		a.getnframes(), 8*a.getsampwidth())
+
+tests.append(test_aifc)
+
+
+def test_au(h, f):
+	if h[:4] == '.snd':
+		f = get_long_be
+	elif h[:4] in ('\0ds.', 'dns.'):
+		f = get_long_le
+	else:
+		return None
+	type = 'au'
+	hdr_size = f(h[4:8])
+	data_size = f(h[8:12])
+	encoding = f(h[12:16])
+	rate = f(h[16:20])
+	nchannels = f(h[20:24])
+	sample_size = 1 # default
+	if encoding == 1:
+		sample_bits = 'U'
+	elif encoding == 2:
+		sample_bits = 8
+	elif encoding == 3:
+		sample_bits = 16
+		sample_size = 2
+	else:
+		sample_bits = '?'
+	frame_size = sample_size * nchannels
+	return type, rate, nchannels, data_size/frame_size, sample_bits
+
+tests.append(test_au)
+
+
+def test_hcom(h, f):
+	if h[65:69] <> 'FSSD' or h[128:132] <> 'HCOM':
+		return None
+	divisor = get_long_be(h[128+16:128+20])
+	return 'hcom', 22050/divisor, 1, -1, 8
+
+tests.append(test_hcom)
+
+
+def test_voc(h, f):
+	if h[:20] <> 'Creative Voice File\032':
+		return None
+	sbseek = get_short_le(h[20:22])
+	rate = 0
+	if 0 <= sbseek < 500 and h[sbseek] == '\1':
+		ratecode = ord(h[sbseek+4])
+		rate = int(1000000.0 / (256 - ratecode))
+	return 'voc', rate, 1, -1, 8
+
+tests.append(test_voc)
+
+
+def test_wav(h, f):
+	# 'RIFF' <len> 'WAVE' 'fmt ' <len>
+	if h[:4] <> 'RIFF' or h[8:12] <> 'WAVE' or h[12:16] <> 'fmt ':
+		return None
+	style = get_short_le(h[20:22])
+	nchannels = get_short_le(h[22:24])
+	rate = get_long_le(h[24:28])
+	sample_bits = get_short_le(h[34:36])
+	return 'wav', rate, nchannels, -1, sample_bits
+
+tests.append(test_wav)
+
+
+def test_8svx(h, f):
+	if h[:4] <> 'FORM' or h[8:12] <> '8SVX':
+		return None
+	# Should decode it to get #channels -- assume always 1
+	return '8svx', 0, 1, 0, 8
+
+tests.append(test_8svx)
+
+
+def test_sndt(h, f):
+	if h[:5] == 'SOUND':
+		nsamples = get_long_le(h[8:12])
+		rate = get_short_le(h[20:22])
+		return 'sndt', rate, 1, nsamples, 8
+
+tests.append(test_sndt)
+
+
+def test_sndr(h, f):
+	if h[:2] == '\0\0':
+		rate = get_short_le(h[2:4])
+		if 4000 <= rate <= 25000:
+			return 'sndr', rate, 1, -1, 8
+
+tests.append(test_sndr)
+
+
+#---------------------------------------------#
+# Subroutines to extract numbers from strings #
+#---------------------------------------------#
+
+def get_long_be(s):
+	return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3])
+
+def get_long_le(s):
+	return (ord(s[3])<<24) | (ord(s[2])<<16) | (ord(s[1])<<8) | ord(s[0])
+
+def get_short_be(s):
+	return (ord(s[0])<<8) | ord(s[1])
+
+def get_short_le(s):
+	return (ord(s[1])<<8) | ord(s[0])
+
+
+#--------------------#
+# Small test program #
+#--------------------#
+
+def test():
+	import sys
+	recursive = 0
+	if sys.argv[1:] and sys.argv[1] == '-r':
+		del sys.argv[1:2]
+		recursive = 1
+	try:
+		if sys.argv[1:]:
+			testall(sys.argv[1:], recursive, 1)
+		else:
+			testall(['.'], recursive, 1)
+	except KeyboardInterrupt:
+		sys.stderr.write('\n[Interrupted]\n')
+		sys.exit(1)
+
+def testall(list, recursive, toplevel):
+	import sys
+	import os
+	for filename in list:
+		if os.path.isdir(filename):
+			print filename + '/:',
+			if recursive or toplevel:
+				print 'recursing down:'
+				import glob
+				names = glob.glob(os.path.join(filename, '*'))
+				testall(names, recursive, 0)
+			else:
+				print '*** directory (use -r) ***'
+		else:
+			print filename + ':',
+			sys.stdout.flush()
+			try:
+				print what(filename)
+			except IOError:
+				print '*** not found ***'
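
Typical use of the header recognizer from code (rather than through the test()
driver) might look like this -- a minimal sketch, assuming the module is
importable as "whatsoun" and that a local file 'sample.wav' exists:

	import whatsoun

	res = whatsoun.whathdr('sample.wav')
	if res:
		type, rate, nchannels, nframes, bits = res
		print 'type:', type, 'rate:', rate, 'channels:', nchannels
		# total time = number of frames / sampling rate
		if nframes > 0 and rate > 0:
			print 'duration:', float(nframes) / rate, 'seconds'
	else:
		print 'sound header not recognized'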