Whitespace standardization.
diff --git a/Lib/asyncore.py b/Lib/asyncore.py
index eaeb2cd..8b585d4 100644
--- a/Lib/asyncore.py
+++ b/Lib/asyncore.py
@@ -1,12 +1,12 @@
 # -*- Mode: Python -*-
-#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp 
+#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
 #   Author: Sam Rushing <rushing@nightmare.com>
 
 # ======================================================================
 # Copyright 1996 by Sam Rushing
-# 
+#
 #                         All Rights Reserved
-# 
+#
 # Permission to use, copy, modify, and distribute this software and
 # its documentation for any purpose and without fee is hereby
 # granted, provided that the above copyright notice appear in all
@@ -15,7 +15,7 @@
 # Rushing not be used in advertising or publicity pertaining to
 # distribution of the software without specific, written prior
 # permission.
-# 
+#
 # SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 # INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
 # NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -28,22 +28,22 @@
 """Basic infrastructure for asynchronous socket service clients and servers.
 
 There are only two ways to have a program on a single processor do "more
-than one thing at a time".  Multi-threaded programming is the simplest and 
+than one thing at a time".  Multi-threaded programming is the simplest and
 most popular way to do it, but there is another very different technique,
 that lets you have nearly all the advantages of multi-threading, without
 actually using multiple threads. it's really only practical if your program
 is largely I/O bound. If your program is CPU bound, then pre-emptive
 scheduled threads are probably what you really need. Network servers are
-rarely CPU-bound, however. 
+rarely CPU-bound, however.
 
-If your operating system supports the select() system call in its I/O 
+If your operating system supports the select() system call in its I/O
 library (and nearly all do), then you can use it to juggle multiple
 communication channels at once; doing other work while your I/O is taking
 place in the "background."  Although this strategy can seem strange and
 complex, especially at first, it is in many ways easier to understand and
 control than multi-threaded programming. The module documented here solves
 many of the difficult problems for you, making the task of building
-sophisticated high-performance network servers and clients a snap. 
+sophisticated high-performance network servers and clients a snap.
 """
 
 import exceptions
@@ -191,7 +191,7 @@
                 ar = repr(self.addr)
             except:
                 ar = 'no self.addr!'
-                
+
             return '<__repr__ (self) failed for object at %x (addr=%s)>' % (id(self),ar)
 
     def add_channel (self, map=None):
@@ -324,7 +324,7 @@
 
     # log and log_info maybe overriden to provide more sophisitcated
     # logging and warning methods. In general, log is for 'hit' logging
-    # and 'log_info' is for informational, warning and error logging. 
+    # and 'log_info' is for informational, warning and error logging.
 
     def log (self, message):
         sys.stderr.write ('log: %s\n' % str(message))
@@ -433,7 +433,7 @@
     while 1:
         tbinfo.append ((
             tb.tb_frame.f_code.co_filename,
-            tb.tb_frame.f_code.co_name,             
+            tb.tb_frame.f_code.co_name,
             str(tb.tb_lineno)
             ))
         tb = tb.tb_next
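
For context on the programming model the docstring above describes, here is a minimal, illustrative sketch of how asyncore is typically used: a dispatcher-based echo server driven by the module's select() loop. This is not part of the change above; the class names and the localhost:8080 address are placeholders chosen for this example.

    import asyncore
    import socket

    class EchoHandler(asyncore.dispatcher):
        # Called by the loop when the peer's socket is readable;
        # echo whatever data arrives straight back to the peer.
        def handle_read(self):
            data = self.recv(8192)
            if data:
                self.send(data)

    class EchoServer(asyncore.dispatcher):
        def __init__(self, host, port):
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind((host, port))
            self.listen(5)

        # Called by the loop when a new connection is ready to accept;
        # hand the accepted socket to its own dispatcher.
        def handle_accept(self):
            pair = self.accept()
            if pair is not None:
                sock, addr = pair
                EchoHandler(sock)

    server = EchoServer('localhost', 8080)
    asyncore.loop()

asyncore.loop() runs the select() loop over the default socket map, dispatching readable and writable events to each registered channel, which is the "juggling multiple communication channels at once" the docstring refers to.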