Add next() and __iter__() methods to StreamReader, StreamReaderWriter
and StreamRecoder, so all three support the iterator protocol.

This closes SF bug #634246.
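
For example, after this patch the wrapped stream returned by
codecs.open() can be consumed directly in a for-loop (a minimal
sketch; the file name and encoding are illustrative):

    import codecs

    # codecs.open() returns a StreamReaderWriter wrapping the file;
    # iterating over it yields one decoded line per step until
    # readline() returns an empty string and StopIteration is raised.
    f = codecs.open('example.txt', 'r', 'utf-8')
    for line in f:
        print line.rstrip()
    f.close()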
diff --git a/Lib/codecs.py b/Lib/codecs.py
index 40f0a2e..0b43a72 100644
--- a/Lib/codecs.py
+++ b/Lib/codecs.py
@@ -299,6 +299,17 @@
         """
         pass
 
+    def next(self):
+
+        """ Return the next decoded line from the input stream."""
+        line = self.readline()
+        if line:
+            return line
+        raise StopIteration
+
+    def __iter__(self):
+        return self
+
     def __getattr__(self, name,
                     getattr=getattr):
 
@@ -351,6 +362,14 @@
 
         return self.reader.readlines(sizehint)
 
+    def next(self):
+
+        """ Return the next decoded line from the input stream."""
+        return self.reader.next()
+
+    def __iter__(self):
+        return self
+
     def write(self, data):
 
         return self.writer.write(data)
@@ -451,6 +470,16 @@
         data, bytesencoded = self.encode(data, self.errors)
         return data.splitlines(1)
 
+    def next(self):
+
+        """ Return the next decoded line from the input stream."""
+        data = self.reader.next()
+        data, bytesencoded = self.encode(data, self.errors)
+        return data
+
+    def __iter__(self):
+        return self
+
     def write(self, data):
 
         data, bytesdecoded = self.decode(data, self.errors)
diff --git a/Misc/NEWS b/Misc/NEWS
index cccaff5..4fc52ea 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -362,6 +362,9 @@
 Library
 -------
 
+- StreamReader, StreamReaderWriter and StreamRecoder in the codecs
+  module are now iterators.
+
 - gzip.py now handles files exceeding 2GB.  Files over 4GB also work
   now (provided the OS supports it, and Python is configured with large
   file support), but in that case the underlying gzip file format can