If a file is opened with an explicit buffer size >= 1, repeated
close() calls would attempt to free() the buffer already free()ed on
the first close().     [bug introduced with patch #788249]

Making sure that the buffer is free()ed in file object deallocation is
a belt-and-braces bit of insurance against a memory leak.
diff --git a/Lib/test/test_file.py b/Lib/test/test_file.py
index b8bcab7..22db9a2 100644
--- a/Lib/test/test_file.py
+++ b/Lib/test/test_file.py
@@ -109,6 +109,23 @@
 if not f.closed:
     raise TestFailed, 'file.closed should be true'
 
+# make sure that explicitly setting the buffer size doesn't cause
+# misbehaviour especially with repeated close() calls
+for s in (-1, 0, 1, 512):
+    try:
+        f = open(TESTFN, 'w', s)
+        f.write(str(s))
+        f.close()
+        f.close()
+        f = open(TESTFN, 'r', s)
+        d = int(f.read())
+        f.close()
+        f.close()
+    except IOError, msg:
+        raise TestFailed, 'error setting buffer size %d: %s' % (s, str(msg))
+    if d != s:
+        raise TestFailed, 'readback failure using buffer size %d' % s
+
 methods = ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
            'readline', 'readlines', 'seek', 'tell', 'truncate', 'write',
            'xreadlines', '__iter__']