Issue #1159051: GzipFile now raises EOFError when reading a corrupted file
with a truncated header or footer.
Added tests for reading truncated gzip and bzip2 files.
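
As an illustration of the new behaviour (not part of the patch; the sample
data and the use of StringIO below are only for the example), reading a gzip
stream whose 8-byte CRC/size trailer has been cut off now fails with a clean
EOFError:

    import gzip
    from StringIO import StringIO

    # Build a valid gzip stream in memory, then chop off its 8-byte trailer.
    buf = StringIO()
    f = gzip.GzipFile(fileobj=buf, mode='wb')
    f.write('hello world' * 100)
    f.close()
    truncated = buf.getvalue()[:-8]

    try:
        gzip.GzipFile(fileobj=StringIO(truncated), mode='rb').read()
    except EOFError, e:
        print e   # "Compressed file ended before the end-of-stream marker was reached"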
diff --git a/Lib/gzip.py b/Lib/gzip.py
index a613bae..a2f2367 100644
--- a/Lib/gzip.py
+++ b/Lib/gzip.py
@@ -21,9 +21,6 @@
# or unsigned.
output.write(struct.pack("<L", value))
-def read32(input):
- return struct.unpack("<I", input.read(4))[0]
-
def open(filename, mode="rb", compresslevel=9):
"""Shorthand for GzipFile(filename, mode, compresslevel).
@@ -184,24 +181,28 @@
self.crc = zlib.crc32("") & 0xffffffffL
self.size = 0
+ def _read_exact(self, n):
+ data = self.fileobj.read(n)
+ while len(data) < n:
+ b = self.fileobj.read(n - len(data))
+ if not b:
+ raise EOFError("Compressed file ended before the "
+ "end-of-stream marker was reached")
+ data += b
+ return data
+
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic != '\037\213':
raise IOError, 'Not a gzipped file'
- method = ord( self.fileobj.read(1) )
+
+ method, flag, self.mtime = struct.unpack("<BBIxx", self._read_exact(8))
if method != 8:
raise IOError, 'Unknown compression method'
- flag = ord( self.fileobj.read(1) )
- self.mtime = read32(self.fileobj)
- # extraflag = self.fileobj.read(1)
- # os = self.fileobj.read(1)
- self.fileobj.read(2)
if flag & FEXTRA:
# Read & discard the extra field, if present
- xlen = ord(self.fileobj.read(1))
- xlen = xlen + 256*ord(self.fileobj.read(1))
- self.fileobj.read(xlen)
+            self._read_exact(struct.unpack("<H", self._read_exact(2))[0])
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
@@ -215,7 +216,7 @@
if not s or s=='\000':
break
if flag & FHCRC:
- self.fileobj.read(2) # Read & discard the 16-bit header CRC
+ self._read_exact(2) # Read & discard the 16-bit header CRC
def write(self,data):
self._check_closed()
@@ -249,20 +250,16 @@
readsize = 1024
if size < 0: # get the whole thing
- try:
- while True:
- self._read(readsize)
- readsize = min(self.max_read_chunk, readsize * 2)
- except EOFError:
- size = self.extrasize
+ while self._read(readsize):
+ readsize = min(self.max_read_chunk, readsize * 2)
+ size = self.extrasize
else: # just get some more of it
- try:
- while size > self.extrasize:
- self._read(readsize)
- readsize = min(self.max_read_chunk, readsize * 2)
- except EOFError:
- if size > self.extrasize:
- size = self.extrasize
+ while size > self.extrasize:
+ if not self._read(readsize):
+ if size > self.extrasize:
+ size = self.extrasize
+ break
+ readsize = min(self.max_read_chunk, readsize * 2)
offset = self.offset - self.extrastart
chunk = self.extrabuf[offset: offset + size]
@@ -277,7 +274,7 @@
def _read(self, size=1024):
if self.fileobj is None:
- raise EOFError, "Reached EOF"
+ return False
if self._new_member:
# If the _new_member flag is set, we have to
@@ -288,7 +285,7 @@
pos = self.fileobj.tell() # Save current position
self.fileobj.seek(0, 2) # Seek to end of file
if pos == self.fileobj.tell():
- raise EOFError, "Reached EOF"
+ return False
else:
self.fileobj.seek( pos ) # Return to original position
@@ -305,9 +302,10 @@
if buf == "":
uncompress = self.decompress.flush()
+ self.fileobj.seek(-len(self.decompress.unused_data), 1)
self._read_eof()
self._add_read_data( uncompress )
- raise EOFError, 'Reached EOF'
+ return False
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
@@ -317,13 +315,14 @@
# so seek back to the start of the unused data, finish up
# this member, and read a new gzip header.
# (The number of bytes to seek back is the length of the unused
- # data, minus 8 because _read_eof() will rewind a further 8 bytes)
- self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
+ # data)
+ self.fileobj.seek(-len(self.decompress.unused_data), 1)
# Check the CRC and file size, and set the flag so we read
# a new member on the next call
self._read_eof()
self._new_member = True
+ return True
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
@@ -334,14 +333,11 @@
self.size = self.size + len(data)
def _read_eof(self):
- # We've read to the end of the file, so we have to rewind in order
- # to reread the 8 bytes containing the CRC and the file size.
+ # We've read to the end of the file.
# We check the that the computed CRC and size of the
# uncompressed data matches the stored values. Note that the size
# stored is the true file size mod 2**32.
- self.fileobj.seek(-8, 1)
- crc32 = read32(self.fileobj)
- isize = read32(self.fileobj) # may exceed 2GB
+ crc32, isize = struct.unpack("<II", self._read_exact(8))
if crc32 != self.crc:
raise IOError("CRC check failed %s != %s" % (hex(crc32),
hex(self.crc)))
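
The tests mentioned in the description are not part of this diff; the
following is only a rough sketch of the kind of gzip check they correspond to
(the class and method names are made up here, and the bzip2 case is analogous
but not shown):

    import gzip
    import unittest
    from StringIO import StringIO

    class TestTruncatedGzip(unittest.TestCase):
        # Illustrative sketch only -- not the test code added by the patch.

        def _gzipped(self, data):
            # Compress data into an in-memory gzip stream.
            buf = StringIO()
            f = gzip.GzipFile(fileobj=buf, mode='wb')
            f.write(data)
            f.close()
            return buf.getvalue()

        def test_read_truncated(self):
            stream = self._gzipped('spam and eggs ' * 200)
            # Cutting the stream anywhere past the 2-byte magic number --
            # in the header, the compressed body or the 8-byte trailer --
            # should now raise EOFError instead of some unrelated error.
            for i in range(2, len(stream)):
                f = gzip.GzipFile(fileobj=StringIO(stream[:i]), mode='rb')
                self.assertRaises(EOFError, f.read)

    if __name__ == '__main__':
        unittest.main()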