"""Interface to the libbzip2 compression library.

This module provides a file interface, classes for incremental
(de)compression, and functions for one-shot (de)compression.
"""

__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor",
           "open", "compress", "decompress"]

__author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"

import io
import warnings

try:
    from threading import RLock
except ImportError:
    from dummy_threading import RLock

from _bz2 import BZ2Compressor, BZ2Decompressor


_MODE_CLOSED = 0
_MODE_READ = 1
_MODE_READ_EOF = 2
_MODE_WRITE = 3

_BUFFER_SIZE = 8192

_builtin_open = open

33class BZ2File(io.BufferedIOBase):
34
35 """A file object providing transparent bzip2 (de)compression.
36
37 A BZ2File can act as a wrapper for an existing file object, or refer
38 directly to a named file on disk.
39
40 Note that BZ2File provides a *binary* file interface - data read is
41 returned as bytes, and data to be written should be given as bytes.
42 """
43
Nadeem Vawdaaebcdba2012-06-04 23:31:20 +020044 def __init__(self, filename, mode="r", buffering=None, compresslevel=9):
Antoine Pitrou37dc5f82011-04-03 17:05:46 +020045 """Open a bzip2-compressed file.
46
        If filename is a str or bytes object, it gives the name of the file to
        be opened. Otherwise, it should be a file object, which will be used to
        read or write the compressed data.

        mode can be 'r' for reading (default), 'w' for (over)writing, or 'a' for
        appending. These can equivalently be given as 'rb', 'wb', and 'ab'.

        buffering is ignored. Its use is deprecated.

        If mode is 'w' or 'a', compresslevel can be a number between 1
        and 9 specifying the level of compression: 1 produces the least
        compression, and 9 (default) produces the most compression.

        If mode is 'r', the input file may be the concatenation of
        multiple compressed streams.
        """
        # This lock must be recursive, so that BufferedIOBase's
        # readline(), readlines() and writelines() don't deadlock.
        self._lock = RLock()
        self._fp = None
        self._closefp = False
        self._mode = _MODE_CLOSED
        self._pos = 0
        self._size = -1

        if buffering is not None:
            warnings.warn("Use of 'buffering' argument is deprecated",
                          DeprecationWarning)

        if not (1 <= compresslevel <= 9):
            raise ValueError("compresslevel must be between 1 and 9")

        if mode in ("", "r", "rb"):
            mode = "rb"
            mode_code = _MODE_READ
            self._decompressor = BZ2Decompressor()
            self._buffer = b""
            self._buffer_offset = 0
        elif mode in ("w", "wb"):
            mode = "wb"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor(compresslevel)
        elif mode in ("a", "ab"):
            mode = "ab"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor(compresslevel)
        else:
            raise ValueError("Invalid mode: %r" % (mode,))

        if isinstance(filename, (str, bytes)):
            self._fp = _builtin_open(filename, mode)
            self._closefp = True
            self._mode = mode_code
        elif hasattr(filename, "read") or hasattr(filename, "write"):
            self._fp = filename
            self._mode = mode_code
        else:
            raise TypeError("filename must be a str or bytes object, or a file")

    def close(self):
        """Flush and close the file.

        May be called more than once without error. Once the file is
        closed, any other operation on it will raise a ValueError.
        """
        with self._lock:
            if self._mode == _MODE_CLOSED:
                return
            try:
                if self._mode in (_MODE_READ, _MODE_READ_EOF):
                    self._decompressor = None
                elif self._mode == _MODE_WRITE:
                    self._fp.write(self._compressor.flush())
                    self._compressor = None
            finally:
                try:
                    if self._closefp:
                        self._fp.close()
                finally:
                    self._fp = None
                    self._closefp = False
                    self._mode = _MODE_CLOSED
                    self._buffer = b""
                    self._buffer_offset = 0

    @property
    def closed(self):
        """True if this file is closed."""
        return self._mode == _MODE_CLOSED

    def fileno(self):
        """Return the file descriptor for the underlying file."""
        self._check_not_closed()
        return self._fp.fileno()

    def seekable(self):
        """Return whether the file supports seeking."""
        return self.readable() and self._fp.seekable()

    def readable(self):
        """Return whether the file was opened for reading."""
        self._check_not_closed()
        return self._mode in (_MODE_READ, _MODE_READ_EOF)

    def writable(self):
        """Return whether the file was opened for writing."""
        self._check_not_closed()
        return self._mode == _MODE_WRITE

    # Mode-checking helper functions.

    def _check_not_closed(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def _check_can_read(self):
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for reading")

    def _check_can_write(self):
        if self._mode != _MODE_WRITE:
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for writing")

    def _check_can_seek(self):
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            self._check_not_closed()
            raise io.UnsupportedOperation("Seeking is only supported "
                                          "on files open for reading")
        if not self._fp.seekable():
            raise io.UnsupportedOperation("The underlying file object "
                                          "does not support seeking")

    # Fill the readahead buffer if it is empty. Returns False on EOF.
    def _fill_buffer(self):
        if self._mode == _MODE_READ_EOF:
            return False
        # Depending on the input data, our call to the decompressor may not
        # return any data. In this case, try again after reading another block.
        while self._buffer_offset == len(self._buffer):
            rawblock = (self._decompressor.unused_data or
                        self._fp.read(_BUFFER_SIZE))

            if not rawblock:
                if self._decompressor.eof:
                    # End-of-stream marker and end of file. We're good.
                    self._mode = _MODE_READ_EOF
                    self._size = self._pos
                    return False
                else:
                    # Problem - we were expecting more compressed data.
                    raise EOFError("Compressed file ended before the "
                                   "end-of-stream marker was reached")

            if self._decompressor.eof:
                # Continue to next stream.
                self._decompressor = BZ2Decompressor()

            self._buffer = self._decompressor.decompress(rawblock)
            self._buffer_offset = 0
        return True

    # Read data until EOF.
    # If return_data is false, consume the data without returning it.
    def _read_all(self, return_data=True):
        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset:]
        self._buffer_offset = 0

        blocks = []
        while self._fill_buffer():
            if return_data:
                blocks.append(self._buffer)
            self._pos += len(self._buffer)
            self._buffer = b""
        if return_data:
            return b"".join(blocks)

    # Read a block of up to n bytes.
    # If return_data is false, consume the data without returning it.
    def _read_block(self, n, return_data=True):
        # If we have enough data buffered, return immediately.
        end = self._buffer_offset + n
        if end <= len(self._buffer):
            data = self._buffer[self._buffer_offset : end]
            self._buffer_offset = end
            self._pos += len(data)
            return data if return_data else None

        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset:]
        self._buffer_offset = 0

        blocks = []
        while n > 0 and self._fill_buffer():
            if n < len(self._buffer):
                data = self._buffer[:n]
                self._buffer_offset = n
            else:
                data = self._buffer
                self._buffer = b""
            if return_data:
                blocks.append(data)
            self._pos += len(data)
            n -= len(data)
        if return_data:
            return b"".join(blocks)

    def peek(self, n=0):
        """Return buffered data without advancing the file position.

        Always returns at least one byte of data, unless at EOF.
        The exact number of bytes returned is unspecified.
        """
        with self._lock:
            self._check_can_read()
            if not self._fill_buffer():
                return b""
            return self._buffer[self._buffer_offset:]

    def read(self, size=-1):
        """Read up to size uncompressed bytes from the file.

        If size is negative or omitted, read until EOF is reached.
        Returns b'' if the file is already at EOF.
        """
        with self._lock:
            self._check_can_read()
            if size == 0:
                return b""
            elif size < 0:
                return self._read_all()
            else:
                return self._read_block(size)

    def read1(self, size=-1):
        """Read up to size uncompressed bytes, while trying to avoid
        making multiple reads from the underlying stream.

        Returns b'' if the file is at EOF.
        """
        # Usually, read1() calls _fp.read() at most once. However, sometimes
        # this does not give enough data for the decompressor to make progress.
        # In this case we make multiple reads, to avoid returning b"".
        with self._lock:
            self._check_can_read()
            if (size == 0 or
                # Only call _fill_buffer() if the buffer is actually empty.
                # This gives a significant speedup if *size* is small.
                (self._buffer_offset == len(self._buffer) and not self._fill_buffer())):
                return b""
            if size > 0:
                data = self._buffer[self._buffer_offset :
                                    self._buffer_offset + size]
                self._buffer_offset += len(data)
            else:
                data = self._buffer[self._buffer_offset:]
                self._buffer = b""
                self._buffer_offset = 0
            self._pos += len(data)
            return data

    def readinto(self, b):
        """Read up to len(b) bytes into b.

        Returns the number of bytes read (0 for EOF).
        """
        with self._lock:
            return io.BufferedIOBase.readinto(self, b)

    def readline(self, size=-1):
        """Read a line of uncompressed bytes from the file.

        The terminating newline (if present) is retained. If size is
        non-negative, no more than size bytes will be read (in which
        case the line may be incomplete). Returns b'' if already at EOF.
        """
        if not isinstance(size, int):
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        with self._lock:
            self._check_can_read()
            # Shortcut for the common case - the whole line is in the buffer.
            if size < 0:
                end = self._buffer.find(b"\n", self._buffer_offset) + 1
                if end > 0:
                    line = self._buffer[self._buffer_offset : end]
                    self._buffer_offset = end
                    self._pos += len(line)
                    return line
            return io.BufferedIOBase.readline(self, size)

    def readlines(self, size=-1):
        """Read a list of lines of uncompressed bytes from the file.

        size can be specified to control the number of lines read: no
        further lines will be read once the total size of the lines read
        so far equals or exceeds size.
        """
        if not isinstance(size, int):
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        with self._lock:
            return io.BufferedIOBase.readlines(self, size)

    def write(self, data):
        """Write a byte string to the file.

        Returns the number of uncompressed bytes written, which is
        always len(data). Note that due to buffering, the file on disk
        may not reflect the data written until close() is called.
        """
        with self._lock:
            self._check_can_write()
            compressed = self._compressor.compress(data)
            self._fp.write(compressed)
            self._pos += len(data)
            return len(data)

    def writelines(self, seq):
        """Write a sequence of byte strings to the file.

        Returns the number of uncompressed bytes written.
        seq can be any iterable yielding byte strings.

        Line separators are not added between the written byte strings.
        """
        with self._lock:
            return io.BufferedIOBase.writelines(self, seq)

    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        self._fp.seek(0, 0)
        self._mode = _MODE_READ
        self._pos = 0
        self._decompressor = BZ2Decompressor()
        self._buffer = b""
        self._buffer_offset = 0

    def seek(self, offset, whence=0):
        """Change the file position.

        The new position is specified by offset, relative to the
        position indicated by whence. Values for whence are:

            0: start of stream (default); offset must not be negative
            1: current stream position
            2: end of stream; offset must not be positive

        Returns the new file position.

        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        with self._lock:
            self._check_can_seek()

            # Recalculate offset as an absolute file position.
            if whence == 0:
                pass
            elif whence == 1:
                offset = self._pos + offset
            elif whence == 2:
                # Seeking relative to EOF - we need to know the file's size.
                if self._size < 0:
                    self._read_all(return_data=False)
                offset = self._size + offset
            else:
                raise ValueError("Invalid value for whence: %s" % (whence,))

            # Make it so that offset is the number of bytes to skip forward.
            if offset < self._pos:
                self._rewind()
            else:
                offset -= self._pos

            # Read and discard data until we reach the desired position.
            self._read_block(offset, return_data=False)

            return self._pos

    def tell(self):
        """Return the current file position."""
        with self._lock:
            self._check_not_closed()
            return self._pos
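

# Illustrative usage sketch, not part of the module's API: round-trip some
# data through BZ2File using an in-memory file object as the underlying
# stream. The helper name _example_bz2file_roundtrip is hypothetical and is
# never called at import time.
def _example_bz2file_roundtrip():
    buf = io.BytesIO()
    # BZ2File accepts an existing file object; writes are compressed into buf.
    with BZ2File(buf, "w") as f:
        f.write(b"Hello, bzip2!\n" * 3)
    buf.seek(0)
    # Reading decompresses transparently; seek() is emulated and may be slow.
    with BZ2File(buf, "r") as f:
        first_line = f.readline()
        f.seek(0)
        assert f.read() == b"Hello, bzip2!\n" * 3
    return first_line
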
def open(filename, mode="rb", compresslevel=9,
         encoding=None, errors=None, newline=None):
    """Open a bzip2-compressed file in binary or text mode.

    The filename argument can be an actual filename (a str or bytes object), or
    an existing file object to read from or write to.

    The mode argument can be "r", "rb", "w", "wb", "a" or "ab" for binary mode,
    or "rt", "wt" or "at" for text mode. The default mode is "rb", and the
    default compresslevel is 9.

    For binary mode, this function is equivalent to the BZ2File constructor:
    BZ2File(filename, mode, compresslevel). In this case, the encoding, errors
    and newline arguments must not be provided.

    For text mode, a BZ2File object is created, and wrapped in an
    io.TextIOWrapper instance with the specified encoding, error handling
    behavior, and line ending(s).

    """
    if "t" in mode:
        if "b" in mode:
            raise ValueError("Invalid mode: %r" % (mode,))
    else:
        if encoding is not None:
            raise ValueError("Argument 'encoding' not supported in binary mode")
        if errors is not None:
            raise ValueError("Argument 'errors' not supported in binary mode")
        if newline is not None:
            raise ValueError("Argument 'newline' not supported in binary mode")

    bz_mode = mode.replace("t", "")
    binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel)

    if "t" in mode:
        return io.TextIOWrapper(binary_file, encoding, errors, newline)
    else:
        return binary_file
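

# Illustrative sketch, not part of the module's API: in text mode, open()
# wraps the BZ2File in an io.TextIOWrapper, so str objects are read and
# written. The helper name and the path "example.txt.bz2" are hypothetical.
def _example_open_text_mode():
    with open("example.txt.bz2", "wt", encoding="utf-8") as f:
        f.write("first line\nsecond line\n")
    with open("example.txt.bz2", "rt", encoding="utf-8") as f:
        return f.readlines()
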
def compress(data, compresslevel=9):
    """Compress a block of data.

    compresslevel, if given, must be a number between 1 and 9.

    For incremental compression, use a BZ2Compressor object instead.
    """
    comp = BZ2Compressor(compresslevel)
    return comp.compress(data) + comp.flush()
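

# Illustrative sketch, not part of the module's API: one-shot compression of
# an in-memory byte string, verified against decompress() defined below. The
# helper name _example_oneshot is hypothetical.
def _example_oneshot():
    original = b"one-shot compression " * 100
    blob = compress(original, compresslevel=1)
    assert decompress(blob) == original
    return len(blob)
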
def decompress(data):
    """Decompress a block of data.

    For incremental decompression, use a BZ2Decompressor object instead.
    """
    if len(data) == 0:
        return b""

    results = []
    while True:
        decomp = BZ2Decompressor()
        results.append(decomp.decompress(data))
        if not decomp.eof:
            raise ValueError("Compressed data ended before the "
                             "end-of-stream marker was reached")
        if not decomp.unused_data:
            return b"".join(results)
        # There is unused data left over. Proceed to next stream.
        data = decomp.unused_data
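

# Illustrative sketch, not part of the module's API: decompress() accepts the
# concatenation of multiple independently compressed streams (as produced,
# for example, by concatenating separately bzip2-compressed files) and
# decompresses all of them. The helper name _example_multistream is
# hypothetical.
def _example_multistream():
    blob = compress(b"first stream\n") + compress(b"second stream\n")
    assert decompress(blob) == b"first stream\nsecond stream\n"
    return blob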