Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1 | """ |
| 2 | Python implementation of the io module. |
| 3 | """ |
| 4 | |
Benjamin Peterson | fed4abc | 2010-04-27 21:17:22 +0000 | [diff] [blame] | 5 | from __future__ import (print_function, unicode_literals) |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 6 | |
| 7 | import os |
| 8 | import abc |
| 9 | import codecs |
| 10 | import warnings |
Antoine Pitrou | 5aa7df3 | 2011-11-21 20:16:44 +0100 | [diff] [blame] | 11 | import errno |
Benjamin Peterson | 5e9cc5e | 2010-04-27 21:15:28 +0000 | [diff] [blame] | 12 | # Import thread instead of threading to reduce startup cost |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 13 | try: |
| 14 | from thread import allocate_lock as Lock |
| 15 | except ImportError: |
| 16 | from dummy_thread import allocate_lock as Lock |
| 17 | |
| 18 | import io |
Benjamin Peterson | 2773725 | 2010-04-27 21:18:30 +0000 | [diff] [blame] | 19 | from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END) |
Antoine Pitrou | 6439c00 | 2011-02-25 21:35:47 +0000 | [diff] [blame] | 20 | from errno import EINTR |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 21 | |
| 22 | __metaclass__ = type |
| 23 | |
| 24 | # open() uses st_blksize whenever we can |
| 25 | DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes |
| 26 | |
| 27 | # NOTE: Base classes defined here are registered with the "official" ABCs |
| 28 | # defined in io.py. We don't use real inheritance though, because we don't |
| 29 | # want to inherit the C implementations. |
| 30 | |
| 31 | |
| 32 | class BlockingIOError(IOError): |
| 33 | |
| 34 | """Exception raised when I/O would block on a non-blocking I/O stream.""" |
| 35 | |
| 36 | def __init__(self, errno, strerror, characters_written=0): |
| 37 | super(IOError, self).__init__(errno, strerror) |
| 38 | if not isinstance(characters_written, (int, long)): |
| 39 | raise TypeError("characters_written must be an integer") |
| 40 | self.characters_written = characters_written |
| 41 | |
| 42 | |
Benjamin Peterson | a9bd6d5 | 2010-04-27 21:01:54 +0000 | [diff] [blame] | 43 | def open(file, mode="r", buffering=-1, |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 44 | encoding=None, errors=None, |
| 45 | newline=None, closefd=True): |
| 46 | |
| 47 | r"""Open file and return a stream. Raise IOError upon failure. |
| 48 | |
| 49 | file is either a text or byte string giving the name (and the path |
| 50 | if the file isn't in the current working directory) of the file to |
| 51 | be opened or an integer file descriptor of the file to be |
| 52 | wrapped. (If a file descriptor is given, it is closed when the |
| 53 | returned I/O object is closed, unless closefd is set to False.) |
| 54 | |
| 55 | mode is an optional string that specifies the mode in which the file |
| 56 | is opened. It defaults to 'r' which means open for reading in text |
| 57 | mode. Other common values are 'w' for writing (truncating the file if |
| 58 | it already exists), and 'a' for appending (which on some Unix systems, |
| 59 | means that all writes append to the end of the file regardless of the |
| 60 | current seek position). In text mode, if encoding is not specified the |
| 61 | encoding used is platform dependent. (For reading and writing raw |
| 62 | bytes use binary mode and leave encoding unspecified.) The available |
| 63 | modes are: |
| 64 | |
| 65 | ========= =============================================================== |
| 66 | Character Meaning |
| 67 | --------- --------------------------------------------------------------- |
| 68 | 'r' open for reading (default) |
| 69 | 'w' open for writing, truncating the file first |
| 70 | 'a' open for writing, appending to the end of the file if it exists |
| 71 | 'b' binary mode |
| 72 | 't' text mode (default) |
| 73 | '+' open a disk file for updating (reading and writing) |
| 74 | 'U' universal newline mode (for backwards compatibility; unneeded |
| 75 | for new code) |
| 76 | ========= =============================================================== |
| 77 | |
| 78 | The default mode is 'rt' (open for reading text). For binary random |
| 79 | access, the mode 'w+b' opens and truncates the file to 0 bytes, while |
| 80 | 'r+b' opens the file without truncation. |
| 81 | |
| 82 | Python distinguishes between files opened in binary and text modes, |
| 83 | even when the underlying operating system doesn't. Files opened in |
| 84 | binary mode (appending 'b' to the mode argument) return contents as |
| 85 | bytes objects without any decoding. In text mode (the default, or when |
| 86 | 't' is appended to the mode argument), the contents of the file are |
| 87 | returned as strings, the bytes having been first decoded using a |
| 88 | platform-dependent encoding or using the specified encoding if given. |
| 89 | |
Antoine Pitrou | e812d29 | 2009-12-19 21:01:10 +0000 | [diff] [blame] | 90 | buffering is an optional integer used to set the buffering policy. |
| 91 | Pass 0 to switch buffering off (only allowed in binary mode), 1 to select |
| 92 | line buffering (only usable in text mode), and an integer > 1 to indicate |
| 93 | the size of a fixed-size chunk buffer. When no buffering argument is |
| 94 | given, the default buffering policy works as follows: |
| 95 | |
| 96 | * Binary files are buffered in fixed-size chunks; the size of the buffer |
| 97 | is chosen using a heuristic trying to determine the underlying device's |
| 98 | "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`. |
| 99 | On many systems, the buffer will typically be 4096 or 8192 bytes long. |
| 100 | |
| 101 | * "Interactive" text files (files for which isatty() returns True) |
| 102 | use line buffering. Other text files use the policy described above |
| 103 | for binary files. |
| 104 | |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 105 | encoding is the name of the encoding used to decode or encode the |
| 106 | file. This should only be used in text mode. The default encoding is |
| 107 | platform dependent, but any encoding supported by Python can be |
| 108 | passed. See the codecs module for the list of supported encodings. |
| 109 | |
| 110 | errors is an optional string that specifies how encoding errors are to |
| 111 | be handled---this argument should not be used in binary mode. Pass |
| 112 | 'strict' to raise a ValueError exception if there is an encoding error |
| 113 | (the default of None has the same effect), or pass 'ignore' to ignore |
| 114 | errors. (Note that ignoring encoding errors can lead to data loss.) |
| 115 | See the documentation for codecs.register for a list of the permitted |
| 116 | encoding error strings. |
| 117 | |
| 118 | newline controls how universal newlines mode works (it only applies to text |
| 119 | mode). It can be None, '', '\n', '\r', and '\r\n'. It works as |
| 120 | follows: |
| 121 | |
| 122 | * On input, if newline is None, universal newlines mode is |
| 123 | enabled. Lines in the input can end in '\n', '\r', or '\r\n', and |
| 124 | these are translated into '\n' before being returned to the |
| 125 | caller. If it is '', universal newline mode is enabled, but line |
| 126 | endings are returned to the caller untranslated. If it has any of |
| 127 | the other legal values, input lines are only terminated by the given |
| 128 | string, and the line ending is returned to the caller untranslated. |
| 129 | |
| 130 | * On output, if newline is None, any '\n' characters written are |
| 131 | translated to the system default line separator, os.linesep. If |
| 132 | newline is '', no translation takes place. If newline is any of the |
| 133 | other legal values, any '\n' characters written are translated to |
| 134 | the given string. |
| 135 | |
| 136 | If closefd is False, the underlying file descriptor will be kept open |
| 137 | when the file is closed. This does not work when a file name is given |
| 138 | and must be True in that case. |
| 139 | |
| 140 | open() returns a file object whose type depends on the mode, and |
| 141 | through which the standard file operations such as reading and writing |
| 142 | are performed. When open() is used to open a file in a text mode ('w', |
| 143 | 'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open |
| 144 | a file in a binary mode, the returned class varies: in read binary |
| 145 | mode, it returns a BufferedReader; in write binary and append binary |
| 146 | modes, it returns a BufferedWriter, and in read/write mode, it returns |
| 147 | a BufferedRandom. |
| 148 | |
| 149 | It is also possible to use a string or bytearray as a file for both |
| 150 | reading and writing. For strings StringIO can be used like a file |
| 151 | opened in a text mode, and for bytes a BytesIO can be used like a file |
| 152 | opened in a binary mode. |
| 153 | """ |
| 154 | if not isinstance(file, (basestring, int, long)): |
| 155 | raise TypeError("invalid file: %r" % file) |
| 156 | if not isinstance(mode, basestring): |
| 157 | raise TypeError("invalid mode: %r" % mode) |
Benjamin Peterson | a9bd6d5 | 2010-04-27 21:01:54 +0000 | [diff] [blame] | 158 | if not isinstance(buffering, (int, long)): |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 159 | raise TypeError("invalid buffering: %r" % buffering) |
| 160 | if encoding is not None and not isinstance(encoding, basestring): |
| 161 | raise TypeError("invalid encoding: %r" % encoding) |
| 162 | if errors is not None and not isinstance(errors, basestring): |
| 163 | raise TypeError("invalid errors: %r" % errors) |
| 164 | modes = set(mode) |
| 165 | if modes - set("arwb+tU") or len(mode) > len(modes): |
| 166 | raise ValueError("invalid mode: %r" % mode) |
| 167 | reading = "r" in modes |
| 168 | writing = "w" in modes |
| 169 | appending = "a" in modes |
| 170 | updating = "+" in modes |
| 171 | text = "t" in modes |
| 172 | binary = "b" in modes |
| 173 | if "U" in modes: |
| 174 | if writing or appending: |
| 175 | raise ValueError("can't use U and writing mode at once") |
| 176 | reading = True |
| 177 | if text and binary: |
| 178 | raise ValueError("can't have text and binary mode at once") |
| 179 | if reading + writing + appending > 1: |
| 180 | raise ValueError("can't have read/write/append mode at once") |
| 181 | if not (reading or writing or appending): |
| 182 | raise ValueError("must have exactly one of read/write/append mode") |
| 183 | if binary and encoding is not None: |
| 184 | raise ValueError("binary mode doesn't take an encoding argument") |
| 185 | if binary and errors is not None: |
| 186 | raise ValueError("binary mode doesn't take an errors argument") |
| 187 | if binary and newline is not None: |
| 188 | raise ValueError("binary mode doesn't take a newline argument") |
| 189 | raw = FileIO(file, |
| 190 | (reading and "r" or "") + |
| 191 | (writing and "w" or "") + |
| 192 | (appending and "a" or "") + |
| 193 | (updating and "+" or ""), |
| 194 | closefd) |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 195 | line_buffering = False |
| 196 | if buffering == 1 or buffering < 0 and raw.isatty(): |
| 197 | buffering = -1 |
| 198 | line_buffering = True |
| 199 | if buffering < 0: |
| 200 | buffering = DEFAULT_BUFFER_SIZE |
| 201 | try: |
| 202 | bs = os.fstat(raw.fileno()).st_blksize |
| 203 | except (os.error, AttributeError): |
| 204 | pass |
| 205 | else: |
| 206 | if bs > 1: |
| 207 | buffering = bs |
| 208 | if buffering < 0: |
| 209 | raise ValueError("invalid buffering size") |
| 210 | if buffering == 0: |
| 211 | if binary: |
| 212 | return raw |
| 213 | raise ValueError("can't have unbuffered text I/O") |
| 214 | if updating: |
| 215 | buffer = BufferedRandom(raw, buffering) |
| 216 | elif writing or appending: |
| 217 | buffer = BufferedWriter(raw, buffering) |
| 218 | elif reading: |
| 219 | buffer = BufferedReader(raw, buffering) |
| 220 | else: |
| 221 | raise ValueError("unknown mode: %r" % mode) |
| 222 | if binary: |
| 223 | return buffer |
| 224 | text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) |
| 225 | text.mode = mode |
| 226 | return text |
| 227 | |
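# A usage sketch (the file name "example.txt" is hypothetical, not part of
# this module): text mode returns unicode strings decoded with the given or
# platform-default encoding, binary mode returns bytes, and buffering=0 in
# binary mode yields the raw FileIO object itself.
#
#   with open("example.txt", "w", encoding="utf-8") as f:   # TextIOWrapper
#       f.write(u"Spam and eggs!\n")
#   with open("example.txt", "rb") as f:                    # BufferedReader
#       data = f.read()                                      # bytes
#   raw = open("example.txt", "rb", 0)                       # unbuffered FileIO
#   raw.close()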
| 228 | |
| 229 | class DocDescriptor: |
| 230 | """Helper for builtins.open.__doc__ |
| 231 | """ |
| 232 | def __get__(self, obj, typ): |
| 233 | return ( |
Benjamin Peterson | ae9f8bd | 2010-04-27 21:19:06 +0000 | [diff] [blame] | 234 | "open(file, mode='r', buffering=-1, encoding=None, " |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 235 | "errors=None, newline=None, closefd=True)\n\n" + |
| 236 | open.__doc__) |
| 237 | |
| 238 | class OpenWrapper: |
| 239 | """Wrapper for builtins.open |
| 240 | |
| 241 | Trick so that open won't become a bound method when stored |
| 242 | as a class variable (as dbm.dumb does). |
| 243 | |
| 244 | See initstdio() in Python/pythonrun.c. |
| 245 | """ |
| 246 | __doc__ = DocDescriptor() |
| 247 | |
| 248 | def __new__(cls, *args, **kwargs): |
| 249 | return open(*args, **kwargs) |
| 250 | |
| 251 | |
| 252 | class UnsupportedOperation(ValueError, IOError): |
| 253 | pass |
| 254 | |
| 255 | |
| 256 | class IOBase: |
| 257 | __metaclass__ = abc.ABCMeta |
| 258 | |
| 259 | """The abstract base class for all I/O classes, acting on streams of |
| 260 | bytes. There is no public constructor. |
| 261 | |
| 262 | This class provides dummy implementations for many methods that |
| 263 | derived classes can override selectively; the default implementations |
| 264 | represent a file that cannot be read, written or seeked. |
| 265 | |
| 266 | Even though IOBase does not declare read, readinto, or write because |
| 267 | their signatures will vary, implementations and clients should |
| 268 | consider those methods part of the interface. Also, implementations |
| 269 | may raise an IOError when operations they do not support are called. |
| 270 | |
| 271 | The basic type used for binary data read from or written to a file is |
| 272 | bytes. bytearrays are accepted too, and in some cases (such as |
| 273 | readinto) needed. Text I/O classes work with str data. |
| 274 | |
| 275 | Note that calling any method (even inquiries) on a closed stream is |
| 276 | undefined. Implementations may raise IOError in this case. |
| 277 | |
| 278 | IOBase (and its subclasses) support the iterator protocol, meaning |
| 279 | that an IOBase object can be iterated over yielding the lines in a |
| 280 | stream. |
| 281 | |
| 282 | IOBase also supports the :keyword:`with` statement. In this example, |
| 283 | fp is closed after the suite of the with statement is complete: |
| 284 | |
| 285 | with open('spam.txt', 'w') as fp: |
| 286 | fp.write('Spam and eggs!') |
| 287 | """ |
| 288 | |
| 289 | ### Internal ### |
| 290 | |
| 291 | def _unsupported(self, name): |
| 292 | """Internal: raise an exception for unsupported operations.""" |
| 293 | raise UnsupportedOperation("%s.%s() not supported" % |
| 294 | (self.__class__.__name__, name)) |
| 295 | |
| 296 | ### Positioning ### |
| 297 | |
| 298 | def seek(self, pos, whence=0): |
| 299 | """Change stream position. |
| 300 | |
| 301 | Change the stream position to byte offset pos. pos is |
| 302 | interpreted relative to the position indicated by whence. Values |
| 303 | for whence are: |
| 304 | |
| 305 | * 0 -- start of stream (the default); offset should be zero or positive |
| 306 | * 1 -- current stream position; offset may be negative |
| 307 | * 2 -- end of stream; offset is usually negative |
| 308 | |
| 309 | Return the new absolute position. |
| 310 | """ |
| 311 | self._unsupported("seek") |
| 312 | |
| 313 | def tell(self): |
| 314 | """Return current stream position.""" |
| 315 | return self.seek(0, 1) |
| 316 | |
| 317 | def truncate(self, pos=None): |
| 318 | """Truncate file to size bytes. |
| 319 | |
| 320 | Size defaults to the current IO position as reported by tell(). Return |
| 321 | the new size. |
| 322 | """ |
| 323 | self._unsupported("truncate") |
| 324 | |
| 325 | ### Flush and close ### |
| 326 | |
| 327 | def flush(self): |
| 328 | """Flush write buffers, if applicable. |
| 329 | |
| 330 | This is not implemented for read-only and non-blocking streams. |
| 331 | """ |
Antoine Pitrou | f7fd8e4 | 2010-05-03 16:25:33 +0000 | [diff] [blame] | 332 | self._checkClosed() |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 333 | # XXX Should this return the number of bytes written??? |
| 334 | |
| 335 | __closed = False |
| 336 | |
| 337 | def close(self): |
| 338 | """Flush and close the IO object. |
| 339 | |
| 340 | This method has no effect if the file is already closed. |
| 341 | """ |
| 342 | if not self.__closed: |
Antoine Pitrou | f7fd8e4 | 2010-05-03 16:25:33 +0000 | [diff] [blame] | 343 | self.flush() |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 344 | self.__closed = True |
| 345 | |
| 346 | def __del__(self): |
| 347 | """Destructor. Calls close().""" |
| 348 | # The try/except block is in case this is called at program |
| 349 | # exit time, when it's possible that globals have already been |
| 350 | # deleted, and then the close() call might fail. Since |
| 351 | # there's nothing we can do about such failures and they annoy |
| 352 | # the end users, we suppress the traceback. |
| 353 | try: |
| 354 | self.close() |
| 355 | except: |
| 356 | pass |
| 357 | |
| 358 | ### Inquiries ### |
| 359 | |
| 360 | def seekable(self): |
| 361 | """Return whether object supports random access. |
| 362 | |
| 363 | If False, seek(), tell() and truncate() will raise IOError. |
| 364 | This method may need to do a test seek(). |
| 365 | """ |
| 366 | return False |
| 367 | |
| 368 | def _checkSeekable(self, msg=None): |
| 369 | """Internal: raise an IOError if file is not seekable |
| 370 | """ |
| 371 | if not self.seekable(): |
| 372 | raise IOError("File or stream is not seekable." |
| 373 | if msg is None else msg) |
| 374 | |
| 375 | |
| 376 | def readable(self): |
| 377 | """Return whether object was opened for reading. |
| 378 | |
| 379 | If False, read() will raise IOError. |
| 380 | """ |
| 381 | return False |
| 382 | |
| 383 | def _checkReadable(self, msg=None): |
| 384 | """Internal: raise an IOError if file is not readable |
| 385 | """ |
| 386 | if not self.readable(): |
| 387 | raise IOError("File or stream is not readable." |
| 388 | if msg is None else msg) |
| 389 | |
| 390 | def writable(self): |
| 391 | """Return whether object was opened for writing. |
| 392 | |
| 393 | If False, write() and truncate() will raise IOError. |
| 394 | """ |
| 395 | return False |
| 396 | |
| 397 | def _checkWritable(self, msg=None): |
| 398 | """Internal: raise an IOError if file is not writable |
| 399 | """ |
| 400 | if not self.writable(): |
| 401 | raise IOError("File or stream is not writable." |
| 402 | if msg is None else msg) |
| 403 | |
| 404 | @property |
| 405 | def closed(self): |
| 406 | """closed: bool. True iff the file has been closed. |
| 407 | |
| 408 | For backwards compatibility, this is a property, not a predicate. |
| 409 | """ |
| 410 | return self.__closed |
| 411 | |
| 412 | def _checkClosed(self, msg=None): |
| 413 | """Internal: raise an ValueError if file is closed |
| 414 | """ |
| 415 | if self.closed: |
| 416 | raise ValueError("I/O operation on closed file." |
| 417 | if msg is None else msg) |
| 418 | |
| 419 | ### Context manager ### |
| 420 | |
| 421 | def __enter__(self): |
| 422 | """Context management protocol. Returns self.""" |
| 423 | self._checkClosed() |
| 424 | return self |
| 425 | |
| 426 | def __exit__(self, *args): |
| 427 | """Context management protocol. Calls close()""" |
| 428 | self.close() |
| 429 | |
| 430 | ### Lower-level APIs ### |
| 431 | |
| 432 | # XXX Should these be present even if unimplemented? |
| 433 | |
| 434 | def fileno(self): |
| 435 | """Returns underlying file descriptor if one exists. |
| 436 | |
| 437 | An IOError is raised if the IO object does not use a file descriptor. |
| 438 | """ |
| 439 | self._unsupported("fileno") |
| 440 | |
| 441 | def isatty(self): |
| 442 | """Return whether this is an 'interactive' stream. |
| 443 | |
| 444 | Return False if it can't be determined. |
| 445 | """ |
| 446 | self._checkClosed() |
| 447 | return False |
| 448 | |
| 449 | ### Readline[s] and writelines ### |
| 450 | |
| 451 | def readline(self, limit=-1): |
| 452 | r"""Read and return a line from the stream. |
| 453 | |
| 454 | If limit is specified, at most limit bytes will be read. |
| 455 | |
| 456 | The line terminator is always b'\n' for binary files; for text |
| 457 | files, the newlines argument to open can be used to select the line |
| 458 | terminator(s) recognized. |
| 459 | """ |
| 460 | # For backwards compatibility, a (slowish) readline(). |
| 461 | if hasattr(self, "peek"): |
| 462 | def nreadahead(): |
| 463 | readahead = self.peek(1) |
| 464 | if not readahead: |
| 465 | return 1 |
| 466 | n = (readahead.find(b"\n") + 1) or len(readahead) |
| 467 | if limit >= 0: |
| 468 | n = min(n, limit) |
| 469 | return n |
| 470 | else: |
| 471 | def nreadahead(): |
| 472 | return 1 |
| 473 | if limit is None: |
| 474 | limit = -1 |
| 475 | elif not isinstance(limit, (int, long)): |
| 476 | raise TypeError("limit must be an integer") |
| 477 | res = bytearray() |
| 478 | while limit < 0 or len(res) < limit: |
| 479 | b = self.read(nreadahead()) |
| 480 | if not b: |
| 481 | break |
| 482 | res += b |
| 483 | if res.endswith(b"\n"): |
| 484 | break |
| 485 | return bytes(res) |
| 486 | |
| 487 | def __iter__(self): |
| 488 | self._checkClosed() |
| 489 | return self |
| 490 | |
| 491 | def next(self): |
| 492 | line = self.readline() |
| 493 | if not line: |
| 494 | raise StopIteration |
| 495 | return line |
| 496 | |
| 497 | def readlines(self, hint=None): |
| 498 | """Return a list of lines from the stream. |
| 499 | |
| 500 | hint can be specified to control the number of lines read: no more |
| 501 | lines will be read if the total size (in bytes/characters) of all |
| 502 | lines so far exceeds hint. |
| 503 | """ |
| 504 | if hint is not None and not isinstance(hint, (int, long)): |
| 505 | raise TypeError("integer or None expected") |
| 506 | if hint is None or hint <= 0: |
| 507 | return list(self) |
| 508 | n = 0 |
| 509 | lines = [] |
| 510 | for line in self: |
| 511 | lines.append(line) |
| 512 | n += len(line) |
| 513 | if n >= hint: |
| 514 | break |
| 515 | return lines |
| 516 | |
| 517 | def writelines(self, lines): |
| 518 | self._checkClosed() |
| 519 | for line in lines: |
| 520 | self.write(line) |
| 521 | |
| 522 | io.IOBase.register(IOBase) |
| 523 | |
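# A sketch of the generic IOBase machinery, using the BytesIO class defined
# further down in this module: readline() and iteration yield lines,
# readlines() honours its hint argument, and the stream works as a context
# manager that closes itself on exit.
#
#   buf = BytesIO(b"one\ntwo\nthree\n")
#   assert buf.readline() == b"one\n"
#   assert buf.readlines(5) == [b"two\n", b"three\n"]
#   with BytesIO(b"spam") as buf:
#       assert buf.read() == b"spam"
#   assert buf.closed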
| 524 | |
| 525 | class RawIOBase(IOBase): |
| 526 | |
| 527 | """Base class for raw binary I/O.""" |
| 528 | |
| 529 | # The read() method is implemented by calling readinto(); derived |
| 530 | # classes that want to support read() only need to implement |
| 531 | # readinto() as a primitive operation. In general, readinto() can be |
| 532 | # more efficient than read(). |
| 533 | |
| 534 | # (It would be tempting to also provide an implementation of |
| 535 | # readinto() in terms of read(), in case the latter is a more suitable |
| 536 | # primitive operation, but that would lead to nasty recursion in case |
| 537 | # a subclass doesn't implement either.) |
| 538 | |
| 539 | def read(self, n=-1): |
| 540 | """Read and return up to n bytes. |
| 541 | |
| 542 | Returns an empty bytes object on EOF, or None if the object is |
| 543 | set not to block and has no data to read. |
| 544 | """ |
| 545 | if n is None: |
| 546 | n = -1 |
| 547 | if n < 0: |
| 548 | return self.readall() |
| 549 | b = bytearray(n.__index__()) |
| 550 | n = self.readinto(b) |
Antoine Pitrou | 6391b34 | 2010-09-14 18:48:19 +0000 | [diff] [blame] | 551 | if n is None: |
| 552 | return None |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 553 | del b[n:] |
| 554 | return bytes(b) |
| 555 | |
| 556 | def readall(self): |
| 557 | """Read until EOF, using multiple read() call.""" |
| 558 | res = bytearray() |
| 559 | while True: |
| 560 | data = self.read(DEFAULT_BUFFER_SIZE) |
| 561 | if not data: |
| 562 | break |
| 563 | res += data |
Victor Stinner | daf17e9 | 2011-05-25 22:52:37 +0200 | [diff] [blame] | 564 | if res: |
| 565 | return bytes(res) |
| 566 | else: |
| 567 | # b'' or None |
| 568 | return data |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 569 | |
| 570 | def readinto(self, b): |
| 571 | """Read up to len(b) bytes into b. |
| 572 | |
| 573 | Returns number of bytes read (0 for EOF), or None if the object |
Antoine Pitrou | 6391b34 | 2010-09-14 18:48:19 +0000 | [diff] [blame] | 574 | is set not to block and has no data to read. |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 575 | """ |
| 576 | self._unsupported("readinto") |
| 577 | |
| 578 | def write(self, b): |
| 579 | """Write the given buffer to the IO stream. |
| 580 | |
| 581 | Returns the number of bytes written, which may be less than len(b). |
| 582 | """ |
| 583 | self._unsupported("write") |
| 584 | |
| 585 | io.RawIOBase.register(RawIOBase) |
| 586 | from _io import FileIO |
| 587 | RawIOBase.register(FileIO) |
| 588 | |
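# A minimal sketch of a RawIOBase subclass (hypothetical, for illustration):
# only readinto() and readable() are supplied, and read() is inherited from
# the base class, which allocates a bytearray and delegates to readinto().
# Note that readall() would never return for this endless stream.
#
#   class ZeroStream(RawIOBase):
#       """Yield an endless stream of NUL bytes."""
#       def readable(self):
#           return True
#       def readinto(self, b):
#           b[:] = b"\x00" * len(b)
#           return len(b)
#
#   assert ZeroStream().read(4) == b"\x00" * 4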
| 589 | |
| 590 | class BufferedIOBase(IOBase): |
| 591 | |
| 592 | """Base class for buffered IO objects. |
| 593 | |
| 594 | The main difference from RawIOBase is that the read() method |
| 595 | supports omitting the size argument, and does not have a default |
| 596 | implementation that defers to readinto(). |
| 597 | |
| 598 | In addition, read(), readinto() and write() may raise |
| 599 | BlockingIOError if the underlying raw stream is in non-blocking |
| 600 | mode and not ready; unlike their raw counterparts, they will never |
| 601 | return None. |
| 602 | |
| 603 | A typical implementation should not inherit from a RawIOBase |
| 604 | implementation, but wrap one. |
| 605 | """ |
| 606 | |
| 607 | def read(self, n=None): |
| 608 | """Read and return up to n bytes. |
| 609 | |
| 610 | If the argument is omitted, None, or negative, reads and |
| 611 | returns all data until EOF. |
| 612 | |
| 613 | If the argument is positive, and the underlying raw stream is |
| 614 | not 'interactive', multiple raw reads may be issued to satisfy |
| 615 | the byte count (unless EOF is reached first). But for |
| 616 | interactive raw streams (XXX and for pipes?), at most one raw |
| 617 | read will be issued, and a short result does not imply that |
| 618 | EOF is imminent. |
| 619 | |
| 620 | Returns an empty bytes array on EOF. |
| 621 | |
| 622 | Raises BlockingIOError if the underlying raw stream has no |
| 623 | data at the moment. |
| 624 | """ |
| 625 | self._unsupported("read") |
| 626 | |
| 627 | def read1(self, n=None): |
| 628 | """Read up to n bytes with at most one read() system call.""" |
| 629 | self._unsupported("read1") |
| 630 | |
| 631 | def readinto(self, b): |
| 632 | """Read up to len(b) bytes into b. |
| 633 | |
| 634 | Like read(), this may issue multiple reads to the underlying raw |
| 635 | stream, unless the latter is 'interactive'. |
| 636 | |
| 637 | Returns the number of bytes read (0 for EOF). |
| 638 | |
| 639 | Raises BlockingIOError if the underlying raw stream has no |
| 640 | data at the moment. |
| 641 | """ |
| 642 | # XXX This ought to work with anything that supports the buffer API |
| 643 | data = self.read(len(b)) |
| 644 | n = len(data) |
| 645 | try: |
| 646 | b[:n] = data |
| 647 | except TypeError as err: |
| 648 | import array |
| 649 | if not isinstance(b, array.array): |
| 650 | raise err |
| 651 | b[:n] = array.array(b'b', data) |
| 652 | return n |
| 653 | |
| 654 | def write(self, b): |
| 655 | """Write the given buffer to the IO stream. |
| 656 | |
| 657 | Return the number of bytes written, which is never less than |
| 658 | len(b). |
| 659 | |
| 660 | Raises BlockingIOError if the buffer is full and the |
| 661 | underlying raw stream cannot accept more data at the moment. |
| 662 | """ |
| 663 | self._unsupported("write") |
| 664 | |
| 665 | def detach(self): |
| 666 | """ |
| 667 | Separate the underlying raw stream from the buffer and return it. |
| 668 | |
| 669 | After the raw stream has been detached, the buffer is in an unusable |
| 670 | state. |
| 671 | """ |
| 672 | self._unsupported("detach") |
| 673 | |
| 674 | io.BufferedIOBase.register(BufferedIOBase) |
| 675 | |
| 676 | |
| 677 | class _BufferedIOMixin(BufferedIOBase): |
| 678 | |
| 679 | """A mixin implementation of BufferedIOBase with an underlying raw stream. |
| 680 | |
| 681 | This passes most requests on to the underlying raw stream. It |
| 682 | does *not* provide implementations of read(), readinto() or |
| 683 | write(). |
| 684 | """ |
| 685 | |
| 686 | def __init__(self, raw): |
Antoine Pitrou | fc9ead6 | 2010-12-21 21:26:55 +0000 | [diff] [blame] | 687 | self._raw = raw |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 688 | |
| 689 | ### Positioning ### |
| 690 | |
| 691 | def seek(self, pos, whence=0): |
| 692 | new_position = self.raw.seek(pos, whence) |
| 693 | if new_position < 0: |
| 694 | raise IOError("seek() returned an invalid position") |
| 695 | return new_position |
| 696 | |
| 697 | def tell(self): |
| 698 | pos = self.raw.tell() |
| 699 | if pos < 0: |
| 700 | raise IOError("tell() returned an invalid position") |
| 701 | return pos |
| 702 | |
| 703 | def truncate(self, pos=None): |
| 704 | # Flush the stream. We're mixing buffered I/O with lower-level I/O, |
| 705 | # and a flush may be necessary to synch both views of the current |
| 706 | # file state. |
| 707 | self.flush() |
| 708 | |
| 709 | if pos is None: |
| 710 | pos = self.tell() |
| 711 | # XXX: Should seek() be used, instead of passing the position |
| 712 | # XXX directly to truncate? |
| 713 | return self.raw.truncate(pos) |
| 714 | |
| 715 | ### Flush and close ### |
| 716 | |
| 717 | def flush(self): |
Antoine Pitrou | f7fd8e4 | 2010-05-03 16:25:33 +0000 | [diff] [blame] | 718 | if self.closed: |
| 719 | raise ValueError("flush of closed file") |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 720 | self.raw.flush() |
| 721 | |
| 722 | def close(self): |
Antoine Pitrou | f7fd8e4 | 2010-05-03 16:25:33 +0000 | [diff] [blame] | 723 | if self.raw is not None and not self.closed: |
Antoine Pitrou | 5aa7df3 | 2011-11-21 20:16:44 +0100 | [diff] [blame] | 724 | try: |
| 725 | # may raise BlockingIOError or BrokenPipeError etc |
| 726 | self.flush() |
| 727 | finally: |
| 728 | self.raw.close() |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 729 | |
| 730 | def detach(self): |
| 731 | if self.raw is None: |
| 732 | raise ValueError("raw stream already detached") |
| 733 | self.flush() |
Antoine Pitrou | fc9ead6 | 2010-12-21 21:26:55 +0000 | [diff] [blame] | 734 | raw = self._raw |
| 735 | self._raw = None |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 736 | return raw |
| 737 | |
| 738 | ### Inquiries ### |
| 739 | |
| 740 | def seekable(self): |
| 741 | return self.raw.seekable() |
| 742 | |
| 743 | def readable(self): |
| 744 | return self.raw.readable() |
| 745 | |
| 746 | def writable(self): |
| 747 | return self.raw.writable() |
| 748 | |
| 749 | @property |
Antoine Pitrou | fc9ead6 | 2010-12-21 21:26:55 +0000 | [diff] [blame] | 750 | def raw(self): |
| 751 | return self._raw |
| 752 | |
| 753 | @property |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 754 | def closed(self): |
| 755 | return self.raw.closed |
| 756 | |
| 757 | @property |
| 758 | def name(self): |
| 759 | return self.raw.name |
| 760 | |
| 761 | @property |
| 762 | def mode(self): |
| 763 | return self.raw.mode |
| 764 | |
| 765 | def __repr__(self): |
| 766 | clsname = self.__class__.__name__ |
| 767 | try: |
| 768 | name = self.name |
| 769 | except AttributeError: |
| 770 | return "<_pyio.{0}>".format(clsname) |
| 771 | else: |
| 772 | return "<_pyio.{0} name={1!r}>".format(clsname, name) |
| 773 | |
| 774 | ### Lower-level APIs ### |
| 775 | |
| 776 | def fileno(self): |
| 777 | return self.raw.fileno() |
| 778 | |
| 779 | def isatty(self): |
| 780 | return self.raw.isatty() |
| 781 | |
| 782 | |
| 783 | class BytesIO(BufferedIOBase): |
| 784 | |
| 785 | """Buffered I/O implementation using an in-memory bytes buffer.""" |
| 786 | |
| 787 | def __init__(self, initial_bytes=None): |
| 788 | buf = bytearray() |
| 789 | if initial_bytes is not None: |
| 790 | buf.extend(initial_bytes) |
| 791 | self._buffer = buf |
| 792 | self._pos = 0 |
| 793 | |
Antoine Pitrou | fa94e80 | 2009-10-24 12:23:18 +0000 | [diff] [blame] | 794 | def __getstate__(self): |
| 795 | if self.closed: |
| 796 | raise ValueError("__getstate__ on closed file") |
| 797 | return self.__dict__.copy() |
| 798 | |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 799 | def getvalue(self): |
| 800 | """Return the bytes value (contents) of the buffer |
| 801 | """ |
| 802 | if self.closed: |
| 803 | raise ValueError("getvalue on closed file") |
| 804 | return bytes(self._buffer) |
| 805 | |
| 806 | def read(self, n=None): |
| 807 | if self.closed: |
| 808 | raise ValueError("read from closed file") |
| 809 | if n is None: |
| 810 | n = -1 |
| 811 | if not isinstance(n, (int, long)): |
| 812 | raise TypeError("integer argument expected, got {0!r}".format( |
| 813 | type(n))) |
| 814 | if n < 0: |
| 815 | n = len(self._buffer) |
| 816 | if len(self._buffer) <= self._pos: |
| 817 | return b"" |
| 818 | newpos = min(len(self._buffer), self._pos + n) |
| 819 | b = self._buffer[self._pos : newpos] |
| 820 | self._pos = newpos |
| 821 | return bytes(b) |
| 822 | |
| 823 | def read1(self, n): |
| 824 | """This is the same as read. |
| 825 | """ |
| 826 | return self.read(n) |
| 827 | |
| 828 | def write(self, b): |
| 829 | if self.closed: |
| 830 | raise ValueError("write to closed file") |
| 831 | if isinstance(b, unicode): |
| 832 | raise TypeError("can't write unicode to binary stream") |
| 833 | n = len(b) |
| 834 | if n == 0: |
| 835 | return 0 |
| 836 | pos = self._pos |
| 837 | if pos > len(self._buffer): |
| 838 | # Inserts null bytes between the current end of the file |
| 839 | # and the new write position. |
| 840 | padding = b'\x00' * (pos - len(self._buffer)) |
| 841 | self._buffer += padding |
| 842 | self._buffer[pos:pos + n] = b |
| 843 | self._pos += n |
| 844 | return n |
| 845 | |
| 846 | def seek(self, pos, whence=0): |
| 847 | if self.closed: |
| 848 | raise ValueError("seek on closed file") |
| 849 | try: |
Florent Xicluna | 1f3b4e1 | 2010-03-07 12:14:25 +0000 | [diff] [blame] | 850 | pos.__index__ |
| 851 | except AttributeError: |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 852 | raise TypeError("an integer is required") |
| 853 | if whence == 0: |
| 854 | if pos < 0: |
| 855 | raise ValueError("negative seek position %r" % (pos,)) |
| 856 | self._pos = pos |
| 857 | elif whence == 1: |
| 858 | self._pos = max(0, self._pos + pos) |
| 859 | elif whence == 2: |
| 860 | self._pos = max(0, len(self._buffer) + pos) |
| 861 | else: |
| 862 | raise ValueError("invalid whence value") |
| 863 | return self._pos |
| 864 | |
| 865 | def tell(self): |
| 866 | if self.closed: |
| 867 | raise ValueError("tell on closed file") |
| 868 | return self._pos |
| 869 | |
| 870 | def truncate(self, pos=None): |
| 871 | if self.closed: |
| 872 | raise ValueError("truncate on closed file") |
| 873 | if pos is None: |
| 874 | pos = self._pos |
Florent Xicluna | 1f3b4e1 | 2010-03-07 12:14:25 +0000 | [diff] [blame] | 875 | else: |
| 876 | try: |
| 877 | pos.__index__ |
| 878 | except AttributeError: |
| 879 | raise TypeError("an integer is required") |
| 880 | if pos < 0: |
| 881 | raise ValueError("negative truncate position %r" % (pos,)) |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 882 | del self._buffer[pos:] |
Antoine Pitrou | f3fa074 | 2010-01-31 22:26:04 +0000 | [diff] [blame] | 883 | return pos |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 884 | |
| 885 | def readable(self): |
Antoine Pitrou | c5eec0e | 2012-09-05 20:11:49 +0200 | [diff] [blame] | 886 | if self.closed: |
| 887 | raise ValueError("I/O operation on closed file.") |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 888 | return True |
| 889 | |
| 890 | def writable(self): |
Antoine Pitrou | c5eec0e | 2012-09-05 20:11:49 +0200 | [diff] [blame] | 891 | if self.closed: |
| 892 | raise ValueError("I/O operation on closed file.") |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 893 | return True |
| 894 | |
| 895 | def seekable(self): |
Antoine Pitrou | c5eec0e | 2012-09-05 20:11:49 +0200 | [diff] [blame] | 896 | if self.closed: |
| 897 | raise ValueError("I/O operation on closed file.") |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 898 | return True |
| 899 | |
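# A usage sketch for BytesIO: the full buffered API operates on an in-memory
# buffer, including in-place overwriting, truncation and getvalue().
#
#   b = BytesIO(b"Hello world")
#   assert b.read(5) == b"Hello"
#   b.seek(0)
#   b.write(b"Jello")                  # overwrite in place; position is now 5
#   assert b.getvalue() == b"Jello world"
#   b.truncate(5)
#   assert b.getvalue() == b"Jello"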
| 900 | |
| 901 | class BufferedReader(_BufferedIOMixin): |
| 902 | |
| 903 | """BufferedReader(raw[, buffer_size]) |
| 904 | |
| 905 | A buffer for a readable, sequential RawIOBase object. |
| 906 | |
| 907 | The constructor creates a BufferedReader for the given readable raw |
| 908 | stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE |
| 909 | is used. |
| 910 | """ |
| 911 | |
| 912 | def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE): |
| 913 | """Create a new buffered reader using the given readable raw IO object. |
| 914 | """ |
| 915 | if not raw.readable(): |
| 916 | raise IOError('"raw" argument must be readable.') |
| 917 | |
| 918 | _BufferedIOMixin.__init__(self, raw) |
| 919 | if buffer_size <= 0: |
| 920 | raise ValueError("invalid buffer size") |
| 921 | self.buffer_size = buffer_size |
| 922 | self._reset_read_buf() |
| 923 | self._read_lock = Lock() |
| 924 | |
| 925 | def _reset_read_buf(self): |
| 926 | self._read_buf = b"" |
| 927 | self._read_pos = 0 |
| 928 | |
| 929 | def read(self, n=None): |
| 930 | """Read n bytes. |
| 931 | |
| 932 | Returns exactly n bytes of data unless the underlying raw IO |
| 933 | stream reaches EOF or the call would block in non-blocking |
| 934 | mode. If n is negative, read until EOF or until read() would |
| 935 | block. |
| 936 | """ |
| 937 | if n is not None and n < -1: |
| 938 | raise ValueError("invalid number of bytes to read") |
| 939 | with self._read_lock: |
| 940 | return self._read_unlocked(n) |
| 941 | |
| 942 | def _read_unlocked(self, n=None): |
| 943 | nodata_val = b"" |
| 944 | empty_values = (b"", None) |
| 945 | buf = self._read_buf |
| 946 | pos = self._read_pos |
| 947 | |
| 948 | # Special case for when the number of bytes to read is unspecified. |
| 949 | if n is None or n == -1: |
| 950 | self._reset_read_buf() |
| 951 | chunks = [buf[pos:]] # Strip the consumed bytes. |
| 952 | current_size = 0 |
| 953 | while True: |
| 954 | # Read until EOF or until read() would block. |
Antoine Pitrou | 6439c00 | 2011-02-25 21:35:47 +0000 | [diff] [blame] | 955 | try: |
| 956 | chunk = self.raw.read() |
| 957 | except IOError as e: |
| 958 | if e.errno != EINTR: |
| 959 | raise |
| 960 | continue |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 961 | if chunk in empty_values: |
| 962 | nodata_val = chunk |
| 963 | break |
| 964 | current_size += len(chunk) |
| 965 | chunks.append(chunk) |
| 966 | return b"".join(chunks) or nodata_val |
| 967 | |
| 968 | # The number of bytes to read is specified, return at most n bytes. |
| 969 | avail = len(buf) - pos # Length of the available buffered data. |
| 970 | if n <= avail: |
| 971 | # Fast path: the data to read is fully buffered. |
| 972 | self._read_pos += n |
| 973 | return buf[pos:pos+n] |
| 974 | # Slow path: read from the stream until enough bytes are read, |
| 975 | # or until an EOF occurs or until read() would block. |
| 976 | chunks = [buf[pos:]] |
| 977 | wanted = max(self.buffer_size, n) |
| 978 | while avail < n: |
Antoine Pitrou | 6439c00 | 2011-02-25 21:35:47 +0000 | [diff] [blame] | 979 | try: |
| 980 | chunk = self.raw.read(wanted) |
| 981 | except IOError as e: |
| 982 | if e.errno != EINTR: |
| 983 | raise |
| 984 | continue |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 985 | if chunk in empty_values: |
| 986 | nodata_val = chunk |
| 987 | break |
| 988 | avail += len(chunk) |
| 989 | chunks.append(chunk) |
| 990 | # n is more than avail only when an EOF occurred or when |
| 991 | # read() would have blocked. |
| 992 | n = min(n, avail) |
| 993 | out = b"".join(chunks) |
| 994 | self._read_buf = out[n:] # Save the extra data in the buffer. |
| 995 | self._read_pos = 0 |
| 996 | return out[:n] if out else nodata_val |
| 997 | |
| 998 | def peek(self, n=0): |
| 999 | """Returns buffered bytes without advancing the position. |
| 1000 | |
| 1001 | The argument indicates a desired minimal number of bytes; we |
| 1002 | do at most one raw read to satisfy it. We never return more |
| 1003 | than self.buffer_size. |
| 1004 | """ |
| 1005 | with self._read_lock: |
| 1006 | return self._peek_unlocked(n) |
| 1007 | |
| 1008 | def _peek_unlocked(self, n=0): |
| 1009 | want = min(n, self.buffer_size) |
| 1010 | have = len(self._read_buf) - self._read_pos |
| 1011 | if have < want or have <= 0: |
| 1012 | to_read = self.buffer_size - have |
Antoine Pitrou | 6439c00 | 2011-02-25 21:35:47 +0000 | [diff] [blame] | 1013 | while True: |
| 1014 | try: |
| 1015 | current = self.raw.read(to_read) |
| 1016 | except IOError as e: |
| 1017 | if e.errno != EINTR: |
| 1018 | raise |
| 1019 | continue |
| 1020 | break |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1021 | if current: |
| 1022 | self._read_buf = self._read_buf[self._read_pos:] + current |
| 1023 | self._read_pos = 0 |
| 1024 | return self._read_buf[self._read_pos:] |
| 1025 | |
| 1026 | def read1(self, n): |
| 1027 | """Reads up to n bytes, with at most one read() system call.""" |
| 1028 | # Returns up to n bytes. If at least one byte is buffered, we |
| 1029 | # only return buffered bytes. Otherwise, we do one raw read. |
| 1030 | if n < 0: |
| 1031 | raise ValueError("number of bytes to read must be non-negative") |
| 1032 | if n == 0: |
| 1033 | return b"" |
| 1034 | with self._read_lock: |
| 1035 | self._peek_unlocked(1) |
| 1036 | return self._read_unlocked( |
| 1037 | min(n, len(self._read_buf) - self._read_pos)) |
| 1038 | |
| 1039 | def tell(self): |
| 1040 | return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos |
| 1041 | |
| 1042 | def seek(self, pos, whence=0): |
| 1043 | if not (0 <= whence <= 2): |
| 1044 | raise ValueError("invalid whence value") |
| 1045 | with self._read_lock: |
| 1046 | if whence == 1: |
| 1047 | pos -= len(self._read_buf) - self._read_pos |
| 1048 | pos = _BufferedIOMixin.seek(self, pos, whence) |
| 1049 | self._reset_read_buf() |
| 1050 | return pos |
| 1051 | |
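# A sketch of BufferedReader behaviour, using a BytesIO object as a stand-in
# "raw" stream (it is not a RawIOBase, but it offers the readable()/read()
# methods the buffer relies on): peek() fills the buffer without advancing
# the position, and subsequent reads are served from that buffer.
#
#   r = BufferedReader(BytesIO(b"abcdef"), buffer_size=4)
#   assert r.peek(1)[:1] == b"a"       # may return up to buffer_size bytes
#   assert r.read(2) == b"ab"          # fast path: fully buffered
#   assert r.read() == b"cdef"         # drains the buffer, then the raw stream
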
| 1052 | class BufferedWriter(_BufferedIOMixin): |
| 1053 | |
| 1054 | """A buffer for a writeable sequential RawIO object. |
| 1055 | |
| 1056 | The constructor creates a BufferedWriter for the given writeable raw |
| 1057 | stream. If the buffer_size is not given, it defaults to |
| 1058 | DEFAULT_BUFFER_SIZE. |
| 1059 | """ |
| 1060 | |
| 1061 | _warning_stack_offset = 2 |
| 1062 | |
| 1063 | def __init__(self, raw, |
| 1064 | buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None): |
| 1065 | if not raw.writable(): |
| 1066 | raise IOError('"raw" argument must be writable.') |
| 1067 | |
| 1068 | _BufferedIOMixin.__init__(self, raw) |
| 1069 | if buffer_size <= 0: |
| 1070 | raise ValueError("invalid buffer size") |
| 1071 | if max_buffer_size is not None: |
| 1072 | warnings.warn("max_buffer_size is deprecated", DeprecationWarning, |
| 1073 | self._warning_stack_offset) |
| 1074 | self.buffer_size = buffer_size |
| 1075 | self._write_buf = bytearray() |
| 1076 | self._write_lock = Lock() |
| 1077 | |
| 1078 | def write(self, b): |
| 1079 | if self.closed: |
| 1080 | raise ValueError("write to closed file") |
| 1081 | if isinstance(b, unicode): |
| 1082 | raise TypeError("can't write unicode to binary stream") |
| 1083 | with self._write_lock: |
| 1084 | # XXX we can implement some more tricks to try and avoid |
| 1085 | # partial writes |
| 1086 | if len(self._write_buf) > self.buffer_size: |
Antoine Pitrou | 5aa7df3 | 2011-11-21 20:16:44 +0100 | [diff] [blame] | 1087 | # We're full, so let's pre-flush the buffer. (This may |
| 1088 | # raise BlockingIOError with characters_written == 0.) |
| 1089 | self._flush_unlocked() |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1090 | before = len(self._write_buf) |
| 1091 | self._write_buf.extend(b) |
| 1092 | written = len(self._write_buf) - before |
| 1093 | if len(self._write_buf) > self.buffer_size: |
| 1094 | try: |
| 1095 | self._flush_unlocked() |
| 1096 | except BlockingIOError as e: |
| 1097 | if len(self._write_buf) > self.buffer_size: |
| 1098 | # We've hit the buffer_size. We have to accept a partial |
| 1099 | # write and cut back our buffer. |
| 1100 | overage = len(self._write_buf) - self.buffer_size |
| 1101 | written -= overage |
| 1102 | self._write_buf = self._write_buf[:self.buffer_size] |
| 1103 | raise BlockingIOError(e.errno, e.strerror, written) |
| 1104 | return written |
| 1105 | |
| 1106 | def truncate(self, pos=None): |
| 1107 | with self._write_lock: |
| 1108 | self._flush_unlocked() |
| 1109 | if pos is None: |
| 1110 | pos = self.raw.tell() |
| 1111 | return self.raw.truncate(pos) |
| 1112 | |
| 1113 | def flush(self): |
| 1114 | with self._write_lock: |
| 1115 | self._flush_unlocked() |
| 1116 | |
| 1117 | def _flush_unlocked(self): |
| 1118 | if self.closed: |
| 1119 | raise ValueError("flush of closed file") |
Antoine Pitrou | 5aa7df3 | 2011-11-21 20:16:44 +0100 | [diff] [blame] | 1120 | while self._write_buf: |
| 1121 | try: |
| 1122 | n = self.raw.write(self._write_buf) |
| 1123 | except BlockingIOError: |
| 1124 | raise RuntimeError("self.raw should implement RawIOBase: it " |
| 1125 | "should not raise BlockingIOError") |
| 1126 | except IOError as e: |
| 1127 | if e.errno != EINTR: |
| 1128 | raise |
| 1129 | continue |
| 1130 | if n is None: |
| 1131 | raise BlockingIOError( |
| 1132 | errno.EAGAIN, |
| 1133 | "write could not complete without blocking", 0) |
| 1134 | if n > len(self._write_buf) or n < 0: |
| 1135 | raise IOError("write() returned incorrect number of bytes") |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1136 | del self._write_buf[:n] |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1137 | |
| 1138 | def tell(self): |
| 1139 | return _BufferedIOMixin.tell(self) + len(self._write_buf) |
| 1140 | |
| 1141 | def seek(self, pos, whence=0): |
| 1142 | if not (0 <= whence <= 2): |
| 1143 | raise ValueError("invalid whence") |
| 1144 | with self._write_lock: |
| 1145 | self._flush_unlocked() |
| 1146 | return _BufferedIOMixin.seek(self, pos, whence) |
| 1147 | |
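# A sketch of BufferedWriter behaviour with a tiny, hypothetical RawIOBase
# sink that records each raw write it receives: writes smaller than
# buffer_size stay in the Python-level buffer until flush() pushes them to
# the raw stream.
#
#   class RecordingSink(RawIOBase):
#       def __init__(self):
#           self.chunks = []
#       def writable(self):
#           return True
#       def write(self, b):
#           self.chunks.append(bytes(b))   # snapshot the bytearray
#           return len(b)
#
#   sink = RecordingSink()
#   w = BufferedWriter(sink, buffer_size=8)
#   w.write(b"abc")
#   assert sink.chunks == []               # still buffered
#   w.flush()
#   assert sink.chunks == [b"abc"]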
| 1148 | |
| 1149 | class BufferedRWPair(BufferedIOBase): |
| 1150 | |
| 1151 | """A buffered reader and writer object together. |
| 1152 | |
| 1153 | A buffered reader object and buffered writer object put together to |
| 1154 | form a sequential IO object that can read and write. This is typically |
| 1155 | used with a socket or two-way pipe. |
| 1156 | |
| 1157 | reader and writer are RawIOBase objects that are readable and |
| 1158 | writeable respectively. If the buffer_size is omitted it defaults to |
| 1159 | DEFAULT_BUFFER_SIZE. |
| 1160 | """ |
| 1161 | |
| 1162 | # XXX The usefulness of this (compared to having two separate IO |
| 1163 | # objects) is questionable. |
| 1164 | |
| 1165 | def __init__(self, reader, writer, |
| 1166 | buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None): |
| 1167 | """Constructor. |
| 1168 | |
| 1169 | The arguments are two RawIO instances. |
| 1170 | """ |
| 1171 | if max_buffer_size is not None: |
| 1172 | warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2) |
| 1173 | |
| 1174 | if not reader.readable(): |
| 1175 | raise IOError('"reader" argument must be readable.') |
| 1176 | |
| 1177 | if not writer.writable(): |
| 1178 | raise IOError('"writer" argument must be writable.') |
| 1179 | |
| 1180 | self.reader = BufferedReader(reader, buffer_size) |
| 1181 | self.writer = BufferedWriter(writer, buffer_size) |
| 1182 | |
| 1183 | def read(self, n=None): |
| 1184 | if n is None: |
| 1185 | n = -1 |
| 1186 | return self.reader.read(n) |
| 1187 | |
| 1188 | def readinto(self, b): |
| 1189 | return self.reader.readinto(b) |
| 1190 | |
| 1191 | def write(self, b): |
| 1192 | return self.writer.write(b) |
| 1193 | |
| 1194 | def peek(self, n=0): |
| 1195 | return self.reader.peek(n) |
| 1196 | |
| 1197 | def read1(self, n): |
| 1198 | return self.reader.read1(n) |
| 1199 | |
| 1200 | def readable(self): |
| 1201 | return self.reader.readable() |
| 1202 | |
| 1203 | def writable(self): |
| 1204 | return self.writer.writable() |
| 1205 | |
| 1206 | def flush(self): |
| 1207 | return self.writer.flush() |
| 1208 | |
| 1209 | def close(self): |
| 1210 | self.writer.close() |
| 1211 | self.reader.close() |
| 1212 | |
| 1213 | def isatty(self): |
| 1214 | return self.reader.isatty() or self.writer.isatty() |
| 1215 | |
| 1216 | @property |
| 1217 | def closed(self): |
| 1218 | return self.writer.closed |
| 1219 | |
| 1220 | |
| 1221 | class BufferedRandom(BufferedWriter, BufferedReader): |
| 1222 | |
| 1223 | """A buffered interface to random access streams. |
| 1224 | |
| 1225 | The constructor creates a reader and writer for a seekable stream, |
| 1226 | raw, given in the first argument. If the buffer_size is omitted it |
| 1227 | defaults to DEFAULT_BUFFER_SIZE. |
| 1228 | """ |
| 1229 | |
| 1230 | _warning_stack_offset = 3 |
| 1231 | |
| 1232 | def __init__(self, raw, |
| 1233 | buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None): |
| 1234 | raw._checkSeekable() |
| 1235 | BufferedReader.__init__(self, raw, buffer_size) |
| 1236 | BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size) |
| 1237 | |
| 1238 | def seek(self, pos, whence=0): |
| 1239 | if not (0 <= whence <= 2): |
| 1240 | raise ValueError("invalid whence") |
| 1241 | self.flush() |
| 1242 | if self._read_buf: |
| 1243 | # Undo read ahead. |
| 1244 | with self._read_lock: |
| 1245 | self.raw.seek(self._read_pos - len(self._read_buf), 1) |
| 1246 | # First do the raw seek, then empty the read buffer, so that |
| 1247 | # if the raw seek fails, we don't lose buffered data forever. |
| 1248 | pos = self.raw.seek(pos, whence) |
| 1249 | with self._read_lock: |
| 1250 | self._reset_read_buf() |
| 1251 | if pos < 0: |
| 1252 | raise IOError("seek() returned invalid position") |
| 1253 | return pos |
| 1254 | |
| 1255 | def tell(self): |
| 1256 | if self._write_buf: |
| 1257 | return BufferedWriter.tell(self) |
| 1258 | else: |
| 1259 | return BufferedReader.tell(self) |
| 1260 | |
| 1261 | def truncate(self, pos=None): |
| 1262 | if pos is None: |
| 1263 | pos = self.tell() |
| 1264 | # Use seek to flush the read buffer. |
Antoine Pitrou | f3fa074 | 2010-01-31 22:26:04 +0000 | [diff] [blame] | 1265 | return BufferedWriter.truncate(self, pos) |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1266 | |
| 1267 | def read(self, n=None): |
| 1268 | if n is None: |
| 1269 | n = -1 |
| 1270 | self.flush() |
| 1271 | return BufferedReader.read(self, n) |
| 1272 | |
| 1273 | def readinto(self, b): |
| 1274 | self.flush() |
| 1275 | return BufferedReader.readinto(self, b) |
| 1276 | |
| 1277 | def peek(self, n=0): |
| 1278 | self.flush() |
| 1279 | return BufferedReader.peek(self, n) |
| 1280 | |
| 1281 | def read1(self, n): |
| 1282 | self.flush() |
| 1283 | return BufferedReader.read1(self, n) |
| 1284 | |
| 1285 | def write(self, b): |
| 1286 | if self._read_buf: |
| 1287 | # Undo readahead |
| 1288 | with self._read_lock: |
| 1289 | self.raw.seek(self._read_pos - len(self._read_buf), 1) |
| 1290 | self._reset_read_buf() |
| 1291 | return BufferedWriter.write(self, b) |
| 1292 | |
| 1293 | |
| 1294 | class TextIOBase(IOBase): |
| 1295 | |
| 1296 | """Base class for text I/O. |
| 1297 | |
| 1298 | This class provides a character and line based interface to stream |
| 1299 | I/O. There is no readinto method because Python's character strings |
| 1300 | are immutable. There is no public constructor. |
| 1301 | """ |
| 1302 | |
| 1303 | def read(self, n=-1): |
| 1304 | """Read at most n characters from stream. |
| 1305 | |
| 1306 | Read from underlying buffer until we have n characters or we hit EOF. |
| 1307 | If n is negative or omitted, read until EOF. |
| 1308 | """ |
| 1309 | self._unsupported("read") |
| 1310 | |
| 1311 | def write(self, s): |
| 1312 | """Write string s to stream.""" |
| 1313 | self._unsupported("write") |
| 1314 | |
| 1315 | def truncate(self, pos=None): |
| 1316 | """Truncate size to pos.""" |
| 1317 | self._unsupported("truncate") |
| 1318 | |
| 1319 | def readline(self): |
| 1320 | """Read until newline or EOF. |
| 1321 | |
| 1322 | Returns an empty string if EOF is hit immediately. |
| 1323 | """ |
| 1324 | self._unsupported("readline") |
| 1325 | |
| 1326 | def detach(self): |
| 1327 | """ |
| 1328 | Separate the underlying buffer from the TextIOBase and return it. |
| 1329 | |
| 1330 | After the underlying buffer has been detached, the TextIO is in an |
| 1331 | unusable state. |
| 1332 | """ |
| 1333 | self._unsupported("detach") |
| 1334 | |
| 1335 | @property |
| 1336 | def encoding(self): |
| 1337 | """Subclasses should override.""" |
| 1338 | return None |
| 1339 | |
| 1340 | @property |
| 1341 | def newlines(self): |
| 1342 | """Line endings translated so far. |
| 1343 | |
| 1344 | Only line endings translated during reading are considered. |
| 1345 | |
| 1346 | Subclasses should override. |
| 1347 | """ |
| 1348 | return None |
| 1349 | |
| 1350 | @property |
| 1351 | def errors(self): |
| 1352 | """Error setting of the decoder or encoder. |
| 1353 | |
| 1354 | Subclasses should override.""" |
| 1355 | return None |
| 1356 | |
| 1357 | io.TextIOBase.register(TextIOBase) |
| 1358 | |
| 1359 | |
| 1360 | class IncrementalNewlineDecoder(codecs.IncrementalDecoder): |
| 1361 | r"""Codec used when reading a file in universal newlines mode. It wraps |
| 1362 | another incremental decoder, translating \r\n and \r into \n. It also |
| 1363 | records the types of newlines encountered. When used with |
| 1364 | translate=False, it ensures that the newline sequence is returned in |
| 1365 | one piece. |
| 1366 | """ |
| 1367 | def __init__(self, decoder, translate, errors='strict'): |
| 1368 | codecs.IncrementalDecoder.__init__(self, errors=errors) |
| 1369 | self.translate = translate |
| 1370 | self.decoder = decoder |
| 1371 | self.seennl = 0 |
| 1372 | self.pendingcr = False |
| 1373 | |
| 1374 | def decode(self, input, final=False): |
| 1375 | # decode input (with any \r held over from a previous pass) |
| 1376 | if self.decoder is None: |
| 1377 | output = input |
| 1378 | else: |
| 1379 | output = self.decoder.decode(input, final=final) |
| 1380 | if self.pendingcr and (output or final): |
| 1381 | output = "\r" + output |
| 1382 | self.pendingcr = False |
| 1383 | |
| 1384 | # retain last \r even when not translating data: |
| 1385 | # then readline() is sure to get \r\n in one pass |
| 1386 | if output.endswith("\r") and not final: |
| 1387 | output = output[:-1] |
| 1388 | self.pendingcr = True |
| 1389 | |
| 1390 | # Record which newlines are read |
| 1391 | crlf = output.count('\r\n') |
| 1392 | cr = output.count('\r') - crlf |
| 1393 | lf = output.count('\n') - crlf |
| 1394 | self.seennl |= (lf and self._LF) | (cr and self._CR) \ |
| 1395 | | (crlf and self._CRLF) |
| 1396 | |
| 1397 | if self.translate: |
| 1398 | if crlf: |
| 1399 | output = output.replace("\r\n", "\n") |
| 1400 | if cr: |
| 1401 | output = output.replace("\r", "\n") |
| 1402 | |
| 1403 | return output |
| 1404 | |
| 1405 | def getstate(self): |
| 1406 | if self.decoder is None: |
| 1407 | buf = b"" |
| 1408 | flag = 0 |
| 1409 | else: |
| 1410 | buf, flag = self.decoder.getstate() |
| 1411 | flag <<= 1 |
| 1412 | if self.pendingcr: |
| 1413 | flag |= 1 |
| 1414 | return buf, flag |
| 1415 | |
| 1416 | def setstate(self, state): |
| 1417 | buf, flag = state |
| 1418 | self.pendingcr = bool(flag & 1) |
| 1419 | if self.decoder is not None: |
| 1420 | self.decoder.setstate((buf, flag >> 1)) |
| 1421 | |
| 1422 | def reset(self): |
| 1423 | self.seennl = 0 |
| 1424 | self.pendingcr = False |
| 1425 | if self.decoder is not None: |
| 1426 | self.decoder.reset() |
| 1427 | |
| 1428 | _LF = 1 |
| 1429 | _CR = 2 |
| 1430 | _CRLF = 4 |
| 1431 | |
| 1432 | @property |
| 1433 | def newlines(self): |
| 1434 | return (None, |
| 1435 | "\n", |
| 1436 | "\r", |
| 1437 | ("\r", "\n"), |
| 1438 | "\r\n", |
| 1439 | ("\n", "\r\n"), |
| 1440 | ("\r", "\r\n"), |
| 1441 | ("\r", "\n", "\r\n") |
| 1442 | )[self.seennl] |
| 1443 | |
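| | # A rough doctest-style sketch of IncrementalNewlineDecoder: passing None |
| | # as the wrapped decoder feeds already-decoded text straight through. |
| | # |
| | # >>> d = IncrementalNewlineDecoder(None, translate=True) |
| | # >>> d.decode(u"one\r\ntwo\r") |
| | # u'one\ntwo' |
| | # >>> d.decode(u"three\n", final=True) |
| | # u'\nthree\n' |
| | # >>> d.newlines |
| | # (u'\r', u'\n', u'\r\n') |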
| 1444 | |
| 1445 | class TextIOWrapper(TextIOBase): |
| 1446 | |
| 1447 | r"""Character and line based layer over a BufferedIOBase object, buffer. |
| 1448 | |
| 1449 | encoding gives the name of the encoding that the stream will be |
| 1450 | decoded or encoded with. It defaults to locale.getpreferredencoding. |
| 1451 | |
| 1452 | errors determines the strictness of encoding and decoding (see the |
| 1453 | documentation for codecs.register) and defaults to "strict". |
| 1454 | |
| 1455 | newline can be None, '', '\n', '\r', or '\r\n'. It controls the |
| 1456 | handling of line endings. If it is None, universal newlines is |
| 1457 | enabled. With this enabled, on input, the line endings '\n', '\r', |
| 1458 | or '\r\n' are translated to '\n' before being returned to the |
| 1459 | caller. Conversely, on output, '\n' is translated to the system |
Éric Araujo | 7f4b3be | 2012-02-26 01:41:39 +0100 | [diff] [blame] | 1460 | default line separator, os.linesep. If newline is any other of its |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1461 | legal values, that newline becomes the newline when the file is read |
| 1462 | and it is returned untranslated. On output, '\n' is converted to the |
| 1463 | newline. |
| 1464 | |
| 1465 | If line_buffering is True, a call to flush is implied when a call to |
| 1466 | write contains a newline character. |
| 1467 | """ |
| 1468 | |
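| | # A rough usage sketch (io.BytesIO merely stands in here for any |
| | # buffered binary stream): |
| | # |
| | # >>> t = TextIOWrapper(io.BytesIO(b"caf\xc3\xa9\n"), encoding="utf-8") |
| | # >>> t.readline() |
| | # u'caf\xe9\n' |
| | |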
| 1469 | _CHUNK_SIZE = 2048 |
| 1470 | |
| 1471 | def __init__(self, buffer, encoding=None, errors=None, newline=None, |
| 1472 | line_buffering=False): |
| 1473 | if newline is not None and not isinstance(newline, basestring): |
| 1474 | raise TypeError("illegal newline type: %r" % (type(newline),)) |
| 1475 | if newline not in (None, "", "\n", "\r", "\r\n"): |
| 1476 | raise ValueError("illegal newline value: %r" % (newline,)) |
| 1477 | if encoding is None: |
| 1478 | try: |
Victor Stinner | 7120219 | 2010-05-04 11:35:36 +0000 | [diff] [blame] | 1479 | import locale |
| 1480 | except ImportError: |
| 1481 | # Importing locale may fail if Python is being built; fall back to ASCII |
| 1482 | encoding = "ascii" |
| 1483 | else: |
| 1484 | encoding = locale.getpreferredencoding() |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1485 | |
| 1486 | if not isinstance(encoding, basestring): |
| 1487 | raise ValueError("invalid encoding: %r" % encoding) |
| 1488 | |
| 1489 | if errors is None: |
| 1490 | errors = "strict" |
| 1491 | else: |
| 1492 | if not isinstance(errors, basestring): |
| 1493 | raise ValueError("invalid errors: %r" % errors) |
| 1494 | |
Antoine Pitrou | fc9ead6 | 2010-12-21 21:26:55 +0000 | [diff] [blame] | 1495 | self._buffer = buffer |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1496 | self._line_buffering = line_buffering |
| 1497 | self._encoding = encoding |
| 1498 | self._errors = errors |
| 1499 | self._readuniversal = not newline |
| 1500 | self._readtranslate = newline is None |
| 1501 | self._readnl = newline |
| 1502 | self._writetranslate = newline != '' |
| 1503 | self._writenl = newline or os.linesep |
| 1504 | self._encoder = None |
| 1505 | self._decoder = None |
| 1506 | self._decoded_chars = '' # buffer for text returned from decoder |
| 1507 | self._decoded_chars_used = 0 # offset into _decoded_chars for read() |
| 1508 | self._snapshot = None # info for reconstructing decoder state |
| 1509 | self._seekable = self._telling = self.buffer.seekable() |
| 1510 | |
| 1511 | if self._seekable and self.writable(): |
| 1512 | position = self.buffer.tell() |
| 1513 | if position != 0: |
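| | # Appending to non-empty data: nudge the encoder out of its initial |
| | # state so it does not emit a BOM in the middle of the file. |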
| 1514 | try: |
| 1515 | self._get_encoder().setstate(0) |
| 1516 | except LookupError: |
| 1517 | # The codec may not provide an incremental encoder |
| 1518 | pass |
| 1519 | |
| 1520 | # self._snapshot is either None, or a tuple (dec_flags, next_input) |
| 1521 | # where dec_flags is the second (integer) item of the decoder state |
| 1522 | # and next_input is the chunk of input bytes that comes next after the |
| 1523 | # snapshot point. We use this to reconstruct decoder states in tell(). |
| 1524 | |
| 1525 | # Naming convention: |
| 1526 | # - "bytes_..." for integer variables that count input bytes |
| 1527 | # - "chars_..." for integer variables that count decoded characters |
| 1528 | |
| 1529 | def __repr__(self): |
| 1530 | try: |
| 1531 | name = self.name |
| 1532 | except AttributeError: |
| 1533 | return "<_pyio.TextIOWrapper encoding='{0}'>".format(self.encoding) |
| 1534 | else: |
| 1535 | return "<_pyio.TextIOWrapper name={0!r} encoding='{1}'>".format( |
| 1536 | name, self.encoding) |
| 1537 | |
| 1538 | @property |
| 1539 | def encoding(self): |
| 1540 | return self._encoding |
| 1541 | |
| 1542 | @property |
| 1543 | def errors(self): |
| 1544 | return self._errors |
| 1545 | |
| 1546 | @property |
| 1547 | def line_buffering(self): |
| 1548 | return self._line_buffering |
| 1549 | |
Antoine Pitrou | fc9ead6 | 2010-12-21 21:26:55 +0000 | [diff] [blame] | 1550 | @property |
| 1551 | def buffer(self): |
| 1552 | return self._buffer |
| 1553 | |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1554 | def seekable(self): |
Antoine Pitrou | c5eec0e | 2012-09-05 20:11:49 +0200 | [diff] [blame] | 1555 | if self.closed: |
| 1556 | raise ValueError("I/O operation on closed file.") |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1557 | return self._seekable |
| 1558 | |
| 1559 | def readable(self): |
| 1560 | return self.buffer.readable() |
| 1561 | |
| 1562 | def writable(self): |
| 1563 | return self.buffer.writable() |
| 1564 | |
| 1565 | def flush(self): |
| 1566 | self.buffer.flush() |
| 1567 | self._telling = self._seekable |
| 1568 | |
| 1569 | def close(self): |
Antoine Pitrou | f7fd8e4 | 2010-05-03 16:25:33 +0000 | [diff] [blame] | 1570 | if self.buffer is not None and not self.closed: |
| 1571 | self.flush() |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1572 | self.buffer.close() |
| 1573 | |
| 1574 | @property |
| 1575 | def closed(self): |
| 1576 | return self.buffer.closed |
| 1577 | |
| 1578 | @property |
| 1579 | def name(self): |
| 1580 | return self.buffer.name |
| 1581 | |
| 1582 | def fileno(self): |
| 1583 | return self.buffer.fileno() |
| 1584 | |
| 1585 | def isatty(self): |
| 1586 | return self.buffer.isatty() |
| 1587 | |
| 1588 | def write(self, s): |
| 1589 | if self.closed: |
| 1590 | raise ValueError("write to closed file") |
| 1591 | if not isinstance(s, unicode): |
| 1592 | raise TypeError("can't write %s to text stream" % |
| 1593 | s.__class__.__name__) |
| 1594 | length = len(s) |
| 1595 | haslf = (self._writetranslate or self._line_buffering) and "\n" in s |
| 1596 | if haslf and self._writetranslate and self._writenl != "\n": |
| 1597 | s = s.replace("\n", self._writenl) |
| 1598 | encoder = self._encoder or self._get_encoder() |
| 1599 | # XXX What if we were just reading? |
| 1600 | b = encoder.encode(s) |
| 1601 | self.buffer.write(b) |
| 1602 | if self._line_buffering and (haslf or "\r" in s): |
| 1603 | self.flush() |
| 1604 | self._snapshot = None |
| 1605 | if self._decoder: |
| 1606 | self._decoder.reset() |
| 1607 | return length |
| 1608 | |
| 1609 | def _get_encoder(self): |
| 1610 | make_encoder = codecs.getincrementalencoder(self._encoding) |
| 1611 | self._encoder = make_encoder(self._errors) |
| 1612 | return self._encoder |
| 1613 | |
| 1614 | def _get_decoder(self): |
| 1615 | make_decoder = codecs.getincrementaldecoder(self._encoding) |
| 1616 | decoder = make_decoder(self._errors) |
| 1617 | if self._readuniversal: |
| 1618 | decoder = IncrementalNewlineDecoder(decoder, self._readtranslate) |
| 1619 | self._decoder = decoder |
| 1620 | return decoder |
| 1621 | |
| 1622 | # The following three methods implement an ADT for _decoded_chars. |
| 1623 | # Text returned from the decoder is buffered here until the client |
| 1624 | # requests it by calling our read() or readline() method. |
| 1625 | def _set_decoded_chars(self, chars): |
| 1626 | """Set the _decoded_chars buffer.""" |
| 1627 | self._decoded_chars = chars |
| 1628 | self._decoded_chars_used = 0 |
| 1629 | |
| 1630 | def _get_decoded_chars(self, n=None): |
| 1631 | """Advance into the _decoded_chars buffer.""" |
| 1632 | offset = self._decoded_chars_used |
| 1633 | if n is None: |
| 1634 | chars = self._decoded_chars[offset:] |
| 1635 | else: |
| 1636 | chars = self._decoded_chars[offset:offset + n] |
| 1637 | self._decoded_chars_used += len(chars) |
| 1638 | return chars |
| 1639 | |
| 1640 | def _rewind_decoded_chars(self, n): |
| 1641 | """Rewind the _decoded_chars buffer.""" |
| 1642 | if self._decoded_chars_used < n: |
| 1643 | raise AssertionError("rewind decoded_chars out of bounds") |
| 1644 | self._decoded_chars_used -= n |
| 1645 | |
| 1646 | def _read_chunk(self): |
| 1647 | """ |
| 1648 | Read and decode the next chunk of data from the BufferedReader. |
| 1649 | """ |
| 1650 | |
| 1651 | # The return value is True unless EOF was reached. The decoded |
| 1652 | # string is placed in self._decoded_chars (replacing its previous |
| 1653 | # value). The entire input chunk is sent to the decoder, though |
| 1654 | # some of it may remain buffered in the decoder, yet to be |
| 1655 | # converted. |
| 1656 | |
| 1657 | if self._decoder is None: |
| 1658 | raise ValueError("no decoder") |
| 1659 | |
| 1660 | if self._telling: |
| 1661 | # To prepare for tell(), we need to snapshot a point in the |
| 1662 | # file where the decoder's input buffer is empty. |
| 1663 | |
| 1664 | dec_buffer, dec_flags = self._decoder.getstate() |
| 1665 | # Given this, we know there was a valid snapshot point |
| 1666 | # len(dec_buffer) bytes ago with decoder state (b'', dec_flags). |
| 1667 | |
| 1668 | # Read a chunk, decode it, and put the result in self._decoded_chars. |
| 1669 | input_chunk = self.buffer.read1(self._CHUNK_SIZE) |
| 1670 | eof = not input_chunk |
| 1671 | self._set_decoded_chars(self._decoder.decode(input_chunk, eof)) |
| 1672 | |
| 1673 | if self._telling: |
| 1674 | # At the snapshot point, len(dec_buffer) bytes before the read, |
| 1675 | # the next input to be decoded is dec_buffer + input_chunk. |
| 1676 | self._snapshot = (dec_flags, dec_buffer + input_chunk) |
| 1677 | |
| 1678 | return not eof |
| 1679 | |
| 1680 | def _pack_cookie(self, position, dec_flags=0, |
| 1681 | bytes_to_feed=0, need_eof=0, chars_to_skip=0): |
| 1682 | # The meaning of a tell() cookie is: seek to position, set the |
| 1683 | # decoder flags to dec_flags, read bytes_to_feed bytes, feed them |
| 1684 | # into the decoder with need_eof as the EOF flag, then skip |
| 1685 | # chars_to_skip characters of the decoded result. For most simple |
| 1686 | # decoders, tell() will often just give a byte offset in the file. |
| 1687 | return (position | (dec_flags<<64) | (bytes_to_feed<<128) | |
| 1688 | (chars_to_skip<<192) | bool(need_eof)<<256) |
| 1689 | |
| 1690 | def _unpack_cookie(self, bigint): |
| 1691 | rest, position = divmod(bigint, 1<<64) |
| 1692 | rest, dec_flags = divmod(rest, 1<<64) |
| 1693 | rest, bytes_to_feed = divmod(rest, 1<<64) |
| 1694 | need_eof, chars_to_skip = divmod(rest, 1<<64) |
| 1695 | return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip |
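| | # A rough round-trip sketch with made-up field values (on Python 2 the |
| | # unpacked fields come back as longs): |
| | # |
| | # >>> s = StringIO() |
| | # >>> s._unpack_cookie(s._pack_cookie(10, 1, 3, 0, 2)) |
| | # (10L, 1L, 3L, 0L, 2L) |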
| 1696 | |
| 1697 | def tell(self): |
| 1698 | if not self._seekable: |
| 1699 | raise IOError("underlying stream is not seekable") |
| 1700 | if not self._telling: |
| 1701 | raise IOError("telling position disabled by next() call") |
| 1702 | self.flush() |
| 1703 | position = self.buffer.tell() |
| 1704 | decoder = self._decoder |
| 1705 | if decoder is None or self._snapshot is None: |
| 1706 | if self._decoded_chars: |
| 1707 | # This should never happen. |
| 1708 | raise AssertionError("pending decoded text") |
| 1709 | return position |
| 1710 | |
| 1711 | # Skip backward to the snapshot point (see _read_chunk). |
| 1712 | dec_flags, next_input = self._snapshot |
| 1713 | position -= len(next_input) |
| 1714 | |
| 1715 | # How many decoded characters have been used up since the snapshot? |
| 1716 | chars_to_skip = self._decoded_chars_used |
| 1717 | if chars_to_skip == 0: |
| 1718 | # We haven't moved from the snapshot point. |
| 1719 | return self._pack_cookie(position, dec_flags) |
| 1720 | |
| 1721 | # Starting from the snapshot position, we will walk the decoder |
| 1722 | # forward until it gives us enough decoded characters. |
| 1723 | saved_state = decoder.getstate() |
| 1724 | try: |
| 1725 | # Note our initial start point. |
| 1726 | decoder.setstate((b'', dec_flags)) |
| 1727 | start_pos = position |
| 1728 | start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0 |
| 1729 | need_eof = 0 |
| 1730 | |
| 1731 | # Feed the decoder one byte at a time. As we go, note the |
| 1732 | # nearest "safe start point" before the current location |
| 1733 | # (a point where the decoder has nothing buffered, so seek() |
| 1734 | # can safely start from there and advance to this location). |
| 1735 | for next_byte in next_input: |
| 1736 | bytes_fed += 1 |
| 1737 | chars_decoded += len(decoder.decode(next_byte)) |
| 1738 | dec_buffer, dec_flags = decoder.getstate() |
| 1739 | if not dec_buffer and chars_decoded <= chars_to_skip: |
| 1740 | # Decoder buffer is empty, so this is a safe start point. |
| 1741 | start_pos += bytes_fed |
| 1742 | chars_to_skip -= chars_decoded |
| 1743 | start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0 |
| 1744 | if chars_decoded >= chars_to_skip: |
| 1745 | break |
| 1746 | else: |
| 1747 | # We didn't get enough decoded data; signal EOF to get more. |
| 1748 | chars_decoded += len(decoder.decode(b'', final=True)) |
| 1749 | need_eof = 1 |
| 1750 | if chars_decoded < chars_to_skip: |
| 1751 | raise IOError("can't reconstruct logical file position") |
| 1752 | |
| 1753 | # The returned cookie corresponds to the last safe start point. |
| 1754 | return self._pack_cookie( |
| 1755 | start_pos, start_flags, bytes_fed, need_eof, chars_to_skip) |
| 1756 | finally: |
| 1757 | decoder.setstate(saved_state) |
| 1758 | |
| 1759 | def truncate(self, pos=None): |
| 1760 | self.flush() |
| 1761 | if pos is None: |
| 1762 | pos = self.tell() |
Antoine Pitrou | f3fa074 | 2010-01-31 22:26:04 +0000 | [diff] [blame] | 1763 | return self.buffer.truncate(pos) |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1764 | |
| 1765 | def detach(self): |
| 1766 | if self.buffer is None: |
| 1767 | raise ValueError("buffer is already detached") |
| 1768 | self.flush() |
Antoine Pitrou | fc9ead6 | 2010-12-21 21:26:55 +0000 | [diff] [blame] | 1769 | buffer = self._buffer |
| 1770 | self._buffer = None |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1771 | return buffer |
| 1772 | |
| 1773 | def seek(self, cookie, whence=0): |
| 1774 | if self.closed: |
| 1775 | raise ValueError("tell on closed file") |
| 1776 | if not self._seekable: |
| 1777 | raise IOError("underlying stream is not seekable") |
| 1778 | if whence == 1: # seek relative to current position |
| 1779 | if cookie != 0: |
| 1780 | raise IOError("can't do nonzero cur-relative seeks") |
| 1781 | # Seeking to the current position should attempt to |
| 1782 | # sync the underlying buffer with the current position. |
| 1783 | whence = 0 |
| 1784 | cookie = self.tell() |
| 1785 | if whence == 2: # seek relative to end of file |
| 1786 | if cookie != 0: |
| 1787 | raise IOError("can't do nonzero end-relative seeks") |
| 1788 | self.flush() |
| 1789 | position = self.buffer.seek(0, 2) |
| 1790 | self._set_decoded_chars('') |
| 1791 | self._snapshot = None |
| 1792 | if self._decoder: |
| 1793 | self._decoder.reset() |
| 1794 | return position |
| 1795 | if whence != 0: |
| 1796 | raise ValueError("invalid whence (%r, should be 0, 1 or 2)" % |
| 1797 | (whence,)) |
| 1798 | if cookie < 0: |
| 1799 | raise ValueError("negative seek position %r" % (cookie,)) |
| 1800 | self.flush() |
| 1801 | |
| 1802 | # The strategy of seek() is to go back to the safe start point |
| 1803 | # and replay the effect of read(chars_to_skip) from there. |
| 1804 | start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \ |
| 1805 | self._unpack_cookie(cookie) |
| 1806 | |
| 1807 | # Seek back to the safe start point. |
| 1808 | self.buffer.seek(start_pos) |
| 1809 | self._set_decoded_chars('') |
| 1810 | self._snapshot = None |
| 1811 | |
| 1812 | # Restore the decoder to its state from the safe start point. |
| 1813 | if cookie == 0 and self._decoder: |
| 1814 | self._decoder.reset() |
| 1815 | elif self._decoder or dec_flags or chars_to_skip: |
| 1816 | self._decoder = self._decoder or self._get_decoder() |
| 1817 | self._decoder.setstate((b'', dec_flags)) |
| 1818 | self._snapshot = (dec_flags, b'') |
| 1819 | |
| 1820 | if chars_to_skip: |
| 1821 | # Just like _read_chunk, feed the decoder and save a snapshot. |
| 1822 | input_chunk = self.buffer.read(bytes_to_feed) |
| 1823 | self._set_decoded_chars( |
| 1824 | self._decoder.decode(input_chunk, need_eof)) |
| 1825 | self._snapshot = (dec_flags, input_chunk) |
| 1826 | |
| 1827 | # Skip chars_to_skip of the decoded characters. |
| 1828 | if len(self._decoded_chars) < chars_to_skip: |
| 1829 | raise IOError("can't restore logical file position") |
| 1830 | self._decoded_chars_used = chars_to_skip |
| 1831 | |
| 1832 | # Finally, reset the encoder (merely useful for proper BOM handling) |
| 1833 | try: |
| 1834 | encoder = self._encoder or self._get_encoder() |
| 1835 | except LookupError: |
| 1836 | # The codec may not provide an incremental encoder |
| 1837 | pass |
| 1838 | else: |
| 1839 | if cookie != 0: |
| 1840 | encoder.setstate(0) |
| 1841 | else: |
| 1842 | encoder.reset() |
| 1843 | return cookie |
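| | # A rough sketch of the tell()/seek() round trip over multibyte text; |
| | # the cookie is treated as opaque rather than as a plain byte offset: |
| | # |
| | # >>> data = u"\u20ac1\n\u20ac2\n".encode("utf-8") |
| | # >>> t = TextIOWrapper(io.BytesIO(data), encoding="utf-8") |
| | # >>> t.readline() |
| | # u'\u20ac1\n' |
| | # >>> cookie = t.tell() |
| | # >>> t.readline() |
| | # u'\u20ac2\n' |
| | # >>> t.seek(cookie) == cookie |
| | # True |
| | # >>> t.readline() |
| | # u'\u20ac2\n' |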
| 1844 | |
| 1845 | def read(self, n=None): |
| 1846 | self._checkReadable() |
| 1847 | if n is None: |
| 1848 | n = -1 |
| 1849 | decoder = self._decoder or self._get_decoder() |
Florent Xicluna | 1f3b4e1 | 2010-03-07 12:14:25 +0000 | [diff] [blame] | 1850 | try: |
| 1851 | n.__index__ |
| 1852 | except AttributeError: |
| 1853 | raise TypeError("an integer is required") |
Antoine Pitrou | 1969059 | 2009-06-12 20:14:08 +0000 | [diff] [blame] | 1854 | if n < 0: |
| 1855 | # Read everything. |
| 1856 | result = (self._get_decoded_chars() + |
| 1857 | decoder.decode(self.buffer.read(), final=True)) |
| 1858 | self._set_decoded_chars('') |
| 1859 | self._snapshot = None |
| 1860 | return result |
| 1861 | else: |
| 1862 | # Keep reading chunks until we have n characters to return. |
| 1863 | eof = False |
| 1864 | result = self._get_decoded_chars(n) |
| 1865 | while len(result) < n and not eof: |
| 1866 | eof = not self._read_chunk() |
| 1867 | result += self._get_decoded_chars(n - len(result)) |
| 1868 | return result |
| 1869 | |
| 1870 | def next(self): |
| 1871 | self._telling = False |
| 1872 | line = self.readline() |
| 1873 | if not line: |
| 1874 | self._snapshot = None |
| 1875 | self._telling = self._seekable |
| 1876 | raise StopIteration |
| 1877 | return line |
| 1878 | |
| 1879 | def readline(self, limit=None): |
| 1880 | if self.closed: |
| 1881 | raise ValueError("read from closed file") |
| 1882 | if limit is None: |
| 1883 | limit = -1 |
| 1884 | elif not isinstance(limit, (int, long)): |
| 1885 | raise TypeError("limit must be an integer") |
| 1886 | |
| 1887 | # Grab all the decoded text (we will rewind any extra bits later). |
| 1888 | line = self._get_decoded_chars() |
| 1889 | |
| 1890 | start = 0 |
| 1891 | # Make the decoder if it doesn't already exist. |
| 1892 | if not self._decoder: |
| 1893 | self._get_decoder() |
| 1894 | |
| 1895 | pos = endpos = None |
| 1896 | while True: |
| 1897 | if self._readtranslate: |
| 1898 | # Newlines are already translated, only search for \n |
| 1899 | pos = line.find('\n', start) |
| 1900 | if pos >= 0: |
| 1901 | endpos = pos + 1 |
| 1902 | break |
| 1903 | else: |
| 1904 | start = len(line) |
| 1905 | |
| 1906 | elif self._readuniversal: |
| 1907 | # Universal newline search. Find any of \r, \r\n, \n |
| 1908 | # The decoder ensures that \r\n are not split in two pieces |
| 1909 | |
| 1910 | # In C we'd look for these in parallel, of course. |
| 1911 | nlpos = line.find("\n", start) |
| 1912 | crpos = line.find("\r", start) |
| 1913 | if crpos == -1: |
| 1914 | if nlpos == -1: |
| 1915 | # Nothing found |
| 1916 | start = len(line) |
| 1917 | else: |
| 1918 | # Found \n |
| 1919 | endpos = nlpos + 1 |
| 1920 | break |
| 1921 | elif nlpos == -1: |
| 1922 | # Found lone \r |
| 1923 | endpos = crpos + 1 |
| 1924 | break |
| 1925 | elif nlpos < crpos: |
| 1926 | # Found \n |
| 1927 | endpos = nlpos + 1 |
| 1928 | break |
| 1929 | elif nlpos == crpos + 1: |
| 1930 | # Found \r\n |
| 1931 | endpos = crpos + 2 |
| 1932 | break |
| 1933 | else: |
| 1934 | # Found \r |
| 1935 | endpos = crpos + 1 |
| 1936 | break |
| 1937 | else: |
| 1938 | # non-universal |
| 1939 | pos = line.find(self._readnl) |
| 1940 | if pos >= 0: |
| 1941 | endpos = pos + len(self._readnl) |
| 1942 | break |
| 1943 | |
| 1944 | if limit >= 0 and len(line) >= limit: |
| 1945 | endpos = limit # reached length limit |
| 1946 | break |
| 1947 | |
| 1948 | # No line ending seen yet - get more data |
| 1949 | while self._read_chunk(): |
| 1950 | if self._decoded_chars: |
| 1951 | break |
| 1952 | if self._decoded_chars: |
| 1953 | line += self._get_decoded_chars() |
| 1954 | else: |
| 1955 | # end of file |
| 1956 | self._set_decoded_chars('') |
| 1957 | self._snapshot = None |
| 1958 | return line |
| 1959 | |
| 1960 | if limit >= 0 and endpos > limit: |
| 1961 | endpos = limit # don't exceed limit |
| 1962 | |
| 1963 | # Rewind _decoded_chars to just after the line ending we found. |
| 1964 | self._rewind_decoded_chars(len(line) - endpos) |
| 1965 | return line[:endpos] |
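| | # A rough sketch of readline() with the default newline=None: '\r\n' is |
| | # translated on input and recorded by the newlines property. |
| | # |
| | # >>> t = TextIOWrapper(io.BytesIO(b"a\r\nb"), encoding="ascii") |
| | # >>> t.readline() |
| | # u'a\n' |
| | # >>> t.readline() |
| | # u'b' |
| | # >>> t.newlines |
| | # u'\r\n' |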
| 1966 | |
| 1967 | @property |
| 1968 | def newlines(self): |
| 1969 | return self._decoder.newlines if self._decoder else None |
| 1970 | |
| 1971 | |
| 1972 | class StringIO(TextIOWrapper): |
| 1973 | """Text I/O implementation using an in-memory buffer. |
| 1974 | |
| 1975 | The initial_value argument sets the initial value of the object. The |
| 1976 | newline argument works like the one of TextIOWrapper's constructor. |
| 1977 | """ |
| 1978 | |
| 1979 | def __init__(self, initial_value="", newline="\n"): |
| 1980 | super(StringIO, self).__init__(BytesIO(), |
| 1981 | encoding="utf-8", |
| 1982 | errors="strict", |
| 1983 | newline=newline) |
| 1984 | # Issue #5645: make universal newlines semantics the same as in the |
| 1985 | # C version, even under Windows. |
| 1986 | if newline is None: |
| 1987 | self._writetranslate = False |
| 1988 | if initial_value: |
| 1989 | if not isinstance(initial_value, unicode): |
| 1990 | initial_value = unicode(initial_value) |
| 1991 | self.write(initial_value) |
| 1992 | self.seek(0) |
| 1993 | |
| 1994 | def getvalue(self): |
| 1995 | self.flush() |
| 1996 | return self.buffer.getvalue().decode(self._encoding, self._errors) |
| 1997 | |
| 1998 | def __repr__(self): |
| 1999 | # TextIOWrapper reports the encoding in its repr. In StringIO, |
| 2000 | # that's an implementation detail. |
| 2001 | return object.__repr__(self) |
| 2002 | |
| 2003 | @property |
| 2004 | def errors(self): |
| 2005 | return None |
| 2006 | |
| 2007 | @property |
| 2008 | def encoding(self): |
| 2009 | return None |
| 2010 | |
| 2011 | def detach(self): |
| 2012 | # This doesn't make sense on StringIO. |
| 2013 | self._unsupported("detach") |