"""
Python implementation of the io module.
"""

import os
import abc
import codecs
import warnings
# Import _thread instead of threading to reduce startup cost
try:
    from _thread import allocate_lock as Lock
except ImportError:
    from _dummy_thread import allocate_lock as Lock

import io
from io import __all__
from io import SEEK_SET, SEEK_CUR, SEEK_END

# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes

# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.


class BlockingIOError(IOError):

    """Exception raised when I/O would block on a non-blocking I/O stream."""

    def __init__(self, errno, strerror, characters_written=0):
        super().__init__(errno, strerror)
        if not isinstance(characters_written, int):
            raise TypeError("characters_written must be an integer")
        self.characters_written = characters_written


def open(file: (str, bytes), mode: str = "r", buffering: int = None,
         encoding: str = None, errors: str = None,
         newline: str = None, closefd: bool = True) -> "IOBase":

    r"""Open file and return a stream. Raise IOError upon failure.

    file is either a text or byte string giving the name (and the path
    if the file isn't in the current working directory) of the file to
    be opened or an integer file descriptor of the file to be
    wrapped. (If a file descriptor is given, it is closed when the
    returned I/O object is closed, unless closefd is set to False.)

    mode is an optional string that specifies the mode in which the file
    is opened. It defaults to 'r' which means open for reading in text
    mode. Other common values are 'w' for writing (truncating the file if
    it already exists), and 'a' for appending (which on some Unix systems,
    means that all writes append to the end of the file regardless of the
    current seek position). In text mode, if encoding is not specified the
    encoding used is platform dependent. (For reading and writing raw
    bytes use binary mode and leave encoding unspecified.) The available
    modes are:

    ========= ===============================================================
    Character Meaning
    --------- ---------------------------------------------------------------
    'r'       open for reading (default)
    'w'       open for writing, truncating the file first
    'a'       open for writing, appending to the end of the file if it exists
    'b'       binary mode
    't'       text mode (default)
    '+'       open a disk file for updating (reading and writing)
    'U'       universal newline mode (for backwards compatibility; unneeded
              for new code)
    ========= ===============================================================

    The default mode is 'rt' (open for reading text). For binary random
    access, the mode 'w+b' opens and truncates the file to 0 bytes, while
    'r+b' opens the file without truncation.

    Python distinguishes between files opened in binary and text modes,
    even when the underlying operating system doesn't. Files opened in
    binary mode (appending 'b' to the mode argument) return contents as
    bytes objects without any decoding. In text mode (the default, or when
    't' is appended to the mode argument), the contents of the file are
    returned as strings, the bytes having been first decoded using a
    platform-dependent encoding or using the specified encoding if given.

    buffering is an optional integer used to set the buffering policy. By
    default full buffering is on. Pass 0 to switch buffering off (only
    allowed in binary mode), 1 to set line buffering, and an integer > 1
    for full buffering.

    encoding is the name of the encoding used to decode or encode the
    file. This should only be used in text mode. The default encoding is
    platform dependent, but any encoding supported by Python can be
    passed. See the codecs module for the list of supported encodings.

    errors is an optional string that specifies how encoding errors are to
    be handled---this argument should not be used in binary mode. Pass
    'strict' to raise a ValueError exception if there is an encoding error
    (the default of None has the same effect), or pass 'ignore' to ignore
    errors. (Note that ignoring encoding errors can lead to data loss.)
    See the documentation for codecs.register for a list of the permitted
    encoding error strings.

    newline controls how universal newlines works (it only applies to text
    mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
    follows:

    * On input, if newline is None, universal newlines mode is
      enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
      these are translated into '\n' before being returned to the
      caller. If it is '', universal newline mode is enabled, but line
      endings are returned to the caller untranslated. If it has any of
      the other legal values, input lines are only terminated by the given
      string, and the line ending is returned to the caller untranslated.

    * On output, if newline is None, any '\n' characters written are
      translated to the system default line separator, os.linesep. If
      newline is '', no translation takes place. If newline is any of the
      other legal values, any '\n' characters written are translated to
      the given string.

    If closefd is False, the underlying file descriptor will be kept open
    when the file is closed. This does not work when a file name is given
    and must be True in that case.

    open() returns a file object whose type depends on the mode, and
    through which the standard file operations such as reading and writing
    are performed. When open() is used to open a file in a text mode ('w',
    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
    a file in a binary mode, the returned class varies: in read binary
    mode, it returns a BufferedReader; in write binary and append binary
    modes, it returns a BufferedWriter, and in read/write mode, it returns
    a BufferedRandom.

    It is also possible to use a string or bytearray as a file for both
    reading and writing. For strings StringIO can be used like a file
    opened in a text mode, and for bytes a BytesIO can be used like a file
    opened in a binary mode.
    """
    if not isinstance(file, (str, bytes, int)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, str):
        raise TypeError("invalid mode: %r" % mode)
    if buffering is not None and not isinstance(buffering, int):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, str):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, str):
        raise TypeError("invalid errors: %r" % errors)
    modes = set(mode)
    if modes - set("arwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        if writing or appending:
            raise ValueError("can't use U and writing mode at once")
        reading = True
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    raw = FileIO(file,
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd)
    if buffering is None:
        buffering = -1
    line_buffering = False
    if buffering == 1 or buffering < 0 and raw.isatty():
        buffering = -1
        line_buffering = True
    if buffering < 0:
        buffering = DEFAULT_BUFFER_SIZE
        try:
            bs = os.fstat(raw.fileno()).st_blksize
        except (os.error, AttributeError):
            pass
        else:
            if bs > 1:
                buffering = bs
    if buffering < 0:
        raise ValueError("invalid buffering size")
    if buffering == 0:
        if binary:
            return raw
        raise ValueError("can't have unbuffered text I/O")
    if updating:
        buffer = BufferedRandom(raw, buffering)
    elif writing or appending:
        buffer = BufferedWriter(raw, buffering)
    elif reading:
        buffer = BufferedReader(raw, buffering)
    else:
        raise ValueError("unknown mode: %r" % mode)
    if binary:
        return buffer
    text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
    text.mode = mode
    return text

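
# Usage sketch (illustration only; the file name is made up and a writable
# current directory is assumed): the mode string decides which of the
# classes defined below open() returns.
def _open_dispatch_example():
    with open("demo.bin", "wb") as f:                # binary write -> BufferedWriter
        f.write(b"spam")
    with open("demo.bin", "rb") as f:                # binary read -> BufferedReader
        assert isinstance(f, BufferedReader)
    with open("demo.bin", "r+b") as f:               # binary update -> BufferedRandom
        assert isinstance(f, BufferedRandom)
    with open("demo.bin", "r") as f:                 # text mode -> TextIOWrapper
        assert isinstance(f, TextIOWrapper)
    with open("demo.bin", "rb", buffering=0) as f:   # unbuffered binary -> raw FileIO
        assert isinstance(f, FileIO)
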

class DocDescriptor:
    """Helper for builtins.open.__doc__
    """
    def __get__(self, obj, typ):
        return (
            "open(file, mode='r', buffering=None, encoding=None, "
            "errors=None, newline=None, closefd=True)\n\n" +
            open.__doc__)

class OpenWrapper:
    """Wrapper for builtins.open

    Trick so that open won't become a bound method when stored
    as a class variable (as dbm.dumb does).

    See initstdio() in Python/pythonrun.c.
    """
    __doc__ = DocDescriptor()

    def __new__(cls, *args, **kwargs):
        return open(*args, **kwargs)


class UnsupportedOperation(ValueError, IOError):
    pass


class IOBase(metaclass=abc.ABCMeta):

    """The abstract base class for all I/O classes, acting on streams of
    bytes. There is no public constructor.

    This class provides dummy implementations for many methods that
    derived classes can override selectively; the default implementations
    represent a file that cannot be read, written or seeked.

    Even though IOBase does not declare read, readinto, or write because
    their signatures will vary, implementations and clients should
    consider those methods part of the interface. Also, implementations
    may raise an IOError when operations they do not support are called.

    The basic type used for binary data read from or written to a file is
    bytes. bytearrays are accepted too, and in some cases (such as
    readinto) needed. Text I/O classes work with str data.

    Note that calling any method (even inquiries) on a closed stream is
    undefined. Implementations may raise IOError in this case.

    IOBase (and its subclasses) support the iterator protocol, meaning
    that an IOBase object can be iterated over yielding the lines in a
    stream.

    IOBase also supports the :keyword:`with` statement. In this example,
    fp is closed after the suite of the with statement is complete:

    with open('spam.txt', 'w') as fp:
        fp.write('Spam and eggs!')
    """

    ### Internal ###

    def _unsupported(self, name: str) -> IOError:
        """Internal: raise an exception for unsupported operations."""
        raise UnsupportedOperation("%s.%s() not supported" %
                                   (self.__class__.__name__, name))

    ### Positioning ###

    def seek(self, pos: int, whence: int = 0) -> int:
        """Change stream position.

        Change the stream position to byte offset offset. offset is
        interpreted relative to the position indicated by whence. Values
        for whence are:

        * 0 -- start of stream (the default); offset should be zero or positive
        * 1 -- current stream position; offset may be negative
        * 2 -- end of stream; offset is usually negative

        Return the new absolute position.
        """
        self._unsupported("seek")

    def tell(self) -> int:
        """Return current stream position."""
        return self.seek(0, 1)

    def truncate(self, pos: int = None) -> int:
        """Truncate file to size bytes.

        Size defaults to the current IO position as reported by tell(). Return
        the new size.
        """
        self._unsupported("truncate")

    ### Flush and close ###

    def flush(self) -> None:
        """Flush write buffers, if applicable.

        This is not implemented for read-only and non-blocking streams.
        """
        # XXX Should this return the number of bytes written???

    __closed = False

    def close(self) -> None:
        """Flush and close the IO object.

        This method has no effect if the file is already closed.
        """
        if not self.__closed:
            try:
                self.flush()
            except IOError:
                pass  # If flush() fails, just give up
            self.__closed = True

    def __del__(self) -> None:
        """Destructor. Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail. Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    ### Inquiries ###

    def seekable(self) -> bool:
        """Return whether object supports random access.

        If False, seek(), tell() and truncate() will raise IOError.
        This method may need to do a test seek().
        """
        return False

    def _checkSeekable(self, msg=None):
        """Internal: raise an IOError if file is not seekable
        """
        if not self.seekable():
            raise IOError("File or stream is not seekable."
                          if msg is None else msg)


    def readable(self) -> bool:
        """Return whether object was opened for reading.

        If False, read() will raise IOError.
        """
        return False

    def _checkReadable(self, msg=None):
        """Internal: raise an IOError if file is not readable
        """
        if not self.readable():
            raise IOError("File or stream is not readable."
                          if msg is None else msg)

    def writable(self) -> bool:
        """Return whether object was opened for writing.

        If False, write() and truncate() will raise IOError.
        """
        return False

    def _checkWritable(self, msg=None):
        """Internal: raise an IOError if file is not writable
        """
        if not self.writable():
            raise IOError("File or stream is not writable."
                          if msg is None else msg)

    @property
    def closed(self):
        """closed: bool. True iff the file has been closed.

        For backwards compatibility, this is a property, not a predicate.
        """
        return self.__closed

    def _checkClosed(self, msg=None):
        """Internal: raise a ValueError if file is closed
        """
        if self.closed:
            raise ValueError("I/O operation on closed file."
                             if msg is None else msg)

    ### Context manager ###

    def __enter__(self) -> "IOBase":  # That's a forward reference
        """Context management protocol. Returns self."""
        self._checkClosed()
        return self

    def __exit__(self, *args) -> None:
        """Context management protocol. Calls close()"""
        self.close()

    ### Lower-level APIs ###

    # XXX Should these be present even if unimplemented?

    def fileno(self) -> int:
        """Returns underlying file descriptor if one exists.

        An IOError is raised if the IO object does not use a file descriptor.
        """
        self._unsupported("fileno")

    def isatty(self) -> bool:
        """Return whether this is an 'interactive' stream.

        Return False if it can't be determined.
        """
        self._checkClosed()
        return False

    ### Readline[s] and writelines ###

    def readline(self, limit: int = -1) -> bytes:
        r"""Read and return a line from the stream.

        If limit is specified, at most limit bytes will be read.

        The line terminator is always b'\n' for binary files; for text
        files, the newlines argument to open can be used to select the line
        terminator(s) recognized.
        """
        # For backwards compatibility, a (slowish) readline().
        if hasattr(self, "peek"):
            def nreadahead():
                readahead = self.peek(1)
                if not readahead:
                    return 1
                n = (readahead.find(b"\n") + 1) or len(readahead)
                if limit >= 0:
                    n = min(n, limit)
                return n
        else:
            def nreadahead():
                return 1
        if limit is None:
            limit = -1
        elif not isinstance(limit, int):
            raise TypeError("limit must be an integer")
        res = bytearray()
        while limit < 0 or len(res) < limit:
            b = self.read(nreadahead())
            if not b:
                break
            res += b
            if res.endswith(b"\n"):
                break
        return bytes(res)

    def __iter__(self):
        self._checkClosed()
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def readlines(self, hint=None):
        """Return a list of lines from the stream.

        hint can be specified to control the number of lines read: no more
        lines will be read if the total size (in bytes/characters) of all
        lines so far exceeds hint.
        """
        if hint is None or hint <= 0:
            return list(self)
        n = 0
        lines = []
        for line in self:
            lines.append(line)
            n += len(line)
            if n >= hint:
                break
        return lines

    def writelines(self, lines):
        self._checkClosed()
        for line in lines:
            self.write(line)

io.IOBase.register(IOBase)

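
# Minimal sketch (illustrative; not used by the module itself): the
# readline()/__iter__ machinery above is what lets any IOBase subclass be
# iterated line by line and used as a context manager. BytesIO, defined
# further down in this file, serves as a convenient concrete subclass.
def _iobase_iteration_example():
    buf = BytesIO(b"one\ntwo\nthree")
    assert list(buf) == [b"one\n", b"two\n", b"three"]
    with BytesIO(b"x\n") as f:
        assert f.readline() == b"x\n"
    assert f.closed  # __exit__() called close()
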

class RawIOBase(IOBase):

    """Base class for raw binary I/O."""

    # The read() method is implemented by calling readinto(); derived
    # classes that want to support read() only need to implement
    # readinto() as a primitive operation. In general, readinto() can be
    # more efficient than read().

    # (It would be tempting to also provide an implementation of
    # readinto() in terms of read(), in case the latter is a more suitable
    # primitive operation, but that would lead to nasty recursion in case
    # a subclass doesn't implement either.)

    def read(self, n: int = -1) -> bytes:
        """Read and return up to n bytes.

        Returns an empty bytes object on EOF, or None if the object is
        set not to block and has no data to read.
        """
        if n is None:
            n = -1
        if n < 0:
            return self.readall()
        b = bytearray(n.__index__())
        n = self.readinto(b)
        del b[n:]
        return bytes(b)

    def readall(self):
        """Read until EOF, using multiple read() calls."""
        res = bytearray()
        while True:
            data = self.read(DEFAULT_BUFFER_SIZE)
            if not data:
                break
            res += data
        return bytes(res)

    def readinto(self, b: bytearray) -> int:
        """Read up to len(b) bytes into b.

        Returns number of bytes read (0 for EOF), or None if the object
        is set not to block and has no data to read.
        """
        self._unsupported("readinto")

    def write(self, b: bytes) -> int:
        """Write the given buffer to the IO stream.

        Returns the number of bytes written, which may be less than len(b).
        """
        self._unsupported("write")

io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)

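
# Sketch of the subclassing contract described above (hypothetical class,
# for illustration only): a raw stream only needs to supply readable() and
# readinto(); read() and readall() then come from the defaults.
class _NulRawExample(RawIOBase):
    """Raw stream that yields a fixed number of NUL bytes."""

    def __init__(self, remaining):
        self._remaining = remaining

    def readable(self):
        return True

    def readinto(self, b):
        n = min(len(b), self._remaining)
        b[:n] = b"\x00" * n
        self._remaining -= n
        return n  # 0 once exhausted, which read()/readall() treat as EOF
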

class BufferedIOBase(IOBase):

    """Base class for buffered IO objects.

    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().

    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.

    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """

    def read(self, n: int = None) -> bytes:
        """Read and return up to n bytes.

        If the argument is omitted, None, or negative, reads and
        returns all data until EOF.

        If the argument is positive, and the underlying raw stream is
        not 'interactive', multiple raw reads may be issued to satisfy
        the byte count (unless EOF is reached first). But for
        interactive raw streams (XXX and for pipes?), at most one raw
        read will be issued, and a short result does not imply that
        EOF is imminent.

        Returns an empty bytes array on EOF.

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        self._unsupported("read")

    def read1(self, n: int=None) -> bytes:
        """Read up to n bytes with at most one read() system call."""
        self._unsupported("read1")

    def readinto(self, b: bytearray) -> int:
        """Read up to len(b) bytes into b.

        Like read(), this may issue multiple reads to the underlying raw
        stream, unless the latter is 'interactive'.

        Returns the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        # XXX This ought to work with anything that supports the buffer API
        data = self.read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError as err:
            import array
            if not isinstance(b, array.array):
                raise err
            b[:n] = array.array('b', data)
        return n

    def write(self, b: bytes) -> int:
        """Write the given buffer to the IO stream.

        Return the number of bytes written, which is never less than
        len(b).

        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        self._unsupported("write")

    def detach(self) -> None:
        """
        Separate the underlying raw stream from the buffer and return it.

        After the raw stream has been detached, the buffer is in an unusable
        state.
        """
        self._unsupported("detach")

io.BufferedIOBase.register(BufferedIOBase)


class _BufferedIOMixin(BufferedIOBase):

    """A mixin implementation of BufferedIOBase with an underlying raw stream.

    This passes most requests on to the underlying raw stream. It
    does *not* provide implementations of read(), readinto() or
    write().
    """

    def __init__(self, raw):
        self.raw = raw

    ### Positioning ###

    def seek(self, pos, whence=0):
        new_position = self.raw.seek(pos, whence)
        if new_position < 0:
            raise IOError("seek() returned an invalid position")
        return new_position

    def tell(self):
        pos = self.raw.tell()
        if pos < 0:
            raise IOError("tell() returned an invalid position")
        return pos

    def truncate(self, pos=None):
        # Flush the stream. We're mixing buffered I/O with lower-level I/O,
        # and a flush may be necessary to synch both views of the current
        # file state.
        self.flush()

        if pos is None:
            pos = self.tell()
        # XXX: Should seek() be used, instead of passing the position
        # XXX  directly to truncate?
        return self.raw.truncate(pos)

    ### Flush and close ###

    def flush(self):
        self.raw.flush()

    def close(self):
        if not self.closed and self.raw is not None:
            try:
                self.flush()
            except IOError:
                pass  # If flush() fails, just give up
            self.raw.close()

    def detach(self):
        if self.raw is None:
            raise ValueError("raw stream already detached")
        self.flush()
        raw = self.raw
        self.raw = None
        return raw

    ### Inquiries ###

    def seekable(self):
        return self.raw.seekable()

    def readable(self):
        return self.raw.readable()

    def writable(self):
        return self.raw.writable()

    @property
    def closed(self):
        return self.raw.closed

    @property
    def name(self):
        return self.raw.name

    @property
    def mode(self):
        return self.raw.mode

    def __repr__(self):
        clsname = self.__class__.__name__
        try:
            name = self.name
        except AttributeError:
            return "<_pyio.{0}>".format(clsname)
        else:
            return "<_pyio.{0} name={1!r}>".format(clsname, name)

    ### Lower-level APIs ###

    def fileno(self):
        return self.raw.fileno()

    def isatty(self):
        return self.raw.isatty()


class BytesIO(BufferedIOBase):

    """Buffered I/O implementation using an in-memory bytes buffer."""

    def __init__(self, initial_bytes=None):
        buf = bytearray()
        if initial_bytes is not None:
            buf += initial_bytes
        self._buffer = buf
        self._pos = 0

    def getvalue(self):
        """Return the bytes value (contents) of the buffer
        """
        if self.closed:
            raise ValueError("getvalue on closed file")
        return bytes(self._buffer)

    def read(self, n=None):
        if self.closed:
            raise ValueError("read from closed file")
        if n is None:
            n = -1
        if n < 0:
            n = len(self._buffer)
        if len(self._buffer) <= self._pos:
            return b""
        newpos = min(len(self._buffer), self._pos + n)
        b = self._buffer[self._pos : newpos]
        self._pos = newpos
        return bytes(b)

    def read1(self, n):
        """This is the same as read.
        """
        return self.read(n)

    def write(self, b):
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        n = len(b)
        if n == 0:
            return 0
        pos = self._pos
        if pos > len(self._buffer):
            # Inserts null bytes between the current end of the file
            # and the new write position.
            padding = b'\x00' * (pos - len(self._buffer))
            self._buffer += padding
        self._buffer[pos:pos + n] = b
        self._pos += n
        return n

    def seek(self, pos, whence=0):
        if self.closed:
            raise ValueError("seek on closed file")
        try:
            pos = pos.__index__()
        except AttributeError as err:
            raise TypeError("an integer is required") from err
        if whence == 0:
            if pos < 0:
                raise ValueError("negative seek position %r" % (pos,))
            self._pos = pos
        elif whence == 1:
            self._pos = max(0, self._pos + pos)
        elif whence == 2:
            self._pos = max(0, len(self._buffer) + pos)
        else:
            raise ValueError("invalid whence value")
        return self._pos

    def tell(self):
        if self.closed:
            raise ValueError("tell on closed file")
        return self._pos

    def truncate(self, pos=None):
        if self.closed:
            raise ValueError("truncate on closed file")
        if pos is None:
            pos = self._pos
        elif pos < 0:
            raise ValueError("negative truncate position %r" % (pos,))
        del self._buffer[pos:]
        return self.seek(pos)

    def readable(self):
        return True

    def writable(self):
        return True

    def seekable(self):
        return True

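# Small sketch (illustration only) of the zero-fill behaviour implemented in
# BytesIO.write() above: seeking past the end and then writing pads the gap
# with NUL bytes, much like a sparse write on a real file.
def _bytesio_padding_example():
    b = BytesIO()
    b.seek(3)
    b.write(b"xy")
    assert b.getvalue() == b"\x00\x00\x00xy"

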
class BufferedReader(_BufferedIOMixin):

    """BufferedReader(raw[, buffer_size])

    A buffer for a readable, sequential RawIOBase object.

    The constructor creates a BufferedReader for the given readable raw
    stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
    is used.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        """Create a new buffered reader using the given readable raw IO object.
        """
        if not raw.readable():
            raise IOError('"raw" argument must be readable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._reset_read_buf()
        self._read_lock = Lock()

    def _reset_read_buf(self):
        self._read_buf = b""
        self._read_pos = 0

    def read(self, n=None):
        """Read n bytes.

        Returns exactly n bytes of data unless the underlying raw IO
        stream reaches EOF or if the call would block in non-blocking
        mode. If n is negative, read until EOF or until read() would
        block.
        """
        if n is not None and n < -1:
            raise ValueError("invalid number of bytes to read")
        with self._read_lock:
            return self._read_unlocked(n)

    def _read_unlocked(self, n=None):
        nodata_val = b""
        empty_values = (b"", None)
        buf = self._read_buf
        pos = self._read_pos

        # Special case for when the number of bytes to read is unspecified.
        if n is None or n == -1:
            self._reset_read_buf()
            chunks = [buf[pos:]]  # Strip the consumed bytes.
            current_size = 0
            while True:
                # Read until EOF or until read() would block.
                chunk = self.raw.read()
                if chunk in empty_values:
                    nodata_val = chunk
                    break
                current_size += len(chunk)
                chunks.append(chunk)
            return b"".join(chunks) or nodata_val

        # The number of bytes to read is specified, return at most n bytes.
        avail = len(buf) - pos  # Length of the available buffered data.
        if n <= avail:
            # Fast path: the data to read is fully buffered.
            self._read_pos += n
            return buf[pos:pos+n]
        # Slow path: read from the stream until enough bytes are read,
        # or until an EOF occurs or until read() would block.
        chunks = [buf[pos:]]
        wanted = max(self.buffer_size, n)
        while avail < n:
            chunk = self.raw.read(wanted)
            if chunk in empty_values:
                nodata_val = chunk
                break
            avail += len(chunk)
            chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
        # read() would have blocked.
        n = min(n, avail)
        out = b"".join(chunks)
        self._read_buf = out[n:]  # Save the extra data in the buffer.
        self._read_pos = 0
        return out[:n] if out else nodata_val

    def peek(self, n=0):
        """Returns buffered bytes without advancing the position.

        The argument indicates a desired minimal number of bytes; we
        do at most one raw read to satisfy it. We never return more
        than self.buffer_size.
        """
        with self._read_lock:
            return self._peek_unlocked(n)

    def _peek_unlocked(self, n=0):
        want = min(n, self.buffer_size)
        have = len(self._read_buf) - self._read_pos
        if have < want or have <= 0:
            to_read = self.buffer_size - have
            current = self.raw.read(to_read)
            if current:
                self._read_buf = self._read_buf[self._read_pos:] + current
                self._read_pos = 0
        return self._read_buf[self._read_pos:]

    def read1(self, n):
        """Reads up to n bytes, with at most one read() system call."""
        # Returns up to n bytes. If at least one byte is buffered, we
        # only return buffered bytes. Otherwise, we do one raw read.
        if n < 0:
            raise ValueError("number of bytes to read must be positive")
        if n == 0:
            return b""
        with self._read_lock:
            self._peek_unlocked(1)
            return self._read_unlocked(
                min(n, len(self._read_buf) - self._read_pos))

    def tell(self):
        return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos

    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence value")
        with self._read_lock:
            if whence == 1:
                pos -= len(self._read_buf) - self._read_pos
            pos = _BufferedIOMixin.seek(self, pos, whence)
            self._reset_read_buf()
            return pos

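# Illustrative sketch (not used by the module): peek() above fills the
# internal buffer without moving the logical position, while read() is then
# served from that buffer. A BytesIO stands in for the raw stream here.
def _buffered_reader_example():
    raw = BytesIO(b"abcdef")
    buf = BufferedReader(raw, buffer_size=4)
    assert buf.peek(1).startswith(b"a")  # at most one raw read, no advance
    assert buf.read(2) == b"ab"          # satisfied from the buffer
    assert buf.tell() == 2

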
class BufferedWriter(_BufferedIOMixin):

    """A buffer for a writeable sequential RawIO object.

    The constructor creates a BufferedWriter for the given writeable raw
    stream. If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    _warning_stack_offset = 2

    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        if not raw.writable():
            raise IOError('"raw" argument must be writable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        if max_buffer_size is not None:
            warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
                          self._warning_stack_offset)
        self.buffer_size = buffer_size
        self._write_buf = bytearray()
        self._write_lock = Lock()

    def write(self, b):
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        with self._write_lock:
            # XXX we can implement some more tricks to try and avoid
            # partial writes
            if len(self._write_buf) > self.buffer_size:
                # We're full, so let's pre-flush the buffer
                try:
                    self._flush_unlocked()
                except BlockingIOError as e:
                    # We can't accept anything else.
                    # XXX Why not just let the exception pass through?
                    raise BlockingIOError(e.errno, e.strerror, 0)
            before = len(self._write_buf)
            self._write_buf.extend(b)
            written = len(self._write_buf) - before
            if len(self._write_buf) > self.buffer_size:
                try:
                    self._flush_unlocked()
                except BlockingIOError as e:
                    if len(self._write_buf) > self.buffer_size:
                        # We've hit the buffer_size. We have to accept a partial
                        # write and cut back our buffer.
                        overage = len(self._write_buf) - self.buffer_size
                        written -= overage
                        self._write_buf = self._write_buf[:self.buffer_size]
                    raise BlockingIOError(e.errno, e.strerror, written)
            return written

    def truncate(self, pos=None):
        with self._write_lock:
            self._flush_unlocked()
            if pos is None:
                pos = self.raw.tell()
            return self.raw.truncate(pos)

    def flush(self):
        with self._write_lock:
            self._flush_unlocked()

    def _flush_unlocked(self):
        if self.closed:
            raise ValueError("flush of closed file")
        written = 0
        try:
            while self._write_buf:
                n = self.raw.write(self._write_buf)
                if n > len(self._write_buf) or n < 0:
                    raise IOError("write() returned incorrect number of bytes")
                del self._write_buf[:n]
                written += n
        except BlockingIOError as e:
            n = e.characters_written
            del self._write_buf[:n]
            written += n
            raise BlockingIOError(e.errno, e.strerror, written)

    def tell(self):
        return _BufferedIOMixin.tell(self) + len(self._write_buf)

    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence")
        with self._write_lock:
            self._flush_unlocked()
            return _BufferedIOMixin.seek(self, pos, whence)

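# Sketch of the buffering behaviour above (illustration only): small writes
# accumulate in the internal buffer and only reach the raw stream on flush().
# A BytesIO stands in for the raw stream.
def _buffered_writer_example():
    raw = BytesIO()
    w = BufferedWriter(raw, buffer_size=16)
    w.write(b"abc")                  # stays in the write buffer
    assert raw.getvalue() == b""
    w.flush()
    assert raw.getvalue() == b"abc"
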

class BufferedRWPair(BufferedIOBase):

    """A buffered reader and writer object together.

    A buffered reader object and buffered writer object put together to
    form a sequential IO object that can read and write. This is typically
    used with a socket or two-way pipe.

    reader and writer are RawIOBase objects that are readable and
    writeable respectively. If the buffer_size is omitted it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    # XXX The usefulness of this (compared to having two separate IO
    # objects) is questionable.

    def __init__(self, reader, writer,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        """Constructor.

        The arguments are two RawIO instances.
        """
        if max_buffer_size is not None:
            warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)

        if not reader.readable():
            raise IOError('"reader" argument must be readable.')

        if not writer.writable():
            raise IOError('"writer" argument must be writable.')

        self.reader = BufferedReader(reader, buffer_size)
        self.writer = BufferedWriter(writer, buffer_size)

    def read(self, n=None):
        if n is None:
            n = -1
        return self.reader.read(n)

    def readinto(self, b):
        return self.reader.readinto(b)

    def write(self, b):
        return self.writer.write(b)

    def peek(self, n=0):
        return self.reader.peek(n)

    def read1(self, n):
        return self.reader.read1(n)

    def readable(self):
        return self.reader.readable()

    def writable(self):
        return self.writer.writable()

    def flush(self):
        return self.writer.flush()

    def close(self):
        self.writer.close()
        self.reader.close()

    def isatty(self):
        return self.reader.isatty() or self.writer.isatty()

    @property
    def closed(self):
        return self.writer.closed

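# Usage sketch (illustration only): a BufferedRWPair glues an independent
# read stream and write stream into one object. Two BytesIO objects stand
# in for the two raw endpoints.
def _rw_pair_example():
    pair = BufferedRWPair(BytesIO(b"ping"), BytesIO())
    assert pair.read(4) == b"ping"
    pair.write(b"pong")
    pair.flush()
    assert pair.writer.raw.getvalue() == b"pong"
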

class BufferedRandom(BufferedWriter, BufferedReader):

    """A buffered interface to random access streams.

    The constructor creates a reader and writer for a seekable stream,
    raw, given in the first argument. If the buffer_size is omitted it
    defaults to DEFAULT_BUFFER_SIZE.
    """

    _warning_stack_offset = 3

    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        raw._checkSeekable()
        BufferedReader.__init__(self, raw, buffer_size)
        BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)

    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence")
        self.flush()
        if self._read_buf:
            # Undo read ahead.
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
        # First do the raw seek, then empty the read buffer, so that
        # if the raw seek fails, we don't lose buffered data forever.
        pos = self.raw.seek(pos, whence)
        with self._read_lock:
            self._reset_read_buf()
        if pos < 0:
            raise IOError("seek() returned invalid position")
        return pos

    def tell(self):
        if self._write_buf:
            return BufferedWriter.tell(self)
        else:
            return BufferedReader.tell(self)

    def truncate(self, pos=None):
        if pos is None:
            pos = self.tell()
        # Use seek to flush the read buffer.
        self.seek(pos)
        return BufferedWriter.truncate(self)

    def read(self, n=None):
        if n is None:
            n = -1
        self.flush()
        return BufferedReader.read(self, n)

    def readinto(self, b):
        self.flush()
        return BufferedReader.readinto(self, b)

    def peek(self, n=0):
        self.flush()
        return BufferedReader.peek(self, n)

    def read1(self, n):
        self.flush()
        return BufferedReader.read1(self, n)

    def write(self, b):
        if self._read_buf:
            # Undo readahead
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
                self._reset_read_buf()
        return BufferedWriter.write(self, b)

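# Sketch (illustration only) of mixing reads and writes through a single
# BufferedRandom, with a seekable BytesIO standing in for the raw stream.
def _buffered_random_example():
    f = BufferedRandom(BytesIO(b"hello world"))
    assert f.read(5) == b"hello"
    f.seek(0)
    f.write(b"HELLO")
    f.seek(0)
    assert f.read() == b"HELLO world"
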

class TextIOBase(IOBase):

    """Base class for text I/O.

    This class provides a character and line based interface to stream
    I/O. There is no readinto method because Python's character strings
    are immutable. There is no public constructor.
    """

    def read(self, n: int = -1) -> str:
        """Read at most n characters from stream.

        Read from underlying buffer until we have n characters or we hit EOF.
        If n is negative or omitted, read until EOF.
        """
        self._unsupported("read")

    def write(self, s: str) -> int:
        """Write string s to stream."""
        self._unsupported("write")

    def truncate(self, pos: int = None) -> int:
        """Truncate size to pos."""
        self._unsupported("truncate")

    def readline(self) -> str:
        """Read until newline or EOF.

        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")

    def detach(self) -> None:
        """
        Separate the underlying buffer from the TextIOBase and return it.

        After the underlying buffer has been detached, the TextIO is in an
        unusable state.
        """
        self._unsupported("detach")

    @property
    def encoding(self):
        """Subclasses should override."""
        return None

    @property
    def newlines(self):
        """Line endings translated so far.

        Only line endings translated during reading are considered.

        Subclasses should override.
        """
        return None

    @property
    def errors(self):
        """Error setting of the decoder or encoder.

        Subclasses should override."""
        return None

io.TextIOBase.register(TextIOBase)

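# Usage sketch (illustration only): TextIOWrapper, defined below, is the
# concrete text layer; wrapping a BytesIO shows the str-in/bytes-out
# contract that TextIOBase describes.
def _text_layer_example():
    t = TextIOWrapper(BytesIO(), encoding="utf-8", newline="\n")
    t.write("spam\n")
    t.flush()
    assert t.buffer.getvalue() == b"spam\n"
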

class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    r"""Codec used when reading a file in universal newlines mode. It wraps
    another incremental decoder, translating \r\n and \r into \n. It also
    records the types of newlines encountered. When used with
    translate=False, it ensures that the newline sequence is returned in
    one piece.
    """
    def __init__(self, decoder, translate, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors=errors)
        self.translate = translate
        self.decoder = decoder
        self.seennl = 0
        self.pendingcr = False

    def decode(self, input, final=False):
        # decode input (with the eventual \r from a previous pass)
        if self.decoder is None:
            output = input
        else:
            output = self.decoder.decode(input, final=final)
        if self.pendingcr and (output or final):
            output = "\r" + output
            self.pendingcr = False

        # retain last \r even when not translating data:
        # then readline() is sure to get \r\n in one pass
        if output.endswith("\r") and not final:
            output = output[:-1]
            self.pendingcr = True

        # Record which newlines are read
        crlf = output.count('\r\n')
        cr = output.count('\r') - crlf
        lf = output.count('\n') - crlf
        self.seennl |= (lf and self._LF) | (cr and self._CR) \
                    | (crlf and self._CRLF)

        if self.translate:
            if crlf:
                output = output.replace("\r\n", "\n")
            if cr:
                output = output.replace("\r", "\n")

        return output

    def getstate(self):
        if self.decoder is None:
            buf = b""
            flag = 0
        else:
            buf, flag = self.decoder.getstate()
        flag <<= 1
        if self.pendingcr:
            flag |= 1
        return buf, flag

    def setstate(self, state):
        buf, flag = state
        self.pendingcr = bool(flag & 1)
        if self.decoder is not None:
            self.decoder.setstate((buf, flag >> 1))

    def reset(self):
        self.seennl = 0
        self.pendingcr = False
        if self.decoder is not None:
            self.decoder.reset()

    _LF = 1
    _CR = 2
    _CRLF = 4

    @property
    def newlines(self):
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n")
               )[self.seennl]


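# Small sketch (illustration only) of the behaviour documented above: a
# '\r\n' pair split across two decode() calls is still delivered as a single
# newline. With no wrapped decoder, the input is passed through as text.
def _newline_decoder_example():
    dec = IncrementalNewlineDecoder(decoder=None, translate=True)
    out = dec.decode("abc\r")                 # trailing '\r' is held back
    out += dec.decode("\ndef", final=True)    # ...and rejoined with '\n'
    assert out == "abc\ndef"
    assert dec.newlines == "\r\n"

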
class TextIOWrapper(TextIOBase):

    r"""Character and line based layer over a BufferedIOBase object, buffer.

    encoding gives the name of the encoding that the stream will be
    decoded or encoded with. It defaults to locale.getpreferredencoding.

    errors determines the strictness of encoding and decoding (see the
    codecs.register) and defaults to "strict".

    newline can be None, '', '\n', '\r', or '\r\n'. It controls the
    handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the line endings '\n', '\r',
    or '\r\n' are translated to '\n' before being returned to the
    caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
| 1400 | legal values, that newline becomes the newline when the file is read |
| 1401 | and it is returned untranslated. On output, '\n' is converted to the |
| 1402 | newline. |
| 1403 | |
| 1404 | If line_buffering is True, a call to flush is implied when a call to |
| 1405 | write contains a newline character. |
| 1406 | """ |
| 1407 | |
| 1408 | _CHUNK_SIZE = 2048 |
| 1409 | |
| 1410 | def __init__(self, buffer, encoding=None, errors=None, newline=None, |
| 1411 | line_buffering=False): |
| 1412 | if newline is not None and not isinstance(newline, str): |
| 1413 | raise TypeError("illegal newline type: %r" % (type(newline),)) |
| 1414 | if newline not in (None, "", "\n", "\r", "\r\n"): |
| 1415 | raise ValueError("illegal newline value: %r" % (newline,)) |
| 1416 | if encoding is None: |
| 1417 | try: |
| 1418 | encoding = os.device_encoding(buffer.fileno()) |
| 1419 | except (AttributeError, UnsupportedOperation): |
| 1420 | pass |
| 1421 | if encoding is None: |
| 1422 | try: |
| 1423 | import locale |
| 1424 | except ImportError: |
| 1425 | # Importing locale may fail if Python is being built |
| 1426 | encoding = "ascii" |
| 1427 | else: |
| 1428 | encoding = locale.getpreferredencoding() |
| 1429 | |
| 1430 | if not isinstance(encoding, str): |
| 1431 | raise ValueError("invalid encoding: %r" % encoding) |
| 1432 | |
| 1433 | if errors is None: |
| 1434 | errors = "strict" |
| 1435 | else: |
| 1436 | if not isinstance(errors, str): |
| 1437 | raise ValueError("invalid errors: %r" % errors) |
| 1438 | |
| 1439 | self.buffer = buffer |
| 1440 | self._line_buffering = line_buffering |
| 1441 | self._encoding = encoding |
| 1442 | self._errors = errors |
| 1443 | self._readuniversal = not newline |
| 1444 | self._readtranslate = newline is None |
| 1445 | self._readnl = newline |
| 1446 | self._writetranslate = newline != '' |
| 1447 | self._writenl = newline or os.linesep |
| 1448 | self._encoder = None |
| 1449 | self._decoder = None |
| 1450 | self._decoded_chars = '' # buffer for text returned from decoder |
| 1451 | self._decoded_chars_used = 0 # offset into _decoded_chars for read() |
| 1452 | self._snapshot = None # info for reconstructing decoder state |
| 1453 | self._seekable = self._telling = self.buffer.seekable() |
| 1454 | |
Antoine Pitrou | e450185 | 2009-05-14 18:55:55 +0000 | [diff] [blame] | 1455 | if self._seekable and self.writable(): |
| 1456 | position = self.buffer.tell() |
| 1457 | if position != 0: |
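                # Added note (not in the original): priming the encoder with
                # setstate(0) tells it that output does not start at the
                # beginning of the stream, so codecs such as UTF-16 will not
                # emit another BOM when appending; seek() below uses the same
                # trick for its BOM handling.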
| 1458 | try: |
| 1459 | self._get_encoder().setstate(0) |
| 1460 | except LookupError: |
| 1461 | # The codec may not provide an incremental encoder |
| 1462 | pass |
| 1463 | |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1464 | # self._snapshot is either None, or a tuple (dec_flags, next_input) |
| 1465 | # where dec_flags is the second (integer) item of the decoder state |
| 1466 | # and next_input is the chunk of input bytes that comes next after the |
| 1467 | # snapshot point. We use this to reconstruct decoder states in tell(). |
| 1468 | |
| 1469 | # Naming convention: |
| 1470 | # - "bytes_..." for integer variables that count input bytes |
| 1471 | # - "chars_..." for integer variables that count decoded characters |
| 1472 | |
Benjamin Peterson | c4c0eae | 2009-03-09 00:07:03 +0000 | [diff] [blame] | 1473 | def __repr__(self): |
Antoine Pitrou | 716c444 | 2009-05-23 19:04:03 +0000 | [diff] [blame] | 1474 | try: |
| 1475 | name = self.name |
| 1476 | except AttributeError: |
| 1477 | return "<_pyio.TextIOWrapper encoding={0!r}>".format(self.encoding) |
| 1478 | else: |
| 1479 | return "<_pyio.TextIOWrapper name={0!r} encoding={1!r}>".format( |
| 1480 | name, self.encoding) |
Benjamin Peterson | c4c0eae | 2009-03-09 00:07:03 +0000 | [diff] [blame] | 1481 | |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1482 | @property |
| 1483 | def encoding(self): |
| 1484 | return self._encoding |
| 1485 | |
| 1486 | @property |
| 1487 | def errors(self): |
| 1488 | return self._errors |
| 1489 | |
| 1490 | @property |
| 1491 | def line_buffering(self): |
| 1492 | return self._line_buffering |
| 1493 | |
| 1494 | def seekable(self): |
| 1495 | return self._seekable |
| 1496 | |
| 1497 | def readable(self): |
| 1498 | return self.buffer.readable() |
| 1499 | |
| 1500 | def writable(self): |
| 1501 | return self.buffer.writable() |
| 1502 | |
| 1503 | def flush(self): |
| 1504 | self.buffer.flush() |
| 1505 | self._telling = self._seekable |
| 1506 | |
| 1507 | def close(self): |
Benjamin Peterson | d2e0c79 | 2009-05-01 20:40:59 +0000 | [diff] [blame] | 1508 | if self.buffer is not None: |
| 1509 | try: |
| 1510 | self.flush() |
| 1511 | except IOError: |
| 1512 | pass # If flush() fails, just give up |
| 1513 | self.buffer.close() |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1514 | |
| 1515 | @property |
| 1516 | def closed(self): |
| 1517 | return self.buffer.closed |
| 1518 | |
| 1519 | @property |
| 1520 | def name(self): |
| 1521 | return self.buffer.name |
| 1522 | |
| 1523 | def fileno(self): |
| 1524 | return self.buffer.fileno() |
| 1525 | |
| 1526 | def isatty(self): |
| 1527 | return self.buffer.isatty() |
| 1528 | |
| 1529 | def write(self, s: str): |
| 1530 | if self.closed: |
| 1531 | raise ValueError("write to closed file") |
| 1532 | if not isinstance(s, str): |
| 1533 | raise TypeError("can't write %s to text stream" % |
| 1534 | s.__class__.__name__) |
| 1535 | length = len(s) |
| 1536 | haslf = (self._writetranslate or self._line_buffering) and "\n" in s |
| 1537 | if haslf and self._writetranslate and self._writenl != "\n": |
| 1538 | s = s.replace("\n", self._writenl) |
| 1539 | encoder = self._encoder or self._get_encoder() |
| 1540 | # XXX What if we were just reading? |
| 1541 | b = encoder.encode(s) |
| 1542 | self.buffer.write(b) |
| 1543 | if self._line_buffering and (haslf or "\r" in s): |
| 1544 | self.flush() |
| 1545 | self._snapshot = None |
| 1546 | if self._decoder: |
| 1547 | self._decoder.reset() |
| 1548 | return length |
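# Illustrative sketch of the output newline translation above (added, not
# from the original source); it uses the C-accelerated io module, which
# behaves the same way:
#
#     >>> import io
#     >>> raw = io.BytesIO()
#     >>> t = io.TextIOWrapper(raw, encoding="ascii", newline="\r\n")
#     >>> t.write("a\n")
#     2
#     >>> t.flush()
#     >>> raw.getvalue()
#     b'a\r\n'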
| 1549 | |
| 1550 | def _get_encoder(self): |
| 1551 | make_encoder = codecs.getincrementalencoder(self._encoding) |
| 1552 | self._encoder = make_encoder(self._errors) |
| 1553 | return self._encoder |
| 1554 | |
| 1555 | def _get_decoder(self): |
| 1556 | make_decoder = codecs.getincrementaldecoder(self._encoding) |
| 1557 | decoder = make_decoder(self._errors) |
| 1558 | if self._readuniversal: |
| 1559 | decoder = IncrementalNewlineDecoder(decoder, self._readtranslate) |
| 1560 | self._decoder = decoder |
| 1561 | return decoder |
| 1562 | |
| 1563 | # The following three methods implement an ADT for _decoded_chars. |
| 1564 | # Text returned from the decoder is buffered here until the client |
| 1565 | # requests it by calling our read() or readline() method. |
| 1566 | def _set_decoded_chars(self, chars): |
| 1567 | """Set the _decoded_chars buffer.""" |
| 1568 | self._decoded_chars = chars |
| 1569 | self._decoded_chars_used = 0 |
| 1570 | |
| 1571 | def _get_decoded_chars(self, n=None): |
| 1572 | """Advance into the _decoded_chars buffer.""" |
| 1573 | offset = self._decoded_chars_used |
| 1574 | if n is None: |
| 1575 | chars = self._decoded_chars[offset:] |
| 1576 | else: |
| 1577 | chars = self._decoded_chars[offset:offset + n] |
| 1578 | self._decoded_chars_used += len(chars) |
| 1579 | return chars |
| 1580 | |
| 1581 | def _rewind_decoded_chars(self, n): |
| 1582 | """Rewind the _decoded_chars buffer.""" |
| 1583 | if self._decoded_chars_used < n: |
| 1584 | raise AssertionError("rewind decoded_chars out of bounds") |
| 1585 | self._decoded_chars_used -= n |
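# Illustrative walk-through of the buffer above (hypothetical values, added,
# not from the original source):
#
#     _set_decoded_chars("hello")         # buffer = "hello", used = 0
#     _get_decoded_chars(2)  -> "he"      # used = 2
#     _rewind_decoded_chars(1)            # used = 1
#     _get_decoded_chars()   -> "ello"    # used = 5 (buffer exhausted)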
| 1586 | |
| 1587 | def _read_chunk(self): |
| 1588 | """ |
| 1589 | Read and decode the next chunk of data from the BufferedReader. |
| 1590 | """ |
| 1591 | |
| 1592 | # The return value is True unless EOF was reached. The decoded |
| 1593 | # string is placed in self._decoded_chars (replacing its previous |
| 1594 | # value). The entire input chunk is sent to the decoder, though |
| 1595 | # some of it may remain buffered in the decoder, yet to be |
| 1596 | # converted. |
| 1597 | |
| 1598 | if self._decoder is None: |
| 1599 | raise ValueError("no decoder") |
| 1600 | |
| 1601 | if self._telling: |
| 1602 | # To prepare for tell(), we need to snapshot a point in the |
| 1603 | # file where the decoder's input buffer is empty. |
| 1604 | |
| 1605 | dec_buffer, dec_flags = self._decoder.getstate() |
| 1606 | # Given this, we know there was a valid snapshot point |
| 1607 | # len(dec_buffer) bytes ago with decoder state (b'', dec_flags). |
| 1608 | |
| 1609 | # Read a chunk, decode it, and put the result in self._decoded_chars. |
| 1610 | input_chunk = self.buffer.read1(self._CHUNK_SIZE) |
| 1611 | eof = not input_chunk |
| 1612 | self._set_decoded_chars(self._decoder.decode(input_chunk, eof)) |
| 1613 | |
| 1614 | if self._telling: |
| 1615 | # At the snapshot point, len(dec_buffer) bytes before the read, |
| 1616 | # the next input to be decoded is dec_buffer + input_chunk. |
| 1617 | self._snapshot = (dec_flags, dec_buffer + input_chunk) |
| 1618 | |
| 1619 | return not eof |
| 1620 | |
| 1621 | def _pack_cookie(self, position, dec_flags=0, |
| 1622 | bytes_to_feed=0, need_eof=0, chars_to_skip=0): |
| 1623 | # The meaning of a tell() cookie is: seek to position, set the |
| 1624 | # decoder flags to dec_flags, read bytes_to_feed bytes, feed them |
| 1625 | # into the decoder with need_eof as the EOF flag, then skip |
| 1626 | # chars_to_skip characters of the decoded result. For most simple |
| 1627 | # decoders, tell() will often just give a byte offset in the file. |
| 1628 | return (position | (dec_flags<<64) | (bytes_to_feed<<128) | |
| 1629 | (chars_to_skip<<192) | bool(need_eof)<<256) |
| 1630 | |
| 1631 | def _unpack_cookie(self, bigint): |
| 1632 | rest, position = divmod(bigint, 1<<64) |
| 1633 | rest, dec_flags = divmod(rest, 1<<64) |
| 1634 | rest, bytes_to_feed = divmod(rest, 1<<64) |
| 1635 | need_eof, chars_to_skip = divmod(rest, 1<<64) |
| 1636 | return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip |
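# Illustrative round trip (added, not from the original source): each field
# is packed into its own 64-bit slot of a single big integer, with need_eof
# stored in the bit above them, e.g.
#
#     _pack_cookie(10, dec_flags=0, bytes_to_feed=3,
#                  need_eof=0, chars_to_skip=1)
#         == 10 | (3 << 128) | (1 << 192)
#     _unpack_cookie(10 | (3 << 128) | (1 << 192))
#         == (10, 0, 3, 0, 1)
#
# For a stateless single-byte codec, all fields but position are usually
# zero, so the cookie is simply the byte offset in the file.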
| 1637 | |
| 1638 | def tell(self): |
| 1639 | if not self._seekable: |
| 1640 | raise IOError("underlying stream is not seekable") |
| 1641 | if not self._telling: |
| 1642 | raise IOError("telling position disabled by next() call") |
| 1643 | self.flush() |
| 1644 | position = self.buffer.tell() |
| 1645 | decoder = self._decoder |
| 1646 | if decoder is None or self._snapshot is None: |
| 1647 | if self._decoded_chars: |
| 1648 | # This should never happen. |
| 1649 | raise AssertionError("pending decoded text") |
| 1650 | return position |
| 1651 | |
| 1652 | # Skip backward to the snapshot point (see _read_chunk). |
| 1653 | dec_flags, next_input = self._snapshot |
| 1654 | position -= len(next_input) |
| 1655 | |
| 1656 | # How many decoded characters have been used up since the snapshot? |
| 1657 | chars_to_skip = self._decoded_chars_used |
| 1658 | if chars_to_skip == 0: |
| 1659 | # We haven't moved from the snapshot point. |
| 1660 | return self._pack_cookie(position, dec_flags) |
| 1661 | |
| 1662 | # Starting from the snapshot position, we will walk the decoder |
| 1663 | # forward until it gives us enough decoded characters. |
| 1664 | saved_state = decoder.getstate() |
| 1665 | try: |
| 1666 | # Note our initial start point. |
| 1667 | decoder.setstate((b'', dec_flags)) |
| 1668 | start_pos = position |
| 1669 | start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0 |
| 1670 | need_eof = 0 |
| 1671 | |
| 1672 | # Feed the decoder one byte at a time. As we go, note the |
| 1673 | # nearest "safe start point" before the current location |
| 1674 | # (a point where the decoder has nothing buffered, so seek() |
| 1675 | # can safely start from there and advance to this location). |
| 1676 | next_byte = bytearray(1) |
| 1677 | for next_byte[0] in next_input: |
| 1678 | bytes_fed += 1 |
| 1679 | chars_decoded += len(decoder.decode(next_byte)) |
| 1680 | dec_buffer, dec_flags = decoder.getstate() |
| 1681 | if not dec_buffer and chars_decoded <= chars_to_skip: |
| 1682 | # Decoder buffer is empty, so this is a safe start point. |
| 1683 | start_pos += bytes_fed |
| 1684 | chars_to_skip -= chars_decoded |
| 1685 | start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0 |
| 1686 | if chars_decoded >= chars_to_skip: |
| 1687 | break |
| 1688 | else: |
| 1689 | # We didn't get enough decoded data; signal EOF to get more. |
| 1690 | chars_decoded += len(decoder.decode(b'', final=True)) |
| 1691 | need_eof = 1 |
| 1692 | if chars_decoded < chars_to_skip: |
| 1693 | raise IOError("can't reconstruct logical file position") |
| 1694 | |
| 1695 | # The returned cookie corresponds to the last safe start point. |
| 1696 | return self._pack_cookie( |
| 1697 | start_pos, start_flags, bytes_fed, need_eof, chars_to_skip) |
| 1698 | finally: |
| 1699 | decoder.setstate(saved_state) |
| 1700 | |
| 1701 | def truncate(self, pos=None): |
| 1702 | self.flush() |
| 1703 | if pos is None: |
| 1704 | pos = self.tell() |
| 1705 | self.seek(pos) |
| 1706 | return self.buffer.truncate() |
| 1707 | |
Benjamin Peterson | d2e0c79 | 2009-05-01 20:40:59 +0000 | [diff] [blame] | 1708 | def detach(self): |
| 1709 | if self.buffer is None: |
| 1710 | raise ValueError("buffer is already detached") |
| 1711 | self.flush() |
| 1712 | buffer = self.buffer |
| 1713 | self.buffer = None |
| 1714 | return buffer |
| 1715 | |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1716 | def seek(self, cookie, whence=0): |
| 1717 | if self.closed: |
| 1718 | raise ValueError("seek on closed file") |
| 1719 | if not self._seekable: |
| 1720 | raise IOError("underlying stream is not seekable") |
| 1721 | if whence == 1: # seek relative to current position |
| 1722 | if cookie != 0: |
| 1723 | raise IOError("can't do nonzero cur-relative seeks") |
| 1724 | # Seeking to the current position should attempt to |
| 1725 | # sync the underlying buffer with the current position. |
| 1726 | whence = 0 |
| 1727 | cookie = self.tell() |
| 1728 | if whence == 2: # seek relative to end of file |
| 1729 | if cookie != 0: |
| 1730 | raise IOError("can't do nonzero end-relative seeks") |
| 1731 | self.flush() |
| 1732 | position = self.buffer.seek(0, 2) |
| 1733 | self._set_decoded_chars('') |
| 1734 | self._snapshot = None |
| 1735 | if self._decoder: |
| 1736 | self._decoder.reset() |
| 1737 | return position |
| 1738 | if whence != 0: |
| 1739 | raise ValueError("invalid whence (%r, should be 0, 1 or 2)" % |
| 1740 | (whence,)) |
| 1741 | if cookie < 0: |
| 1742 | raise ValueError("negative seek position %r" % (cookie,)) |
| 1743 | self.flush() |
| 1744 | |
| 1745 | # The strategy of seek() is to go back to the safe start point |
| 1746 | # and replay the effect of read(chars_to_skip) from there. |
| 1747 | start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \ |
| 1748 | self._unpack_cookie(cookie) |
| 1749 | |
| 1750 | # Seek back to the safe start point. |
| 1751 | self.buffer.seek(start_pos) |
| 1752 | self._set_decoded_chars('') |
| 1753 | self._snapshot = None |
| 1754 | |
| 1755 | # Restore the decoder to its state from the safe start point. |
Benjamin Peterson | 9363a65 | 2009-03-05 00:42:09 +0000 | [diff] [blame] | 1756 | if cookie == 0 and self._decoder: |
| 1757 | self._decoder.reset() |
| 1758 | elif self._decoder or dec_flags or chars_to_skip: |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1759 | self._decoder = self._decoder or self._get_decoder() |
| 1760 | self._decoder.setstate((b'', dec_flags)) |
| 1761 | self._snapshot = (dec_flags, b'') |
| 1762 | |
| 1763 | if chars_to_skip: |
| 1764 | # Just like _read_chunk, feed the decoder and save a snapshot. |
| 1765 | input_chunk = self.buffer.read(bytes_to_feed) |
| 1766 | self._set_decoded_chars( |
| 1767 | self._decoder.decode(input_chunk, need_eof)) |
| 1768 | self._snapshot = (dec_flags, input_chunk) |
| 1769 | |
| 1770 | # Skip chars_to_skip of the decoded characters. |
| 1771 | if len(self._decoded_chars) < chars_to_skip: |
| 1772 | raise IOError("can't restore logical file position") |
| 1773 | self._decoded_chars_used = chars_to_skip |
| 1774 | |
Antoine Pitrou | e450185 | 2009-05-14 18:55:55 +0000 | [diff] [blame] | 1775 | # Finally, reset the encoder (mainly needed for proper BOM handling) |
| 1776 | try: |
| 1777 | encoder = self._encoder or self._get_encoder() |
| 1778 | except LookupError: |
| 1779 | # The codec may not provide an incremental encoder |
| 1780 | pass |
| 1781 | else: |
| 1782 | if cookie != 0: |
| 1783 | encoder.setstate(0) |
| 1784 | else: |
| 1785 | encoder.reset() |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1786 | return cookie |
| 1787 | |
| 1788 | def read(self, n=None): |
Benjamin Peterson | a1b4901 | 2009-03-31 23:11:32 +0000 | [diff] [blame] | 1789 | self._checkReadable() |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1790 | if n is None: |
| 1791 | n = -1 |
| 1792 | decoder = self._decoder or self._get_decoder() |
| 1793 | if n < 0: |
| 1794 | # Read everything. |
| 1795 | result = (self._get_decoded_chars() + |
| 1796 | decoder.decode(self.buffer.read(), final=True)) |
| 1797 | self._set_decoded_chars('') |
| 1798 | self._snapshot = None |
| 1799 | return result |
| 1800 | else: |
| 1801 | # Keep reading chunks until we have n characters to return. |
| 1802 | eof = False |
| 1803 | result = self._get_decoded_chars(n) |
| 1804 | while len(result) < n and not eof: |
| 1805 | eof = not self._read_chunk() |
| 1806 | result += self._get_decoded_chars(n - len(result)) |
| 1807 | return result |
| 1808 | |
| 1809 | def __next__(self): |
| 1810 | self._telling = False |
| 1811 | line = self.readline() |
| 1812 | if not line: |
| 1813 | self._snapshot = None |
| 1814 | self._telling = self._seekable |
| 1815 | raise StopIteration |
| 1816 | return line |
| 1817 | |
| 1818 | def readline(self, limit=None): |
| 1819 | if self.closed: |
| 1820 | raise ValueError("read from closed file") |
| 1821 | if limit is None: |
| 1822 | limit = -1 |
Benjamin Peterson | b01138a | 2009-04-24 22:59:52 +0000 | [diff] [blame] | 1823 | elif not isinstance(limit, int): |
| 1824 | raise TypeError("limit must be an integer") |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1825 | |
| 1826 | # Grab all the decoded text (we will rewind any extra bits later). |
| 1827 | line = self._get_decoded_chars() |
| 1828 | |
| 1829 | start = 0 |
| 1830 | # Make the decoder if it doesn't already exist. |
| 1831 | if not self._decoder: |
| 1832 | self._get_decoder() |
| 1833 | |
| 1834 | pos = endpos = None |
| 1835 | while True: |
| 1836 | if self._readtranslate: |
| 1837 | # Newlines are already translated, only search for \n |
| 1838 | pos = line.find('\n', start) |
| 1839 | if pos >= 0: |
| 1840 | endpos = pos + 1 |
| 1841 | break |
| 1842 | else: |
| 1843 | start = len(line) |
| 1844 | |
| 1845 | elif self._readuniversal: |
| 1846 | # Universal newline search. Find any of \r, \r\n, \n |
| 1847 | # The decoder ensures that \r\n are not split in two pieces |
| 1848 | |
| 1849 | # In C we'd look for these in parallel of course. |
| 1850 | nlpos = line.find("\n", start) |
| 1851 | crpos = line.find("\r", start) |
| 1852 | if crpos == -1: |
| 1853 | if nlpos == -1: |
| 1854 | # Nothing found |
| 1855 | start = len(line) |
| 1856 | else: |
| 1857 | # Found \n |
| 1858 | endpos = nlpos + 1 |
| 1859 | break |
| 1860 | elif nlpos == -1: |
| 1861 | # Found lone \r |
| 1862 | endpos = crpos + 1 |
| 1863 | break |
| 1864 | elif nlpos < crpos: |
| 1865 | # Found \n |
| 1866 | endpos = nlpos + 1 |
| 1867 | break |
| 1868 | elif nlpos == crpos + 1: |
| 1869 | # Found \r\n |
| 1870 | endpos = crpos + 2 |
| 1871 | break |
| 1872 | else: |
| 1873 | # Found \r |
| 1874 | endpos = crpos + 1 |
| 1875 | break |
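                # Added note (illustrative, not in the original): for
                # line == "a\r\nb" and start == 0, crpos == 1 and
                # nlpos == 2, so the nlpos == crpos + 1 case fires and
                # endpos == 3, i.e. the line returned is "a\r\n".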
| 1876 | else: |
| 1877 | # non-universal |
| 1878 | pos = line.find(self._readnl) |
| 1879 | if pos >= 0: |
| 1880 | endpos = pos + len(self._readnl) |
| 1881 | break |
| 1882 | |
| 1883 | if limit >= 0 and len(line) >= limit: |
| 1884 | endpos = limit # reached length limit |
| 1885 | break |
| 1886 | |
| 1887 | # No line ending seen yet - get more data |
| 1888 | while self._read_chunk(): |
| 1889 | if self._decoded_chars: |
| 1890 | break |
| 1891 | if self._decoded_chars: |
| 1892 | line += self._get_decoded_chars() |
| 1893 | else: |
| 1894 | # end of file |
| 1895 | self._set_decoded_chars('') |
| 1896 | self._snapshot = None |
| 1897 | return line |
| 1898 | |
| 1899 | if limit >= 0 and endpos > limit: |
| 1900 | endpos = limit # don't exceed limit |
| 1901 | |
| 1902 | # Rewind _decoded_chars to just after the line ending we found. |
| 1903 | self._rewind_decoded_chars(len(line) - endpos) |
| 1904 | return line[:endpos] |
| 1905 | |
| 1906 | @property |
| 1907 | def newlines(self): |
| 1908 | return self._decoder.newlines if self._decoder else None |
| 1909 | |
| 1910 | |
| 1911 | class StringIO(TextIOWrapper): |
| 1912 | """Text I/O implementation using an in-memory buffer. |
| 1913 | |
| 1914 | The initial_value argument sets the initial value of the object. The |
| 1915 | newline argument is like the one of TextIOWrapper's constructor. |
| 1916 | """ |
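# Illustrative usage sketch (added, not from the original source); the
# C-accelerated io.StringIO behaves the same way:
#
#     >>> s = StringIO("first\nsecond\n")
#     >>> s.readline()
#     'first\n'
#     >>> s.getvalue()
#     'first\nsecond\n'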
| 1917 | |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1918 | def __init__(self, initial_value="", newline="\n"): |
| 1919 | super(StringIO, self).__init__(BytesIO(), |
| 1920 | encoding="utf-8", |
| 1921 | errors="strict", |
| 1922 | newline=newline) |
Antoine Pitrou | 1144648 | 2009-04-04 14:09:30 +0000 | [diff] [blame] | 1923 | # Issue #5645: make universal newlines semantics the same as in the |
| 1924 | # C version, even under Windows. |
| 1925 | if newline is None: |
| 1926 | self._writetranslate = False |
Benjamin Peterson | 4fa88fa | 2009-03-04 00:14:51 +0000 | [diff] [blame] | 1927 | if initial_value: |
| 1928 | if not isinstance(initial_value, str): |
| 1929 | initial_value = str(initial_value) |
| 1930 | self.write(initial_value) |
| 1931 | self.seek(0) |
| 1932 | |
| 1933 | def getvalue(self): |
| 1934 | self.flush() |
| 1935 | return self.buffer.getvalue().decode(self._encoding, self._errors) |
Benjamin Peterson | 9fd459a | 2009-03-09 00:09:44 +0000 | [diff] [blame] | 1936 | |
| 1937 | def __repr__(self): |
| 1938 | # TextIOWrapper includes the encoding in its repr. In StringIO, |
| 1939 | # that's an implementation detail. |
| 1940 | return object.__repr__(self) |
Benjamin Peterson | b487e63 | 2009-03-21 03:08:31 +0000 | [diff] [blame] | 1941 | |
| 1942 | @property |
Benjamin Peterson | 0926ad1 | 2009-06-06 18:02:12 +0000 | [diff] [blame] | 1943 | def errors(self): |
| 1944 | return None |
| 1945 | |
| 1946 | @property |
Benjamin Peterson | b487e63 | 2009-03-21 03:08:31 +0000 | [diff] [blame] | 1947 | def encoding(self): |
| 1948 | return None |
Benjamin Peterson | d2e0c79 | 2009-05-01 20:40:59 +0000 | [diff] [blame] | 1949 | |
| 1950 | def detach(self): |
| 1951 | # This doesn't make sense on StringIO. |
| 1952 | self._unsupported("detach") |