Mirror of https://github.com/davidhalter/typeshed.git, last synced 2025-12-08 04:54:47 +08:00.
tokenize: you can tokenize bytearrays too (#9102)
This commit is contained in:
@@ -122,8 +122,8 @@ class Untokenizer:
|
||||
# the docstring says "returns bytes" but is incorrect --
|
||||
# if the ENCODING token is missing, it skips the encode
|
||||
# Annotated as Any because the runtime return type varies: it is str unless an
# ENCODING token is present, in which case the result is encoded to bytes
# (see the notes above about the incorrect "returns bytes" docstring).
def untokenize(iterable: Iterable[_Token]) -> Any: ...
|
||||
def detect_encoding(readline: Callable[[], bytes]) -> tuple[str, Sequence[bytes]]: ...
|
||||
def tokenize(readline: Callable[[], bytes]) -> Generator[TokenInfo, None, None]: ...
|
||||
# readline may yield bytes or bytearray chunks (widened by #9102); returns the
# detected encoding name plus any lines already consumed while sniffing it.
def detect_encoding(readline: Callable[[], bytes | bytearray]) -> tuple[str, Sequence[bytes]]: ...
|
||||
# readline may yield bytes or bytearray chunks (widened by #9102); yields
# TokenInfo tuples for the byte stream produced by readline().
def tokenize(readline: Callable[[], bytes | bytearray]) -> Generator[TokenInfo, None, None]: ...
|
||||
# Importable at runtime but absent from the documented API; unlike tokenize(),
# its readline yields already-decoded str lines rather than bytes.
def generate_tokens(readline: Callable[[], str]) -> Generator[TokenInfo, None, None]: ...  # undocumented
|
||||
# NOTE(review): presumably opens the file read-only using the encoding found
# by detect_encoding() — confirm against the CPython tokenize docs.
def open(filename: StrOrBytesPath | int) -> TextIO: ...
|
||||
# NOTE(review): name and str->str signature suggest this joins alternatives
# into a regex group used by the tokenizer's patterns — confirm at runtime.
def group(*choices: str) -> str: ...  # undocumented
|
||||
|
||||
Reference in New Issue
Block a user