tokenize: you can tokenize bytearrays too (#9102)

Jelle Zijlstra
2022-11-04 22:59:05 -07:00
committed by GitHub
parent cff08b674a
commit f678fac75a

@@ -122,8 +122,8 @@ class Untokenizer:
 # the docstring says "returns bytes" but is incorrect --
 # if the ENCODING token is missing, it skips the encode
 def untokenize(iterable: Iterable[_Token]) -> Any: ...
-def detect_encoding(readline: Callable[[], bytes]) -> tuple[str, Sequence[bytes]]: ...
-def tokenize(readline: Callable[[], bytes]) -> Generator[TokenInfo, None, None]: ...
+def detect_encoding(readline: Callable[[], bytes | bytearray]) -> tuple[str, Sequence[bytes]]: ...
+def tokenize(readline: Callable[[], bytes | bytearray]) -> Generator[TokenInfo, None, None]: ...
 def generate_tokens(readline: Callable[[], str]) -> Generator[TokenInfo, None, None]: ...  # undocumented
 def open(filename: StrOrBytesPath | int) -> TextIO: ...
 def group(*choices: str) -> str: ...  # undocumented
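
For illustration, a minimal sketch of what the widened stubs permit: passing tokenize.tokenize a readline callable that yields bytearray rather than bytes. Per the commit title, the runtime tokenizer already handles this; the stub change stops type checkers from rejecting it. The buffer contents and the readline wrapper below are made up for the example.

import io
import tokenize

# Example source held in an in-memory buffer (hypothetical input).
buf = io.BytesIO(b"x = 1\n")

def readline() -> bytearray:
    # Each call returns one line as a bytearray; an empty
    # bytearray signals EOF, just as empty bytes would.
    return bytearray(buf.readline())

# Before this change, type checkers flagged this call because the
# stub required Callable[[], bytes]; it now accepts bytes | bytearray.
for tok in tokenize.tokenize(readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))

The same widening applies to detect_encoding, which tokenize calls internally on the first line or two of input to find a coding cookie or BOM.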