From f678fac75a0a0375237c22541052bc76284e53c0 Mon Sep 17 00:00:00 2001
From: Jelle Zijlstra
Date: Fri, 4 Nov 2022 22:59:05 -0700
Subject: [PATCH] tokenize: you can tokenize bytearrays too (#9102)

---
 stdlib/tokenize.pyi | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stdlib/tokenize.pyi b/stdlib/tokenize.pyi
index 6f242a6cd..7c00b507a 100644
--- a/stdlib/tokenize.pyi
+++ b/stdlib/tokenize.pyi
@@ -122,8 +122,8 @@ class Untokenizer:
 # the docstring says "returns bytes" but is incorrect --
 # if the ENCODING token is missing, it skips the encode
 def untokenize(iterable: Iterable[_Token]) -> Any: ...
-def detect_encoding(readline: Callable[[], bytes]) -> tuple[str, Sequence[bytes]]: ...
-def tokenize(readline: Callable[[], bytes]) -> Generator[TokenInfo, None, None]: ...
+def detect_encoding(readline: Callable[[], bytes | bytearray]) -> tuple[str, Sequence[bytes]]: ...
+def tokenize(readline: Callable[[], bytes | bytearray]) -> Generator[TokenInfo, None, None]: ...
 def generate_tokens(readline: Callable[[], str]) -> Generator[TokenInfo, None, None]: ... # undocumented
 def open(filename: StrOrBytesPath | int) -> TextIO: ...
 def group(*choices: str) -> str: ... # undocumented
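
Note (not part of the patch): the widened parameter type reflects runtime behavior. The pure-Python tokenizer only needs readline() to return a bytes-like object it can decode and match against its regexes, and a bytearray satisfies that. A minimal sketch; the helper lines_readline is hypothetical, written just for this illustration:

import tokenize

def lines_readline(data: bytearray):
    # Build a readline() callable that yields one bytearray line per call
    # and an empty bytearray at EOF, which tokenize treats as end of input.
    it = iter(data.splitlines(keepends=True))
    return lambda: next(it, bytearray())

# tokenize() emits the ENCODING token itself, then one TokenInfo per token.
source = bytearray(b"x = 1\nprint(x)\n")
for tok in tokenize.tokenize(lines_readline(source)):
    print(tok)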