From 3f0eb995aae85afddcdb9238152a593734af9634 Mon Sep 17 00:00:00 2001
From: Martijn Pieters
Date: Wed, 15 Mar 2017 09:57:17 -0700
Subject: [PATCH] Complete the tokenize module type hints (#984)

* Complete the tokenize module type hints

* Add missing import for Optional

* Use a 3.5-style named tuple, untokenize speaks with forked tongue so use Any

* Use explicit types for fields
---
 stdlib/3/tokenize.pyi | 47 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 17 deletions(-)

diff --git a/stdlib/3/tokenize.pyi b/stdlib/3/tokenize.pyi
index 9849f68ba..8e8fc1324 100644
--- a/stdlib/3/tokenize.pyi
+++ b/stdlib/3/tokenize.pyi
@@ -2,34 +2,47 @@
 #
 # NOTE: This dynamically typed stub was automatically generated by stubgen.
 
-from typing import Any, Union, TextIO
+from typing import Any, Callable, Generator, Iterable, List, NamedTuple, Optional, Union, Sequence, TextIO, Tuple
 from builtins import open as _builtin_open
 from token import *  # noqa: F403
 
-COMMENT = ...  # type: Any
-NL = ...  # type: Any
-ENCODING = ...  # type: Any
+COMMENT = ...  # type: int
+NL = ...  # type: int
+ENCODING = ...  # type: int
 
-class TokenInfo:
+_Position = Tuple[int, int]
+
+_TokenInfo = NamedTuple('TokenInfo', [
+    ('type', int),
+    ('string', str),
+    ('start', _Position),
+    ('end', _Position),
+    ('line', str)
+])
+
+class TokenInfo(_TokenInfo):
     @property
-    def exact_type(self): ...
+    def exact_type(self) -> int: ...
+
+# Backwards compatible tokens can be sequences of a shorter length too
+_Token = Union[TokenInfo, Sequence[Union[int, str, _Position]]]
 
 class TokenError(Exception): ...
 class StopTokenizing(Exception): ...
 
 class Untokenizer:
-    tokens = ...  # type: Any
-    prev_row = ...  # type: Any
-    prev_col = ...  # type: Any
-    encoding = ...  # type: Any
-    def __init__(self): ...
-    def add_whitespace(self, start): ...
-    def untokenize(self, iterable): ...
-    def compat(self, token, iterable): ...
+    tokens = ...  # type: List[str]
+    prev_row = ...  # type: int
+    prev_col = ...  # type: int
+    encoding = ...  # type: Optional[str]
+    def __init__(self) -> None: ...
+    def add_whitespace(self, start: _Position) -> None: ...
+    def untokenize(self, iterable: Iterable[_Token]) -> str: ...
+    def compat(self, token: Sequence[Union[int, str]], iterable: Iterable[_Token]) -> None: ...
 
-def untokenize(iterable): ...
-def detect_encoding(readline): ...
-def tokenize(readline): ...
+def untokenize(iterable: Iterable[_Token]) -> Any: ...
+def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, Sequence[bytes]]: ...
+def tokenize(readline: Callable[[], bytes]) -> Generator[TokenInfo, None, None]: ...
 def open(filename: Union[str, bytes, int]) -> TextIO: ...
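
As a review aid, a minimal sketch of how the annotated signatures behave at
runtime (the sample source bytes are illustrative; everything used is the
standard library):

    import io
    import tokenize

    source = b"x = 1  # comment\n"

    # detect_encoding() takes the same Callable[[], bytes] readline and
    # returns the annotated (str, Sequence[bytes]) pair: the encoding name
    # plus whichever raw lines it already consumed while looking for a
    # coding cookie.
    encoding, consumed = tokenize.detect_encoding(io.BytesIO(source).readline)

    # tokenize() yields TokenInfo named tuples of (type, string, start, end,
    # line), where start and end are the (row, col) pairs aliased above as
    # _Position, and the exact_type property refines OP tokens (e.g. EQUAL).
    for tok in tokenize.tokenize(io.BytesIO(source).readline):
        print(tok.type, tok.exact_type, repr(tok.string), tok.start, tok.end)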
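
A second sketch covering the two quirks the commit message calls out: _Token
admits shorter sequences, so bare (type, string) pairs are accepted (they are
routed through compat()), and untokenize() is annotated as returning Any
because it hands back str for plain token streams but bytes once the stream
carries an ENCODING token:

    import tokenize
    from token import ENDMARKER, NAME, NEWLINE, NUMBER, OP

    pairs = [(NAME, 'x'), (OP, '='), (NUMBER, '1'),
             (NEWLINE, '\n'), (ENDMARKER, '')]
    # Two-element tuples trigger the compat() path; the result is a str
    # with approximate spacing, roughly "x =1 \n".
    print(tokenize.untokenize(pairs))

    # Prepending an ENCODING token makes untokenize() encode its output,
    # so the very same call now returns bytes, hence Any in the stub.
    print(tokenize.untokenize([(tokenize.ENCODING, 'utf-8')] + pairs))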