diff --git a/parso/python/tokenize.py b/parso/python/tokenize.py index 63abff3..64610dd 100644 --- a/parso/python/tokenize.py +++ b/parso/python/tokenize.py @@ -52,7 +52,7 @@ class TokenCollection(NamedTuple): BOM_UTF8_STRING = BOM_UTF8.decode('utf-8') -_token_collection_cache: Dict[tuple[int, int], TokenCollection] = {} +_token_collection_cache: Dict[Tuple[int, int], TokenCollection] = {} def group(*choices, capture=False, **kwargs): @@ -340,7 +340,7 @@ def _find_fstring_string(endpats, fstring_stack, line, lnum, pos): def tokenize( - code: str, *, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0) + code: str, *, version_info: Tuple[int, int], start_pos: Tuple[int, int] = (1, 0) ) -> Iterator[PythonToken]: """Generate tokens from a the source code (string).""" lines = split_lines(code, keepends=True) @@ -363,7 +363,7 @@ def _print_tokens(func): def tokenize_lines( lines: Iterable[str], *, - version_info: PythonVersionInfo, + version_info: Tuple[int, int], indents: List[int] = None, start_pos: Tuple[int, int] = (1, 0), is_first_token=True, diff --git a/parso/utils.py b/parso/utils.py index 6ad1ea2..7ff342a 100644 --- a/parso/utils.py +++ b/parso/utils.py @@ -2,7 +2,7 @@ import re import sys from ast import literal_eval from functools import total_ordering -from typing import NamedTuple, Sequence, Union +from typing import NamedTuple, Union # The following is a list in Python that are line breaks in str.splitlines, but # not in Python. In Python only \r (Carriage Return, 0xD) and \n (Line Feed, @@ -26,7 +26,7 @@ class Version(NamedTuple): micro: int -def split_lines(string: str, keepends: bool = False) -> Sequence[str]: +def split_lines(string: str, keepends: bool = False) -> "list[str]": r""" Intended for Python code. In contrast to Python's :py:meth:`str.splitlines`, looks at form feeds and other special characters as normal text. 
diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..6afd8e5 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,14 @@ +[tool.zuban] +enable_error_code = ["ignore-without-code"] + +disallow_subclassing_any = true + +# Avoid creating future gotchas emerging from bad typing +warn_redundant_casts = true +warn_unused_ignores = true +warn_unused_configs = true +warn_unreachable = true + +strict_equality = true +implicit_optional = true +exclude = "^test/normalizer_issue_files" diff --git a/test/test_load_grammar.py b/test/test_load_grammar.py index 0c70436..4a03a0f 100644 --- a/test/test_load_grammar.py +++ b/test/test_load_grammar.py @@ -33,4 +33,4 @@ def test_invalid_grammar_version(string): def test_grammar_int_version(): with pytest.raises(TypeError): - load_grammar(version=3.8) + load_grammar(version=3.8) # type: ignore