Use Zuban and therefore check untyped code

Dave Halter
2026-02-04 02:55:06 +01:00
parent aecfc0e0c4
commit 6fbeec9e2f
8 changed files with 38 additions and 45 deletions

View File

@@ -1,8 +1,11 @@
 from contextlib import contextmanager
-from typing import Dict, List
+from typing import Dict, List, Any


 class _NormalizerMeta(type):
+    rule_value_classes: Any
+    rule_type_classes: Any
+
     def __new__(cls, name, bases, dct):
         new_cls = type.__new__(cls, name, bases, dct)
         new_cls.rule_value_classes = {}
@@ -109,9 +112,6 @@ class NormalizerConfig:
     normalizer_class = Normalizer

     def create_normalizer(self, grammar):
-        if self.normalizer_class is None:
-            return None
-
         return self.normalizer_class(grammar, self)
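
The two Any declarations matter because _NormalizerMeta.__new__ assigns rule_value_classes and rule_type_classes on the freshly created class, and once untyped code is checked, assigning to undeclared attributes is reported. The create_normalizer cleanup comes out of the same run: normalizer_class is statically Normalizer and never None, so the deleted branch was unreachable. A minimal sketch of the metaclass pattern, with hypothetical names rather than parso's:

from typing import Any


class _Meta(type):
    # Declared on the metaclass so the assignment in __new__ type-checks.
    registry: Any

    def __new__(cls, name, bases, dct):
        new_cls = type.__new__(cls, name, bases, dct)
        # Without the declaration above, a checker reports attr-defined here.
        new_cls.registry = {}
        return new_cls


class Rule(metaclass=_Meta):
    pass


print(Rule.registry)  # {} -- every class created by _Meta gets its own dict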

View File

@@ -83,14 +83,14 @@ class DFAState(Generic[_TokenTypeT]):
         self.from_rule = from_rule
         self.nfa_set = nfa_set
         # map from terminals/nonterminals to DFAState
-        self.arcs: Mapping[str, DFAState] = {}
+        self.arcs: dict[str, DFAState] = {}
         # In an intermediary step we set these nonterminal arcs (which has the
         # same structure as arcs). These don't contain terminals anymore.
-        self.nonterminal_arcs: Mapping[str, DFAState] = {}
+        self.nonterminal_arcs: dict[str, DFAState] = {}

         # Transitions are basically the only thing that the parser is using
         # with is_final. Everything else is purely here to create a parser.
-        self.transitions: Mapping[Union[_TokenTypeT, ReservedString], DFAPlan] = {}
+        self.transitions: dict[Union[_TokenTypeT, ReservedString], DFAPlan] = {}
         self.is_final = final in nfa_set

     def add_arc(self, next_, label):
@@ -261,7 +261,7 @@ def generate_grammar(bnf_grammar: str, token_namespace) -> Grammar:
         if start_nonterminal is None:
             start_nonterminal = nfa_a.from_rule

-    reserved_strings: Mapping[str, ReservedString] = {}
+    reserved_strings: dict[str, ReservedString] = {}
     for nonterminal, dfas in rule_to_dfas.items():
         for dfa_state in dfas:
             for terminal_or_nonterminal, next_dfa in dfa_state.arcs.items():
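
Replacing Mapping with dict here is not cosmetic: typing.Mapping is the read-only protocol and has no __setitem__, so the code that fills arcs, nonterminal_arcs, transitions, and reserved_strings in place would be rejected under type checking. A small illustration with generic names, not parso's:

from typing import Mapping


def build() -> None:
    read_only: Mapping[str, int] = {}
    # read_only['a'] = 1  # rejected by a checker: Mapping defines no __setitem__
    mutable: dict[str, int] = {}
    mutable['a'] = 1  # fine: dict supports item assignment


build()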

View File

@@ -881,6 +881,6 @@ class _NodesTree:
         end_pos[0] += len(lines) - 1
         end_pos[1] = len(lines[-1])

-        endmarker = EndMarker('', tuple(end_pos), self.prefix + self._prefix_remainder)
+        endmarker = EndMarker('', (end_pos[0], end_pos[1]), self.prefix + self._prefix_remainder)
         endmarker.parent = self._module
         self._module.children.append(endmarker)
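
tuple(end_pos) on a two-element list types as tuple[int, ...], a tuple of unknown length, while a start position is a pair. Spelling out (end_pos[0], end_pos[1]) hands the checker a precise tuple[int, int] without changing runtime behaviour. The distinction in isolation, with a hypothetical function rather than parso's API:

def takes_position(pos: tuple[int, int]) -> None:
    print(pos)


end_pos = [3, 7]
# takes_position(tuple(end_pos))  # tuple[int, ...] does not satisfy tuple[int, int]
takes_position((end_pos[0], end_pos[1]))  # explicit pair: exact length and types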

View File

@@ -676,7 +676,7 @@ class PEP8Normalizer(ErrorFinder):
             elif leaf.parent.type == 'function' and leaf.parent.name == leaf:
                 self.add_issue(leaf, 743, message % 'function')
             else:
-                self.add_issuadd_issue(741, message % 'variables', leaf)
+                self.add_issue(741, message % 'variables', leaf)
         elif leaf.value == ':':
             if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef':
                 next_leaf = leaf.get_next_leaf()
@@ -764,4 +764,4 @@ class BlankLineAtEnd(Rule):
     message = 'Blank line at end of file'

     def is_issue(self, leaf):
-        return self._newline_count >= 2
+        return False  # TODO return self._newline_count >= 2
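
The add_issuadd_issue line shows what the commit title is about: the method never existed, but since the class was untyped the call was never checked, and evidently never executed either. Checking untyped code turns it into a hard error. A minimal sketch, with a hypothetical class, of the kind of report a checker produces:

class Reporter:
    def add_issue(self, code: int, message: str) -> None:
        print(code, message)

    def warn_ambiguous(self) -> None:
        # The misspelled call would be flagged by the checker, e.g.:
        #   error: "Reporter" has no attribute "add_issuadd_issue"  [attr-defined]
        # self.add_issuadd_issue(741, 'ambiguous name')
        self.add_issue(741, 'ambiguous name')


Reporter().warn_ambiguous()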

View File

@@ -16,7 +16,7 @@ import re
 import itertools as _itertools
 from codecs import BOM_UTF8
 from typing import NamedTuple, Tuple, Iterator, Iterable, List, Dict, \
-    Pattern, Set
+    Pattern, Set, Any

 from parso.python.token import PythonTokenTypes
 from parso.utils import split_lines, PythonVersionInfo, parse_version_string
@@ -47,12 +47,12 @@ class TokenCollection(NamedTuple):
     endpats: Dict[str, Pattern]
     whitespace: Pattern
     fstring_pattern_map: Dict[str, str]
-    always_break_tokens: Tuple[str]
+    always_break_tokens: Set[str]


 BOM_UTF8_STRING = BOM_UTF8.decode('utf-8')

-_token_collection_cache: Dict[PythonVersionInfo, TokenCollection] = {}
+_token_collection_cache: Dict[tuple[int, int], TokenCollection] = {}


 def group(*choices, capture=False, **kwargs):
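
Two of these are genuine type corrections rather than annotations added for the checker's sake: Tuple[str] describes a tuple of exactly one string, not a collection, so Set[str] matches how always_break_tokens is actually used (membership tests), and the cache key becomes a plain (major, minor) pair, presumably what the lookups pass in, PythonVersionInfo itself being such a named pair. The Tuple[str] pitfall in isolation:

from typing import Tuple

exactly_one: Tuple[str] = ('import',)
# exactly_one = ('import', 'class')  # rejected: Tuple[str] is a 1-tuple
variable_length: tuple[str, ...] = ('import', 'class')
break_tokens: set[str] = {'import', 'class', 'def'}  # cheap membership tests
print('class' in break_tokens)
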
@@ -249,7 +249,7 @@ class Token(NamedTuple):

 class PythonToken(Token):
     def __repr__(self):
         return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' %
-                self._replace(type=self.type.name))
+                self._replace(type=self.type.name))  # type: ignore[arg-type]


 class FStringNode:
@@ -257,7 +257,7 @@ class FStringNode:
         self.quote = quote
         self.parentheses_count = 0
         self.previous_lines = ''
-        self.last_string_start_pos = None
+        self.last_string_start_pos: Any = None
         # In the syntax there can be multiple format_spec's nested:
         # {x:{y:3}}
         self.format_spec_count = 0
@@ -444,7 +444,7 @@ def tokenize_lines(
             if string:
                 yield PythonToken(
                     FSTRING_STRING, string,
-                    tos.last_string_start_pos,
+                    tos.last_string_start_pos,  # type: ignore[arg-type]
                     # Never has a prefix because it can start anywhere and
                     # include whitespace.
                     prefix=''
@@ -496,8 +496,8 @@ def tokenize_lines(
                 initial = token[0]
         else:
             match = whitespace.match(line, pos)
-            initial = line[match.end()]
-            start = match.end()
+            initial = line[match.end()]  # type: ignore[union-attr]
+            start = match.end()  # type: ignore[union-attr]
             spos = (lnum, start)

         if new_line and initial not in '\r\n#' and (initial != '\\' or pseudomatch is None):
@@ -512,12 +512,12 @@ def tokenize_lines(
         if not pseudomatch:  # scan for tokens
             match = whitespace.match(line, pos)
             if new_line and paren_level == 0 and not fstring_stack:
-                yield from dedent_if_necessary(match.end())
-            pos = match.end()
+                yield from dedent_if_necessary(match.end())  # type: ignore[union-attr]
+            pos = match.end()  # type: ignore[union-attr]
             new_line = False
             yield PythonToken(
                 ERRORTOKEN, line[pos], (lnum, pos),
-                additional_prefix + match.group(0)
+                additional_prefix + match.group(0)  # type: ignore[union-attr]
             )
             additional_prefix = ''
             pos += 1
@@ -586,7 +586,7 @@ def tokenize_lines(
                 # backslash and is continued.
                 contstr_start = lnum, start
                 endprog = (endpats.get(initial) or endpats.get(token[1])
-                           or endpats.get(token[2]))
+                           or endpats.get(token[2]))  # type: ignore[assignment]
                 contstr = line[start:]
                 contline = line
                 break
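
Most ignores in this file share one cause: re.Pattern.match returns Optional[Match[str]], so every .end(), .group(), or indexing on the result is a union-attr error, even where the surrounding logic guarantees a match (a pattern like r'\s*' matches the empty string and can never return None). The commit keeps the tokenizer's structure and silences each site individually; an assert is the usual alternative when an extra statement is acceptable, as in this sketch:

import re

whitespace = re.compile(r'\s*')


def first_nonspace_column(line: str, pos: int) -> int:
    match = whitespace.match(line, pos)
    # r'\s*' matches the empty string, so match is never None here; the
    # assert narrows Optional[Match[str]] to Match[str] for the checker.
    assert match is not None
    return match.end()


print(first_nonspace_column('   x', 0))  # 3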

View File

@@ -44,7 +44,7 @@ Parser Tree Classes

 import re
 from collections.abc import Mapping
-from typing import Tuple
+from typing import Tuple, Any

 from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, search_ancestor  # noqa
 from parso.python.prefix import split_prefix
@@ -67,6 +67,9 @@ _IMPORTS = set(['import_name', 'import_from'])

 class DocstringMixin:
     __slots__ = ()
+    type: str
+    children: list[Any]
+    parent: Any

     def get_doc_node(self):
         """
@@ -98,6 +101,7 @@ class PythonMixin:
     Some Python specific utilities.
     """
     __slots__ = ()
+    children: list[Any]

     def get_name_of_position(self, position):
         """
@@ -216,7 +220,7 @@ class Name(_LeafWithoutNewlines):
         type_ = node.type
         if type_ in ('funcdef', 'classdef'):
-            if self == node.name:
+            if self == node.name:  # type: ignore[union-attr]
                 return node
         return None
@@ -229,7 +233,7 @@ class Name(_LeafWithoutNewlines):
             if node.type == 'suite':
                 return None
             if node.type in _GET_DEFINITION_TYPES:
-                if self in node.get_defined_names(include_setitem):
+                if self in node.get_defined_names(include_setitem):  # type: ignore[attr-defined]
                     return node
                 if import_name_always and node.type in _IMPORTS:
                     return node
@@ -293,6 +297,7 @@ class FStringEnd(PythonLeaf):

 class _StringComparisonMixin:
     __slots__ = ()
+    value: Any

     def __eq__(self, other):
         """
@@ -365,7 +370,7 @@ class Scope(PythonBaseNode, DocstringMixin):
     def __repr__(self):
         try:
-            name = self.name.value
+            name = self.name.value  # type: ignore[attr-defined]
         except AttributeError:
             name = ''
@@ -791,6 +796,8 @@ class WithStmt(Flow):

 class Import(PythonBaseNode):
     __slots__ = ()
+    get_paths: Any
+    _aliases: Any

     def get_path_for_name(self, name):
         """
@@ -815,6 +822,9 @@ class Import(PythonBaseNode):
     def is_star_import(self):
         return self.children[-1] == '*'

+    def get_defined_names(self):
+        raise NotImplementedError("Use ImportFrom or ImportName")
+

 class ImportFrom(Import):
     type = 'import_from'
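
The pattern running through these tree.py hunks mirrors the normalizer.py change: mixins with __slots__ = () own no attributes, so declarations like children: list[Any], value: Any, or type: str are promises that every concrete class the mixin is combined with provides them, which lets the mixin's own methods type-check. A reduced sketch with invented names:

from typing import Any


class ReprMixin:
    __slots__ = ()
    # Declared, never assigned: concrete subclasses must provide it.
    value: Any

    def describe(self) -> str:
        return '<%s: %r>' % (type(self).__name__, self.value)


class Leaf(ReprMixin):
    def __init__(self, value: str) -> None:
        self.value = value


print(Leaf('x').describe())  # <Leaf: 'x'>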

View File

@@ -366,7 +366,7 @@ class BaseNode(NodeOrLeaf):
     """
     __slots__ = ('children',)

-    def __init__(self, children: List[NodeOrLeaf]) -> None:
+    def __init__(self, children) -> None:
         self.children = children
         """
         A list of :class:`NodeOrLeaf` child nodes.
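
Dropping the List[NodeOrLeaf] annotation loosens the constructor for the checker. A plausible motivation, though the commit does not say so, is that List is invariant, so a subclass built from a more precise list, say a List[Leaf], would not satisfy the annotated signature. The invariance pitfall with generic names:

from typing import List


class Base:
    pass


class Child(Base):
    pass


def consume(nodes: List[Base]) -> None:
    print(len(nodes))


children: List[Child] = [Child()]
consume([Child()])  # fine: the literal is inferred as List[Base]
# consume(children)  # rejected: List is invariant, List[Child] is not List[Base]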

View File

@@ -10,20 +10,3 @@ ignore =
     E226,
     # line break before binary operator
     W503,
-
-[mypy]
-show_error_codes = true
-enable_error_code = ignore-without-code
-disallow_subclassing_any = True
-
-# Avoid creating future gotchas emerging from bad typing
-warn_redundant_casts = True
-warn_unused_ignores = True
-warn_return_any = True
-warn_unused_configs = True
-warn_unreachable = True
-strict_equality = True
-no_implicit_optional = False