Use Zuban and therefore check untyped code

This commit is contained in:
Dave Halter
2026-02-04 02:55:06 +01:00
parent aecfc0e0c4
commit 6fbeec9e2f
8 changed files with 38 additions and 45 deletions

View File

@@ -1,8 +1,11 @@
from contextlib import contextmanager from contextlib import contextmanager
from typing import Dict, List from typing import Dict, List, Any
class _NormalizerMeta(type): class _NormalizerMeta(type):
rule_value_classes: Any
rule_type_classes: Any
def __new__(cls, name, bases, dct): def __new__(cls, name, bases, dct):
new_cls = type.__new__(cls, name, bases, dct) new_cls = type.__new__(cls, name, bases, dct)
new_cls.rule_value_classes = {} new_cls.rule_value_classes = {}
@@ -109,9 +112,6 @@ class NormalizerConfig:
normalizer_class = Normalizer normalizer_class = Normalizer
def create_normalizer(self, grammar): def create_normalizer(self, grammar):
if self.normalizer_class is None:
return None
return self.normalizer_class(grammar, self) return self.normalizer_class(grammar, self)

View File

@@ -83,14 +83,14 @@ class DFAState(Generic[_TokenTypeT]):
self.from_rule = from_rule self.from_rule = from_rule
self.nfa_set = nfa_set self.nfa_set = nfa_set
# map from terminals/nonterminals to DFAState # map from terminals/nonterminals to DFAState
self.arcs: Mapping[str, DFAState] = {} self.arcs: dict[str, DFAState] = {}
# In an intermediary step we set these nonterminal arcs (which has the # In an intermediary step we set these nonterminal arcs (which has the
# same structure as arcs). These don't contain terminals anymore. # same structure as arcs). These don't contain terminals anymore.
self.nonterminal_arcs: Mapping[str, DFAState] = {} self.nonterminal_arcs: dict[str, DFAState] = {}
# Transitions are basically the only thing that the parser is using # Transitions are basically the only thing that the parser is using
# with is_final. Everything else is purely here to create a parser. # with is_final. Everything else is purely here to create a parser.
self.transitions: Mapping[Union[_TokenTypeT, ReservedString], DFAPlan] = {} self.transitions: dict[Union[_TokenTypeT, ReservedString], DFAPlan] = {}
self.is_final = final in nfa_set self.is_final = final in nfa_set
def add_arc(self, next_, label): def add_arc(self, next_, label):
@@ -261,7 +261,7 @@ def generate_grammar(bnf_grammar: str, token_namespace) -> Grammar:
if start_nonterminal is None: if start_nonterminal is None:
start_nonterminal = nfa_a.from_rule start_nonterminal = nfa_a.from_rule
reserved_strings: Mapping[str, ReservedString] = {} reserved_strings: dict[str, ReservedString] = {}
for nonterminal, dfas in rule_to_dfas.items(): for nonterminal, dfas in rule_to_dfas.items():
for dfa_state in dfas: for dfa_state in dfas:
for terminal_or_nonterminal, next_dfa in dfa_state.arcs.items(): for terminal_or_nonterminal, next_dfa in dfa_state.arcs.items():

View File

@@ -881,6 +881,6 @@ class _NodesTree:
end_pos[0] += len(lines) - 1 end_pos[0] += len(lines) - 1
end_pos[1] = len(lines[-1]) end_pos[1] = len(lines[-1])
endmarker = EndMarker('', tuple(end_pos), self.prefix + self._prefix_remainder) endmarker = EndMarker('', (end_pos[0], end_pos[1]), self.prefix + self._prefix_remainder)
endmarker.parent = self._module endmarker.parent = self._module
self._module.children.append(endmarker) self._module.children.append(endmarker)

View File

@@ -676,7 +676,7 @@ class PEP8Normalizer(ErrorFinder):
elif leaf.parent.type == 'function' and leaf.parent.name == leaf: elif leaf.parent.type == 'function' and leaf.parent.name == leaf:
self.add_issue(leaf, 743, message % 'function') self.add_issue(leaf, 743, message % 'function')
else: else:
self.add_issuadd_issue(741, message % 'variables', leaf) self.add_issue(741, message % 'variables', leaf)
elif leaf.value == ':': elif leaf.value == ':':
if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef': if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef':
next_leaf = leaf.get_next_leaf() next_leaf = leaf.get_next_leaf()
@@ -764,4 +764,4 @@ class BlankLineAtEnd(Rule):
message = 'Blank line at end of file' message = 'Blank line at end of file'
def is_issue(self, leaf): def is_issue(self, leaf):
return self._newline_count >= 2 return False # TODO return self._newline_count >= 2

View File

@@ -16,7 +16,7 @@ import re
import itertools as _itertools import itertools as _itertools
from codecs import BOM_UTF8 from codecs import BOM_UTF8
from typing import NamedTuple, Tuple, Iterator, Iterable, List, Dict, \ from typing import NamedTuple, Tuple, Iterator, Iterable, List, Dict, \
Pattern, Set Pattern, Set, Any
from parso.python.token import PythonTokenTypes from parso.python.token import PythonTokenTypes
from parso.utils import split_lines, PythonVersionInfo, parse_version_string from parso.utils import split_lines, PythonVersionInfo, parse_version_string
@@ -47,12 +47,12 @@ class TokenCollection(NamedTuple):
endpats: Dict[str, Pattern] endpats: Dict[str, Pattern]
whitespace: Pattern whitespace: Pattern
fstring_pattern_map: Dict[str, str] fstring_pattern_map: Dict[str, str]
always_break_tokens: Tuple[str] always_break_tokens: Set[str]
BOM_UTF8_STRING = BOM_UTF8.decode('utf-8') BOM_UTF8_STRING = BOM_UTF8.decode('utf-8')
_token_collection_cache: Dict[PythonVersionInfo, TokenCollection] = {} _token_collection_cache: Dict[tuple[int, int], TokenCollection] = {}
def group(*choices, capture=False, **kwargs): def group(*choices, capture=False, **kwargs):
@@ -249,7 +249,7 @@ class Token(NamedTuple):
class PythonToken(Token): class PythonToken(Token):
def __repr__(self): def __repr__(self):
return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' % return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' %
self._replace(type=self.type.name)) self._replace(type=self.type.name)) # type: ignore[arg-type]
class FStringNode: class FStringNode:
@@ -257,7 +257,7 @@ class FStringNode:
self.quote = quote self.quote = quote
self.parentheses_count = 0 self.parentheses_count = 0
self.previous_lines = '' self.previous_lines = ''
self.last_string_start_pos = None self.last_string_start_pos: Any = None
# In the syntax there can be multiple format_spec's nested: # In the syntax there can be multiple format_spec's nested:
# {x:{y:3}} # {x:{y:3}}
self.format_spec_count = 0 self.format_spec_count = 0
@@ -444,7 +444,7 @@ def tokenize_lines(
if string: if string:
yield PythonToken( yield PythonToken(
FSTRING_STRING, string, FSTRING_STRING, string,
tos.last_string_start_pos, tos.last_string_start_pos, # type: ignore[arg-type]
# Never has a prefix because it can start anywhere and # Never has a prefix because it can start anywhere and
# include whitespace. # include whitespace.
prefix='' prefix=''
@@ -496,8 +496,8 @@ def tokenize_lines(
initial = token[0] initial = token[0]
else: else:
match = whitespace.match(line, pos) match = whitespace.match(line, pos)
initial = line[match.end()] initial = line[match.end()] # type: ignore[union-attr]
start = match.end() start = match.end() # type: ignore[union-attr]
spos = (lnum, start) spos = (lnum, start)
if new_line and initial not in '\r\n#' and (initial != '\\' or pseudomatch is None): if new_line and initial not in '\r\n#' and (initial != '\\' or pseudomatch is None):
@@ -512,12 +512,12 @@ def tokenize_lines(
if not pseudomatch: # scan for tokens if not pseudomatch: # scan for tokens
match = whitespace.match(line, pos) match = whitespace.match(line, pos)
if new_line and paren_level == 0 and not fstring_stack: if new_line and paren_level == 0 and not fstring_stack:
yield from dedent_if_necessary(match.end()) yield from dedent_if_necessary(match.end()) # type: ignore[union-attr]
pos = match.end() pos = match.end() # type: ignore[union-attr]
new_line = False new_line = False
yield PythonToken( yield PythonToken(
ERRORTOKEN, line[pos], (lnum, pos), ERRORTOKEN, line[pos], (lnum, pos),
additional_prefix + match.group(0) additional_prefix + match.group(0) # type: ignore[union-attr]
) )
additional_prefix = '' additional_prefix = ''
pos += 1 pos += 1
@@ -586,7 +586,7 @@ def tokenize_lines(
# backslash and is continued. # backslash and is continued.
contstr_start = lnum, start contstr_start = lnum, start
endprog = (endpats.get(initial) or endpats.get(token[1]) endprog = (endpats.get(initial) or endpats.get(token[1])
or endpats.get(token[2])) or endpats.get(token[2])) # type: ignore[assignment]
contstr = line[start:] contstr = line[start:]
contline = line contline = line
break break

View File

@@ -44,7 +44,7 @@ Parser Tree Classes
import re import re
from collections.abc import Mapping from collections.abc import Mapping
from typing import Tuple from typing import Tuple, Any
from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, search_ancestor # noqa from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, search_ancestor # noqa
from parso.python.prefix import split_prefix from parso.python.prefix import split_prefix
@@ -67,6 +67,9 @@ _IMPORTS = set(['import_name', 'import_from'])
class DocstringMixin: class DocstringMixin:
__slots__ = () __slots__ = ()
type: str
children: list[Any]
parent: Any
def get_doc_node(self): def get_doc_node(self):
""" """
@@ -98,6 +101,7 @@ class PythonMixin:
Some Python specific utilities. Some Python specific utilities.
""" """
__slots__ = () __slots__ = ()
children: list[Any]
def get_name_of_position(self, position): def get_name_of_position(self, position):
""" """
@@ -216,7 +220,7 @@ class Name(_LeafWithoutNewlines):
type_ = node.type type_ = node.type
if type_ in ('funcdef', 'classdef'): if type_ in ('funcdef', 'classdef'):
if self == node.name: if self == node.name: # type: ignore[union-attr]
return node return node
return None return None
@@ -229,7 +233,7 @@ class Name(_LeafWithoutNewlines):
if node.type == 'suite': if node.type == 'suite':
return None return None
if node.type in _GET_DEFINITION_TYPES: if node.type in _GET_DEFINITION_TYPES:
if self in node.get_defined_names(include_setitem): if self in node.get_defined_names(include_setitem): # type: ignore[attr-defined]
return node return node
if import_name_always and node.type in _IMPORTS: if import_name_always and node.type in _IMPORTS:
return node return node
@@ -293,6 +297,7 @@ class FStringEnd(PythonLeaf):
class _StringComparisonMixin: class _StringComparisonMixin:
__slots__ = () __slots__ = ()
value: Any
def __eq__(self, other): def __eq__(self, other):
""" """
@@ -365,7 +370,7 @@ class Scope(PythonBaseNode, DocstringMixin):
def __repr__(self): def __repr__(self):
try: try:
name = self.name.value name = self.name.value # type: ignore[attr-defined]
except AttributeError: except AttributeError:
name = '' name = ''
@@ -791,6 +796,8 @@ class WithStmt(Flow):
class Import(PythonBaseNode): class Import(PythonBaseNode):
__slots__ = () __slots__ = ()
get_paths: Any
_aliases: Any
def get_path_for_name(self, name): def get_path_for_name(self, name):
""" """
@@ -815,6 +822,9 @@ class Import(PythonBaseNode):
def is_star_import(self): def is_star_import(self):
return self.children[-1] == '*' return self.children[-1] == '*'
def get_defined_names(self):
raise NotImplementedError("Use ImportFrom or ImportName")
class ImportFrom(Import): class ImportFrom(Import):
type = 'import_from' type = 'import_from'

View File

@@ -366,7 +366,7 @@ class BaseNode(NodeOrLeaf):
""" """
__slots__ = ('children',) __slots__ = ('children',)
def __init__(self, children: List[NodeOrLeaf]) -> None: def __init__(self, children) -> None:
self.children = children self.children = children
""" """
A list of :class:`NodeOrLeaf` child nodes. A list of :class:`NodeOrLeaf` child nodes.

View File

@@ -10,20 +10,3 @@ ignore =
E226, E226,
# line break before binary operator # line break before binary operator
W503, W503,
[mypy]
show_error_codes = true
enable_error_code = ignore-without-code
disallow_subclassing_any = True
# Avoid creating future gotchas emerging from bad typing
warn_redundant_casts = True
warn_unused_ignores = True
warn_return_any = True
warn_unused_configs = True
warn_unreachable = True
strict_equality = True
no_implicit_optional = False