Use lowercase tuple where possible (#6170)

Akuli, 2021-10-15 00:18:19 +00:00 (committed by GitHub)
parent 5f386b0575
commit 994b69ef8f
242 changed files with 1212 additions and 1224 deletions
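
The change applied throughout is the PEP 585 spelling: annotations use the builtin tuple[...] instead of typing.Tuple[...], and Tuple is dropped from the typing import line once a file has no remaining uses of it. A minimal before/after sketch (illustrative names, not taken from the diff):

# Illustrative stub, not from typeshed. Before: typing.Tuple is imported
# only to write annotations.
from typing import Tuple
def parse_pair(text: str) -> Tuple[int, str]: ...

# After: the builtin generic is used and Tuple drops out of the import.
def parse_pair(text: str) -> tuple[int, str]: ...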

lib2to3/pgen2/parse.pyi

@@ -1,6 +1,6 @@
 from lib2to3.pgen2.grammar import _DFAS, Grammar
 from lib2to3.pytree import _NL, _Convert, _RawNode
-from typing import Any, Sequence, Set, Tuple
+from typing import Any, Sequence, Set
 _Context = Sequence[Any]
@@ -14,7 +14,7 @@ class ParseError(Exception):
 class Parser:
     grammar: Grammar
     convert: _Convert
-    stack: list[Tuple[_DFAS, int, _RawNode]]
+    stack: list[tuple[_DFAS, int, _RawNode]]
     rootnode: _NL | None
     used_names: Set[str]
     def __init__(self, grammar: Grammar, convert: _Convert | None = ...) -> None: ...

lib2to3/pgen2/pgen.pyi

@@ -1,7 +1,7 @@
 from _typeshed import StrPath
 from lib2to3.pgen2 import grammar
 from lib2to3.pgen2.tokenize import _TokenInfo
-from typing import IO, Any, Iterable, Iterator, NoReturn, Tuple
+from typing import IO, Any, Iterable, Iterator, NoReturn
 class PgenGrammar(grammar.Grammar): ...
@@ -16,21 +16,21 @@ class ParserGenerator:
     def make_label(self, c: PgenGrammar, label: str) -> int: ...
     def addfirstsets(self) -> None: ...
     def calcfirst(self, name: str) -> None: ...
-    def parse(self) -> Tuple[dict[str, list[DFAState]], str]: ...
+    def parse(self) -> tuple[dict[str, list[DFAState]], str]: ...
     def make_dfa(self, start: NFAState, finish: NFAState) -> list[DFAState]: ...
     def dump_nfa(self, name: str, start: NFAState, finish: NFAState) -> list[DFAState]: ...
     def dump_dfa(self, name: str, dfa: Iterable[DFAState]) -> None: ...
     def simplify_dfa(self, dfa: list[DFAState]) -> None: ...
-    def parse_rhs(self) -> Tuple[NFAState, NFAState]: ...
-    def parse_alt(self) -> Tuple[NFAState, NFAState]: ...
-    def parse_item(self) -> Tuple[NFAState, NFAState]: ...
-    def parse_atom(self) -> Tuple[NFAState, NFAState]: ...
+    def parse_rhs(self) -> tuple[NFAState, NFAState]: ...
+    def parse_alt(self) -> tuple[NFAState, NFAState]: ...
+    def parse_item(self) -> tuple[NFAState, NFAState]: ...
+    def parse_atom(self) -> tuple[NFAState, NFAState]: ...
     def expect(self, type: int, value: Any | None = ...) -> str: ...
     def gettoken(self) -> None: ...
     def raise_error(self, msg: str, *args: Any) -> NoReturn: ...
 class NFAState:
-    arcs: list[Tuple[str | None, NFAState]]
+    arcs: list[tuple[str | None, NFAState]]
     def __init__(self) -> None: ...
     def addarc(self, next: NFAState, label: str | None = ...) -> None: ...

lib2to3/pgen2/tokenize.pyi

@@ -17,7 +17,7 @@ class Untokenizer:
     def __init__(self) -> None: ...
     def add_whitespace(self, start: _Coord) -> None: ...
     def untokenize(self, iterable: Iterable[_TokenInfo]) -> str: ...
-    def compat(self, token: Tuple[int, str], iterable: Iterable[_TokenInfo]) -> None: ...
+    def compat(self, token: tuple[int, str], iterable: Iterable[_TokenInfo]) -> None: ...
 def untokenize(iterable: Iterable[_TokenInfo]) -> str: ...
 def generate_tokens(readline: Callable[[], str]) -> Iterator[_TokenInfo]: ...

lib2to3/pytree.pyi

@@ -68,7 +68,7 @@ class BasePattern:
     def optimize(self) -> BasePattern: ...  # sic, subclasses are free to optimize themselves into different patterns
     def match(self, node: _NL, results: _Results | None = ...) -> bool: ...
     def match_seq(self, nodes: list[_NL], results: _Results | None = ...) -> bool: ...
-    def generate_matches(self, nodes: list[_NL]) -> Iterator[Tuple[int, _Results]]: ...
+    def generate_matches(self, nodes: list[_NL]) -> Iterator[tuple[int, _Results]]: ...
 class LeafPattern(BasePattern):
     def __init__(self, type: int | None = ..., content: str | None = ..., name: str | None = ...) -> None: ...
@@ -85,4 +85,4 @@ class WildcardPattern(BasePattern):
 class NegatedPattern(BasePattern):
     def __init__(self, content: str | None = ...) -> None: ...
-def generate_matches(patterns: list[BasePattern], nodes: list[_NL]) -> Iterator[Tuple[int, _Results]]: ...
+def generate_matches(patterns: list[BasePattern], nodes: list[_NL]) -> Iterator[tuple[int, _Results]]: ...
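
A note on the "where possible" qualifier (added context, not part of the commit): type checkers accept the lowercase builtin generics in .pyi stubs regardless of the Python version being targeted, while in ordinary runtime code subscripting tuple needs Python 3.9+ unless annotation evaluation is deferred. A minimal runtime sketch, assuming deferred annotations:

# Hypothetical runtime module, not from typeshed. With the __future__
# import, tuple[int, str] stays an unevaluated annotation, so this also
# runs on Python 3.7/3.8; without it, subscripting tuple needs 3.9+.
from __future__ import annotations

def split_pair(text: str) -> tuple[int, str]:
    number, _, rest = text.partition(":")
    return int(number), rest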