Use PEP 585 syntax in Python 2, protobuf & _ast stubs, where possible (#6949)
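This is a mechanical syntax migration: PEP 585 lets the builtin container types dict, list, tuple, and set be subscripted directly in annotations, replacing the deprecated typing aliases Dict, List, Tuple, and Set. Stub files are never executed at runtime, so type checkers that support PEP 585 accept the builtin-generic syntax even in stubs describing Python 2; names with no builtin equivalent, such as Optional and Text, have to stay, which is the "where possible" in the title. A minimal before/after sketch of the rewrite pattern, using two declarations from the grammar stub below:

    # Before: deprecated typing aliases
    from typing import Dict, Optional, Text, Tuple

    _Label = Tuple[int, Optional[Text]]
    keywords: Dict[Text, int]

    # After: PEP 585 builtin generics; fine in a .pyi stub even for
    # Python 2, since stubs are only read by type checkers, never run.
    from typing import Optional, Text

    _Label = tuple[int, Optional[Text]]
    keywords: dict[Text, int]
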
--- a/stdlib/@python2/lib2to3/pgen2/grammar.pyi
+++ b/stdlib/@python2/lib2to3/pgen2/grammar.pyi
@@ -1,19 +1,19 @@
 from _typeshed import Self, StrPath
-from typing import Dict, List, Optional, Text, Tuple
+from typing import Optional, Text

-_Label = Tuple[int, Optional[Text]]
-_DFA = List[List[Tuple[int, int]]]
-_DFAS = Tuple[_DFA, Dict[int, int]]
+_Label = tuple[int, Optional[Text]]
+_DFA = list[list[tuple[int, int]]]
+_DFAS = tuple[_DFA, dict[int, int]]

 class Grammar:
-    symbol2number: Dict[Text, int]
-    number2symbol: Dict[int, Text]
-    states: List[_DFA]
-    dfas: Dict[int, _DFAS]
-    labels: List[_Label]
-    keywords: Dict[Text, int]
-    tokens: Dict[int, int]
-    symbol2label: Dict[Text, int]
+    symbol2number: dict[Text, int]
+    number2symbol: dict[int, Text]
+    states: list[_DFA]
+    dfas: dict[int, _DFAS]
+    labels: list[_Label]
+    keywords: dict[Text, int]
+    tokens: dict[int, int]
+    symbol2label: dict[Text, int]
     start: int
     def __init__(self) -> None: ...
     def dump(self, filename: StrPath) -> None: ...
@@ -22,4 +22,4 @@ class Grammar:
     def report(self) -> None: ...

 opmap_raw: Text
-opmap: Dict[Text, Text]
+opmap: dict[Text, Text]
--- a/stdlib/@python2/lib2to3/pgen2/literals.pyi
+++ b/stdlib/@python2/lib2to3/pgen2/literals.pyi
@@ -1,6 +1,6 @@
-from typing import Dict, Match, Text
+from typing import Match, Text

-simple_escapes: Dict[Text, Text]
+simple_escapes: dict[Text, Text]

 def escape(m: Match[str]) -> Text: ...
 def evalString(s: Text) -> Text: ...
--- a/stdlib/@python2/lib2to3/pgen2/parse.pyi
+++ b/stdlib/@python2/lib2to3/pgen2/parse.pyi
@@ -1,6 +1,6 @@
 from lib2to3.pgen2.grammar import _DFAS, Grammar
 from lib2to3.pytree import _NL, _Convert, _RawNode
-from typing import Any, List, Sequence, Set, Text, Tuple
+from typing import Any, Sequence, Text

 _Context = Sequence[Any]

@@ -14,9 +14,9 @@ class ParseError(Exception):
 class Parser:
     grammar: Grammar
     convert: _Convert
-    stack: List[Tuple[_DFAS, int, _RawNode]]
+    stack: list[tuple[_DFAS, int, _RawNode]]
     rootnode: _NL | None
-    used_names: Set[Text]
+    used_names: set[Text]
     def __init__(self, grammar: Grammar, convert: _Convert | None = ...) -> None: ...
     def setup(self, start: int | None = ...) -> None: ...
     def addtoken(self, type: int, value: Text | None, context: _Context) -> bool: ...
--- a/stdlib/@python2/lib2to3/pgen2/pgen.pyi
+++ b/stdlib/@python2/lib2to3/pgen2/pgen.pyi
@@ -1,7 +1,7 @@
 from _typeshed import StrPath
 from lib2to3.pgen2 import grammar
 from lib2to3.pgen2.tokenize import _TokenInfo
-from typing import IO, Any, Dict, Iterable, Iterator, List, NoReturn, Text, Tuple
+from typing import IO, Any, Iterable, Iterator, NoReturn, Text

 class PgenGrammar(grammar.Grammar): ...

@@ -9,36 +9,36 @@ class ParserGenerator:
     filename: StrPath
     stream: IO[Text]
     generator: Iterator[_TokenInfo]
-    first: Dict[Text, Dict[Text, int]]
+    first: dict[Text, dict[Text, int]]
     def __init__(self, filename: StrPath, stream: IO[Text] | None = ...) -> None: ...
     def make_grammar(self) -> PgenGrammar: ...
-    def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: ...
+    def make_first(self, c: PgenGrammar, name: Text) -> dict[int, int]: ...
     def make_label(self, c: PgenGrammar, label: Text) -> int: ...
     def addfirstsets(self) -> None: ...
     def calcfirst(self, name: Text) -> None: ...
-    def parse(self) -> Tuple[Dict[Text, List[DFAState]], Text]: ...
-    def make_dfa(self, start: NFAState, finish: NFAState) -> List[DFAState]: ...
-    def dump_nfa(self, name: Text, start: NFAState, finish: NFAState) -> List[DFAState]: ...
+    def parse(self) -> tuple[dict[Text, list[DFAState]], Text]: ...
+    def make_dfa(self, start: NFAState, finish: NFAState) -> list[DFAState]: ...
+    def dump_nfa(self, name: Text, start: NFAState, finish: NFAState) -> list[DFAState]: ...
     def dump_dfa(self, name: Text, dfa: Iterable[DFAState]) -> None: ...
-    def simplify_dfa(self, dfa: List[DFAState]) -> None: ...
-    def parse_rhs(self) -> Tuple[NFAState, NFAState]: ...
-    def parse_alt(self) -> Tuple[NFAState, NFAState]: ...
-    def parse_item(self) -> Tuple[NFAState, NFAState]: ...
-    def parse_atom(self) -> Tuple[NFAState, NFAState]: ...
+    def simplify_dfa(self, dfa: list[DFAState]) -> None: ...
+    def parse_rhs(self) -> tuple[NFAState, NFAState]: ...
+    def parse_alt(self) -> tuple[NFAState, NFAState]: ...
+    def parse_item(self) -> tuple[NFAState, NFAState]: ...
+    def parse_atom(self) -> tuple[NFAState, NFAState]: ...
     def expect(self, type: int, value: Any | None = ...) -> Text: ...
     def gettoken(self) -> None: ...
     def raise_error(self, msg: str, *args: Any) -> NoReturn: ...

 class NFAState:
-    arcs: List[Tuple[Text | None, NFAState]]
+    arcs: list[tuple[Text | None, NFAState]]
     def __init__(self) -> None: ...
     def addarc(self, next: NFAState, label: Text | None = ...) -> None: ...

 class DFAState:
-    nfaset: Dict[NFAState, Any]
+    nfaset: dict[NFAState, Any]
     isfinal: bool
-    arcs: Dict[Text, DFAState]
-    def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: ...
+    arcs: dict[Text, DFAState]
+    def __init__(self, nfaset: dict[NFAState, Any], final: NFAState) -> None: ...
     def addarc(self, next: DFAState, label: Text) -> None: ...
     def unifystate(self, old: DFAState, new: DFAState) -> None: ...
     def __eq__(self, other: Any) -> bool: ...
--- a/stdlib/@python2/lib2to3/pgen2/token.pyi
+++ b/stdlib/@python2/lib2to3/pgen2/token.pyi
@@ -1,4 +1,4 @@
-from typing import Dict, Text
+from typing import Text

 ENDMARKER: int
 NAME: int
@@ -56,7 +56,7 @@ NL: int
 ERRORTOKEN: int
 N_TOKENS: int
 NT_OFFSET: int
-tok_name: Dict[int, Text]
+tok_name: dict[int, Text]

 def ISTERMINAL(x: int) -> bool: ...
 def ISNONTERMINAL(x: int) -> bool: ...
--- a/stdlib/@python2/lib2to3/pgen2/tokenize.pyi
+++ b/stdlib/@python2/lib2to3/pgen2/tokenize.pyi
@@ -1,9 +1,9 @@
 from lib2to3.pgen2.token import *  # noqa
-from typing import Callable, Iterable, Iterator, List, Text, Tuple
+from typing import Callable, Iterable, Iterator, Text

-_Coord = Tuple[int, int]
+_Coord = tuple[int, int]
 _TokenEater = Callable[[int, Text, _Coord, _Coord, Text], None]
-_TokenInfo = Tuple[int, Text, _Coord, _Coord, Text]
+_TokenInfo = tuple[int, Text, _Coord, _Coord, Text]

 class TokenError(Exception): ...
 class StopTokenizing(Exception): ...
@@ -11,13 +11,13 @@ class StopTokenizing(Exception): ...
 def tokenize(readline: Callable[[], Text], tokeneater: _TokenEater = ...) -> None: ...

 class Untokenizer:
-    tokens: List[Text]
+    tokens: list[Text]
     prev_row: int
     prev_col: int
     def __init__(self) -> None: ...
     def add_whitespace(self, start: _Coord) -> None: ...
     def untokenize(self, iterable: Iterable[_TokenInfo]) -> Text: ...
-    def compat(self, token: Tuple[int, Text], iterable: Iterable[_TokenInfo]) -> None: ...
+    def compat(self, token: tuple[int, Text], iterable: Iterable[_TokenInfo]) -> None: ...

 def untokenize(iterable: Iterable[_TokenInfo]) -> Text: ...
 def generate_tokens(readline: Callable[[], Text]) -> Iterator[_TokenInfo]: ...
--- a/stdlib/@python2/lib2to3/pytree.pyi
+++ b/stdlib/@python2/lib2to3/pytree.pyi
@@ -1,12 +1,12 @@
 from _typeshed import Self
 from lib2to3.pgen2.grammar import Grammar
-from typing import Any, Callable, Dict, Iterator, List, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Callable, Iterator, Optional, Text, TypeVar, Union

 _P = TypeVar("_P")
 _NL = Union[Node, Leaf]
-_Context = Tuple[Text, int, int]
-_Results = Dict[Text, _NL]
-_RawNode = Tuple[int, Text, _Context, Optional[List[_NL]]]
+_Context = tuple[Text, int, int]
+_Results = dict[Text, _NL]
+_RawNode = tuple[int, Text, _Context, Optional[list[_NL]]]
 _Convert = Callable[[Grammar, _RawNode], Any]

 HUGE: int
@@ -17,7 +17,7 @@ class Base:
     type: int
     parent: Node | None
     prefix: Text
-    children: List[_NL]
+    children: list[_NL]
     was_changed: bool
     was_checked: bool
     def __eq__(self, other: Any) -> bool: ...
@@ -25,7 +25,7 @@ class Base:
     def clone(self: Self) -> Self: ...
     def post_order(self) -> Iterator[_NL]: ...
     def pre_order(self) -> Iterator[_NL]: ...
-    def replace(self, new: _NL | List[_NL]) -> None: ...
+    def replace(self, new: _NL | list[_NL]) -> None: ...
     def get_lineno(self) -> int: ...
     def changed(self) -> None: ...
     def remove(self) -> int | None: ...
@@ -40,14 +40,14 @@ class Base:
     def set_prefix(self, prefix: Text) -> None: ...

 class Node(Base):
-    fixers_applied: List[Any]
+    fixers_applied: list[Any]
     def __init__(
         self,
         type: int,
-        children: List[_NL],
+        children: list[_NL],
         context: Any | None = ...,
         prefix: Text | None = ...,
-        fixers_applied: List[Any] | None = ...,
+        fixers_applied: list[Any] | None = ...,
     ) -> None: ...
     def set_child(self, i: int, child: _NL) -> None: ...
     def insert_child(self, i: int, child: _NL) -> None: ...
@@ -57,9 +57,9 @@ class Leaf(Base):
     lineno: int
     column: int
     value: Text
-    fixers_applied: List[Any]
+    fixers_applied: list[Any]
     def __init__(
-        self, type: int, value: Text, context: _Context | None = ..., prefix: Text | None = ..., fixers_applied: List[Any] = ...
+        self, type: int, value: Text, context: _Context | None = ..., prefix: Text | None = ..., fixers_applied: list[Any] = ...
     ) -> None: ...

 def convert(gr: Grammar, raw_node: _RawNode) -> _NL: ...
@@ -70,8 +70,8 @@ class BasePattern:
     name: Text | None
     def optimize(self) -> BasePattern: ...  # sic, subclasses are free to optimize themselves into different patterns
     def match(self, node: _NL, results: _Results | None = ...) -> bool: ...
-    def match_seq(self, nodes: List[_NL], results: _Results | None = ...) -> bool: ...
-    def generate_matches(self, nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...
+    def match_seq(self, nodes: list[_NL], results: _Results | None = ...) -> bool: ...
+    def generate_matches(self, nodes: list[_NL]) -> Iterator[tuple[int, _Results]]: ...

 class LeafPattern(BasePattern):
     def __init__(self, type: int | None = ..., content: Text | None = ..., name: Text | None = ...) -> None: ...
@@ -88,4 +88,4 @@ class WildcardPattern(BasePattern):

 class NegatedPattern(BasePattern):
     def __init__(self, content: Text | None = ...) -> None: ...

-def generate_matches(patterns: List[BasePattern], nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...
+def generate_matches(patterns: list[BasePattern], nodes: list[_NL]) -> Iterator[tuple[int, _Results]]: ...