Replace 'Text' with 'str' in py3 stdlib (#5466)

Sebastian Rittau
2021-05-16 16:10:48 +02:00
committed by GitHub
parent dbe77b6ae9
commit 6a9c89e928
49 changed files with 328 additions and 349 deletions

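For context: on Python 3, typing.Text is nothing more than an alias for str, so the substitution below changes no runtime or type-checking behavior. A quick demonstration (illustration only, not part of the diff):

    # On Python 3, typing.Text and str are the same object, so annotations
    # written with either name are interchangeable.
    from typing import Text

    assert Text is str
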
stdlib/lib2to3/pgen2/driver.pyi

@@ -2,7 +2,7 @@ from _typeshed import StrPath
 from lib2to3.pgen2.grammar import Grammar
 from lib2to3.pytree import _NL, _Convert
 from logging import Logger
-from typing import IO, Any, Iterable, Optional, Text
+from typing import IO, Any, Iterable, Optional
 
 class Driver:
     grammar: Grammar
@@ -10,11 +10,11 @@ class Driver:
     convert: _Convert
     def __init__(self, grammar: Grammar, convert: Optional[_Convert] = ..., logger: Optional[Logger] = ...) -> None: ...
     def parse_tokens(self, tokens: Iterable[Any], debug: bool = ...) -> _NL: ...
-    def parse_stream_raw(self, stream: IO[Text], debug: bool = ...) -> _NL: ...
-    def parse_stream(self, stream: IO[Text], debug: bool = ...) -> _NL: ...
-    def parse_file(self, filename: StrPath, encoding: Optional[Text] = ..., debug: bool = ...) -> _NL: ...
-    def parse_string(self, text: Text, debug: bool = ...) -> _NL: ...
+    def parse_stream_raw(self, stream: IO[str], debug: bool = ...) -> _NL: ...
+    def parse_stream(self, stream: IO[str], debug: bool = ...) -> _NL: ...
+    def parse_file(self, filename: StrPath, encoding: Optional[str] = ..., debug: bool = ...) -> _NL: ...
+    def parse_string(self, text: str, debug: bool = ...) -> _NL: ...
 
 def load_grammar(
-    gt: Text = ..., gp: Optional[Text] = ..., save: bool = ..., force: bool = ..., logger: Optional[Logger] = ...
+    gt: str = ..., gp: Optional[str] = ..., save: bool = ..., force: bool = ..., logger: Optional[Logger] = ...
 ) -> Grammar: ...

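A minimal sketch of the Driver API touched above, using the grammar bundled with lib2to3 (illustration only, not part of the commit):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    # parse_string's text parameter is plain str under the new annotations.
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("x = 1\n")
    print(tree)  # the tree round-trips back to the source text
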
stdlib/lib2to3/pgen2/grammar.pyi

@@ -1,20 +1,20 @@
 from _typeshed import StrPath
-from typing import Dict, List, Optional, Text, Tuple, TypeVar
+from typing import Dict, List, Optional, Tuple, TypeVar
 
 _P = TypeVar("_P")
-_Label = Tuple[int, Optional[Text]]
+_Label = Tuple[int, Optional[str]]
 _DFA = List[List[Tuple[int, int]]]
 _DFAS = Tuple[_DFA, Dict[int, int]]
 
 class Grammar:
-    symbol2number: Dict[Text, int]
-    number2symbol: Dict[int, Text]
+    symbol2number: Dict[str, int]
+    number2symbol: Dict[int, str]
     states: List[_DFA]
     dfas: Dict[int, _DFAS]
     labels: List[_Label]
-    keywords: Dict[Text, int]
+    keywords: Dict[str, int]
     tokens: Dict[int, int]
-    symbol2label: Dict[Text, int]
+    symbol2label: Dict[str, int]
     start: int
     def __init__(self) -> None: ...
     def dump(self, filename: StrPath) -> None: ...
@@ -22,5 +22,5 @@ class Grammar:
     def copy(self: _P) -> _P: ...
    def report(self) -> None: ...
 
-opmap_raw: Text
-opmap: Dict[Text, Text]
+opmap_raw: str
+opmap: Dict[str, str]

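At runtime these tables really are str-keyed; the stock grammar from lib2to3.pygram shows it (illustrative sketch):

    from lib2to3 import pygram

    g = pygram.python_grammar           # a lib2to3.pgen2.grammar.Grammar
    n = g.symbol2number["file_input"]   # Dict[str, int]
    print(n, g.number2symbol[n])        # Dict[int, str] maps it back
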
stdlib/lib2to3/pgen2/literals.pyi

@@ -1,7 +1,7 @@
-from typing import Dict, Match, Text
+from typing import Dict, Match
 
-simple_escapes: Dict[Text, Text]
+simple_escapes: Dict[str, str]
 
-def escape(m: Match[str]) -> Text: ...
-def evalString(s: Text) -> Text: ...
+def escape(m: Match[str]) -> str: ...
+def evalString(s: str) -> str: ...
 def test() -> None: ...

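For illustration, evalString decodes a Python string literal and now advertises str in, str out (not part of the commit):

    from lib2to3.pgen2.literals import evalString, simple_escapes

    print(repr(evalString("'spam\\n'")))  # 'spam\n'
    print(repr(simple_escapes["n"]))      # '\n', keyed by a one-char str
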
stdlib/lib2to3/pgen2/parse.pyi

@@ -1,26 +1,26 @@
 from lib2to3.pgen2.grammar import _DFAS, Grammar
 from lib2to3.pytree import _NL, _Convert, _RawNode
-from typing import Any, List, Optional, Sequence, Set, Text, Tuple
+from typing import Any, List, Optional, Sequence, Set, Tuple
 
 _Context = Sequence[Any]
 
 class ParseError(Exception):
-    msg: Text
+    msg: str
     type: int
-    value: Optional[Text]
+    value: Optional[str]
     context: _Context
-    def __init__(self, msg: Text, type: int, value: Optional[Text], context: _Context) -> None: ...
+    def __init__(self, msg: str, type: int, value: Optional[str], context: _Context) -> None: ...
 
 class Parser:
     grammar: Grammar
     convert: _Convert
     stack: List[Tuple[_DFAS, int, _RawNode]]
     rootnode: Optional[_NL]
-    used_names: Set[Text]
+    used_names: Set[str]
     def __init__(self, grammar: Grammar, convert: Optional[_Convert] = ...) -> None: ...
     def setup(self, start: Optional[int] = ...) -> None: ...
-    def addtoken(self, type: int, value: Optional[Text], context: _Context) -> bool: ...
-    def classify(self, type: int, value: Optional[Text], context: _Context) -> int: ...
-    def shift(self, type: int, value: Optional[Text], newstate: int, context: _Context) -> None: ...
+    def addtoken(self, type: int, value: Optional[str], context: _Context) -> bool: ...
+    def classify(self, type: int, value: Optional[str], context: _Context) -> int: ...
+    def shift(self, type: int, value: Optional[str], newstate: int, context: _Context) -> None: ...
     def push(self, type: int, newdfa: _DFAS, newstate: int, context: _Context) -> None: ...
     def pop(self) -> None: ...

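A sketch of how the ParseError fields typed above surface in practice (illustration only):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver
    from lib2to3.pgen2.parse import ParseError

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    try:
        d.parse_string("def broken(:\n")
    except ParseError as err:
        # msg and value are str / Optional[str] under the new stubs
        print(err.msg, err.type, repr(err.value))
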
stdlib/lib2to3/pgen2/pgen.pyi

@@ -1,45 +1,45 @@
 from _typeshed import StrPath
 from lib2to3.pgen2 import grammar
 from lib2to3.pgen2.tokenize import _TokenInfo
-from typing import IO, Any, Dict, Iterable, Iterator, List, NoReturn, Optional, Text, Tuple
+from typing import IO, Any, Dict, Iterable, Iterator, List, NoReturn, Optional, Tuple
 
 class PgenGrammar(grammar.Grammar): ...
 
 class ParserGenerator:
     filename: StrPath
-    stream: IO[Text]
+    stream: IO[str]
     generator: Iterator[_TokenInfo]
-    first: Dict[Text, Dict[Text, int]]
-    def __init__(self, filename: StrPath, stream: Optional[IO[Text]] = ...) -> None: ...
+    first: Dict[str, Dict[str, int]]
+    def __init__(self, filename: StrPath, stream: Optional[IO[str]] = ...) -> None: ...
     def make_grammar(self) -> PgenGrammar: ...
-    def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: ...
-    def make_label(self, c: PgenGrammar, label: Text) -> int: ...
+    def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]: ...
+    def make_label(self, c: PgenGrammar, label: str) -> int: ...
     def addfirstsets(self) -> None: ...
-    def calcfirst(self, name: Text) -> None: ...
-    def parse(self) -> Tuple[Dict[Text, List[DFAState]], Text]: ...
+    def calcfirst(self, name: str) -> None: ...
+    def parse(self) -> Tuple[Dict[str, List[DFAState]], str]: ...
     def make_dfa(self, start: NFAState, finish: NFAState) -> List[DFAState]: ...
-    def dump_nfa(self, name: Text, start: NFAState, finish: NFAState) -> List[DFAState]: ...
-    def dump_dfa(self, name: Text, dfa: Iterable[DFAState]) -> None: ...
+    def dump_nfa(self, name: str, start: NFAState, finish: NFAState) -> List[DFAState]: ...
+    def dump_dfa(self, name: str, dfa: Iterable[DFAState]) -> None: ...
     def simplify_dfa(self, dfa: List[DFAState]) -> None: ...
     def parse_rhs(self) -> Tuple[NFAState, NFAState]: ...
     def parse_alt(self) -> Tuple[NFAState, NFAState]: ...
     def parse_item(self) -> Tuple[NFAState, NFAState]: ...
     def parse_atom(self) -> Tuple[NFAState, NFAState]: ...
-    def expect(self, type: int, value: Optional[Any] = ...) -> Text: ...
+    def expect(self, type: int, value: Optional[Any] = ...) -> str: ...
     def gettoken(self) -> None: ...
     def raise_error(self, msg: str, *args: Any) -> NoReturn: ...
 
 class NFAState:
-    arcs: List[Tuple[Optional[Text], NFAState]]
+    arcs: List[Tuple[Optional[str], NFAState]]
     def __init__(self) -> None: ...
-    def addarc(self, next: NFAState, label: Optional[Text] = ...) -> None: ...
+    def addarc(self, next: NFAState, label: Optional[str] = ...) -> None: ...
 
 class DFAState:
     nfaset: Dict[NFAState, Any]
     isfinal: bool
-    arcs: Dict[Text, DFAState]
+    arcs: Dict[str, DFAState]
     def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: ...
-    def addarc(self, next: DFAState, label: Text) -> None: ...
+    def addarc(self, next: DFAState, label: str) -> None: ...
     def unifystate(self, old: DFAState, new: DFAState) -> None: ...
     def __eq__(self, other: Any) -> bool: ...

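A hedged sketch of ParserGenerator, assuming the Grammar.txt file that CPython ships inside the lib2to3 package (the path lookup is this illustration's assumption, not part of the commit):

    import os
    import lib2to3
    from lib2to3.pgen2 import pgen

    grammar_file = os.path.join(os.path.dirname(lib2to3.__file__), "Grammar.txt")
    g = pgen.ParserGenerator(grammar_file).make_grammar()
    print(sorted(g.keywords)[:5])  # keywords is Dict[str, int]
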
stdlib/lib2to3/pgen2/token.pyi

@@ -1,4 +1,4 @@
-from typing import Dict, Text
+from typing import Dict
 
 ENDMARKER: int
 NAME: int
@@ -61,7 +61,7 @@ ASYNC: int
 ERRORTOKEN: int
 N_TOKENS: int
 NT_OFFSET: int
-tok_name: Dict[int, Text]
+tok_name: Dict[int, str]
 
 def ISTERMINAL(x: int) -> bool: ...
 def ISNONTERMINAL(x: int) -> bool: ...

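Illustration: tok_name maps integer token codes back to their names, and its values are plain str:

    from lib2to3.pgen2 import token

    print(token.NAME, token.tok_name[token.NAME])  # e.g. "1 NAME"
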
stdlib/lib2to3/pgen2/tokenize.pyi

@@ -1,23 +1,23 @@
 from lib2to3.pgen2.token import *  # noqa
-from typing import Callable, Iterable, Iterator, List, Text, Tuple
+from typing import Callable, Iterable, Iterator, List, Tuple
 
 _Coord = Tuple[int, int]
-_TokenEater = Callable[[int, Text, _Coord, _Coord, Text], None]
-_TokenInfo = Tuple[int, Text, _Coord, _Coord, Text]
+_TokenEater = Callable[[int, str, _Coord, _Coord, str], None]
+_TokenInfo = Tuple[int, str, _Coord, _Coord, str]
 
 class TokenError(Exception): ...
 class StopTokenizing(Exception): ...
 
-def tokenize(readline: Callable[[], Text], tokeneater: _TokenEater = ...) -> None: ...
+def tokenize(readline: Callable[[], str], tokeneater: _TokenEater = ...) -> None: ...
 
 class Untokenizer:
-    tokens: List[Text]
+    tokens: List[str]
     prev_row: int
     prev_col: int
     def __init__(self) -> None: ...
     def add_whitespace(self, start: _Coord) -> None: ...
-    def untokenize(self, iterable: Iterable[_TokenInfo]) -> Text: ...
-    def compat(self, token: Tuple[int, Text], iterable: Iterable[_TokenInfo]) -> None: ...
+    def untokenize(self, iterable: Iterable[_TokenInfo]) -> str: ...
+    def compat(self, token: Tuple[int, str], iterable: Iterable[_TokenInfo]) -> None: ...
 
-def untokenize(iterable: Iterable[_TokenInfo]) -> Text: ...
-def generate_tokens(readline: Callable[[], Text]) -> Iterator[_TokenInfo]: ...
+def untokenize(iterable: Iterable[_TokenInfo]) -> str: ...
+def generate_tokens(readline: Callable[[], str]) -> Iterator[_TokenInfo]: ...

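Sketch of the str-based tokenizer interface annotated above (illustration only):

    import io
    from lib2to3.pgen2 import tokenize

    # generate_tokens takes a str-returning readline and yields _TokenInfo
    # 5-tuples whose value and line fields are str.
    source = io.StringIO("x = 1\n")
    for tok_type, value, start, end, line in tokenize.generate_tokens(source.readline):
        print(tok_type, repr(value), start, end)
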
stdlib/lib2to3/pytree.pyi

@@ -1,21 +1,21 @@
 from lib2to3.pgen2.grammar import Grammar
-from typing import Any, Callable, Dict, Iterator, List, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union
 
 _P = TypeVar("_P")
 _NL = Union[Node, Leaf]
-_Context = Tuple[Text, int, int]
-_Results = Dict[Text, _NL]
-_RawNode = Tuple[int, Text, _Context, Optional[List[_NL]]]
+_Context = Tuple[str, int, int]
+_Results = Dict[str, _NL]
+_RawNode = Tuple[int, str, _Context, Optional[List[_NL]]]
 _Convert = Callable[[Grammar, _RawNode], Any]
 
 HUGE: int
 
-def type_repr(type_num: int) -> Text: ...
+def type_repr(type_num: int) -> str: ...
 
 class Base:
     type: int
     parent: Optional[Node]
-    prefix: Text
+    prefix: str
     children: List[_NL]
     was_changed: bool
     was_checked: bool
@@ -34,7 +34,7 @@ class Base:
     def prev_sibling(self) -> Optional[_NL]: ...
     def leaves(self) -> Iterator[Leaf]: ...
     def depth(self) -> int: ...
-    def get_suffix(self) -> Text: ...
+    def get_suffix(self) -> str: ...
 
 class Node(Base):
     fixers_applied: List[Any]
@@ -43,7 +43,7 @@ class Node(Base):
         type: int,
         children: List[_NL],
         context: Optional[Any] = ...,
-        prefix: Optional[Text] = ...,
+        prefix: Optional[str] = ...,
         fixers_applied: Optional[List[Any]] = ...,
     ) -> None: ...
     def set_child(self, i: int, child: _NL) -> None: ...
@@ -53,14 +53,14 @@ class Node(Base):
 class Leaf(Base):
     lineno: int
     column: int
-    value: Text
+    value: str
     fixers_applied: List[Any]
     def __init__(
         self,
         type: int,
-        value: Text,
+        value: str,
         context: Optional[_Context] = ...,
-        prefix: Optional[Text] = ...,
+        prefix: Optional[str] = ...,
         fixers_applied: List[Any] = ...,
     ) -> None: ...
 
@@ -68,26 +68,26 @@ def convert(gr: Grammar, raw_node: _RawNode) -> _NL: ...
 
 class BasePattern:
     type: int
-    content: Optional[Text]
-    name: Optional[Text]
+    content: Optional[str]
+    name: Optional[str]
     def optimize(self) -> BasePattern: ...  # sic, subclasses are free to optimize themselves into different patterns
     def match(self, node: _NL, results: Optional[_Results] = ...) -> bool: ...
     def match_seq(self, nodes: List[_NL], results: Optional[_Results] = ...) -> bool: ...
     def generate_matches(self, nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...
 
 class LeafPattern(BasePattern):
-    def __init__(self, type: Optional[int] = ..., content: Optional[Text] = ..., name: Optional[Text] = ...) -> None: ...
+    def __init__(self, type: Optional[int] = ..., content: Optional[str] = ..., name: Optional[str] = ...) -> None: ...
 
 class NodePattern(BasePattern):
     wildcards: bool
-    def __init__(self, type: Optional[int] = ..., content: Optional[Text] = ..., name: Optional[Text] = ...) -> None: ...
+    def __init__(self, type: Optional[int] = ..., content: Optional[str] = ..., name: Optional[str] = ...) -> None: ...
 
 class WildcardPattern(BasePattern):
     min: int
     max: int
-    def __init__(self, content: Optional[Text] = ..., min: int = ..., max: int = ..., name: Optional[Text] = ...) -> None: ...
+    def __init__(self, content: Optional[str] = ..., min: int = ..., max: int = ..., name: Optional[str] = ...) -> None: ...
 
 class NegatedPattern(BasePattern):
-    def __init__(self, content: Optional[Text] = ...) -> None: ...
+    def __init__(self, content: Optional[str] = ...) -> None: ...
 
 def generate_matches(patterns: List[BasePattern], nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...
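
To close the loop, a small sketch of the pattern API above: Leaf values, pattern content, and the keys of the results dict are all plain str now (illustration, not part of the commit):

    from lib2to3 import pytree
    from lib2to3.pgen2 import token

    pattern = pytree.LeafPattern(token.NAME, content="spam", name="target")
    results = {}
    if pattern.match(pytree.Leaf(token.NAME, "spam"), results):
        print(results["target"])  # the captured Leaf, keyed by str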