Partial lib2to3 stubs (#1096)

* Partial lib2to3 stubs

* Use typing.Text instead of str

* Add a _Path shortcut for managing PathLike

* Respond to review, pgen2/driver

* Respond to review, pgen2/grammar

* Respond to review, pgen2/parse

* Respond to review, pgen2/pgen

* Respond to review, pgen2/token

* Respond to review, pgen2/tokenize

* Respond to review, pytree

* Move to 2and3

* Make pytype happy

* Respond to review nits

* Move _RawNode, _Convert to pytree and make the latter return None

* Make concrete Symbols subclasses for python and pattern

* Add missing Callable import

Łukasz Langa
2017-04-13 20:14:35 -07:00
committed by Jelle Zijlstra
parent 359c8cc313
commit 03119a237d
11 changed files with 456 additions and 0 deletions

@@ -0,0 +1 @@
# Stubs for lib2to3 (Python 3.6)

@@ -0,0 +1,10 @@
# Stubs for lib2to3.pgen2 (Python 3.6)
import os
import sys
from typing import Text, Union

if sys.version_info >= (3, 6):
    _Path = Union[Text, os.PathLike]
else:
    _Path = Text
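
A minimal usage sketch (illustrative, not part of the diff): on Python 3.6+ the _Path alias means any os.PathLike value, such as a pathlib.Path, is accepted wherever these stubs expect a filename, e.g. Grammar.dump() and Grammar.load(). The pickle path below is a hypothetical example.

import pathlib
import tempfile
from lib2to3.pgen2.grammar import Grammar
from lib2to3.pygram import python_grammar

# Hypothetical destination; pathlib.Path satisfies _Path on 3.6+.
target = pathlib.Path(tempfile.gettempdir()) / "python_grammar.pickle"
python_grammar.dump(target)        # _Path: str or os.PathLike both work
restored = Grammar()
restored.load(target)              # _Path again
print(restored.start == python_grammar.start)   # True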

@@ -0,0 +1,24 @@
# Stubs for lib2to3.pgen2.driver (Python 3.6)
import os
import sys
from typing import Any, Callable, IO, Iterable, List, Optional, Text, Tuple, Union
from logging import Logger
from lib2to3.pytree import _Convert, _NL
from lib2to3.pgen2 import _Path
from lib2to3.pgen2.grammar import Grammar

class Driver:
    grammar: Grammar
    logger: Logger
    convert: _Convert
    def __init__(self, grammar: Grammar, convert: Optional[_Convert] = ..., logger: Optional[Logger] = ...) -> None: ...
    def parse_tokens(self, tokens: Iterable[Any], debug: bool = ...) -> _NL: ...
    def parse_stream_raw(self, stream: IO[Text], debug: bool = ...) -> _NL: ...
    def parse_stream(self, stream: IO[Text], debug: bool = ...) -> _NL: ...
    def parse_file(self, filename: _Path, encoding: Optional[Text] = ..., debug: bool = ...) -> _NL: ...
    def parse_string(self, text: Text, debug: bool = ...) -> _NL: ...

def load_grammar(gt: Text = ..., gp: Optional[Text] = ..., save: bool = ..., force: bool = ..., logger: Optional[Logger] = ...) -> Grammar: ...
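
For orientation, a small usage sketch (not part of the diff) of how this API is typically driven; python_grammar and pytree.convert come from the pygram and pytree stubs later in this commit.

from lib2to3.pgen2.driver import Driver
from lib2to3.pygram import python_grammar
from lib2to3.pytree import convert

drv = Driver(python_grammar, convert=convert)
tree = drv.parse_string("x = 1\n")   # returns a pytree Node/Leaf (_NL)
print(type(tree).__name__, repr(str(tree)))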

@@ -0,0 +1,29 @@
# Stubs for lib2to3.pgen2.grammar (Python 3.6)
from lib2to3.pgen2 import _Path
from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar
_P = TypeVar('_P')
_Label = Tuple[int, Optional[Text]]
_DFA = List[List[Tuple[int, int]]]
_DFAS = Tuple[_DFA, Dict[int, int]]

class Grammar:
    symbol2number: Dict[Text, int]
    number2symbol: Dict[int, Text]
    states: List[_DFA]
    dfas: Dict[int, _DFAS]
    labels: List[_Label]
    keywords: Dict[Text, int]
    tokens: Dict[int, int]
    symbol2label: Dict[Text, int]
    start: int
    def __init__(self) -> None: ...
    def dump(self, filename: _Path) -> None: ...
    def load(self, filename: _Path) -> None: ...
    def copy(self: _P) -> _P: ...
    def report(self) -> None: ...

opmap_raw: Text
opmap: Dict[Text, Text]

@@ -0,0 +1,9 @@
# Stubs for lib2to3.pgen2.literals (Python 3.6)
from typing import Dict, Match, Text
simple_escapes: Dict[Text, Text]
def escape(m: Match) -> Text: ...
def evalString(s: Text) -> Text: ...
def test() -> None: ...
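
A quick sketch (illustrative only) of what these helpers do: evalString() undoes the quoting and backslash escapes of a Python string literal, using the simple_escapes table.

from lib2to3.pgen2.literals import evalString

print(evalString(r"'a\tb'") == "a\tb")   # True: the \t escape becomes a real tab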

@@ -0,0 +1,29 @@
# Stubs for lib2to3.pgen2.parse (Python 3.6)
from typing import Any, Dict, List, Optional, Sequence, Set, Text, Tuple
from lib2to3.pgen2.grammar import Grammar, _DFAS
from lib2to3.pytree import _NL, _Convert, _RawNode
_Context = Sequence[Any]

class ParseError(Exception):
    msg: Text
    type: int
    value: Optional[Text]
    context: _Context
    def __init__(self, msg: Text, type: int, value: Optional[Text], context: _Context) -> None: ...

class Parser:
    grammar: Grammar
    convert: _Convert
    stack: List[Tuple[_DFAS, int, _RawNode]]
    rootnode: Optional[_NL]
    used_names: Set[Text]
    def __init__(self, grammar: Grammar, convert: Optional[_Convert] = ...) -> None: ...
    def setup(self, start: Optional[int] = ...) -> None: ...
    def addtoken(self, type: int, value: Optional[Text], context: _Context) -> bool: ...
    def classify(self, type: int, value: Optional[Text], context: _Context) -> int: ...
    def shift(self, type: int, value: Optional[Text], newstate: int, context: _Context) -> None: ...
    def push(self, type: int, newdfa: _DFAS, newstate: int, context: _Context) -> None: ...
    def pop(self) -> None: ...

@@ -0,0 +1,49 @@
# Stubs for lib2to3.pgen2.pgen (Python 3.6)
from typing import Any, Dict, IO, Iterable, Iterator, List, Optional, Text, Tuple
from mypy_extensions import NoReturn
from lib2to3.pgen2 import _Path, grammar
from lib2to3.pgen2.tokenize import _TokenInfo
class PgenGrammar(grammar.Grammar): ...

class ParserGenerator:
    filename: _Path
    stream: IO[Text]
    generator: Iterator[_TokenInfo]
    first: Dict[Text, Dict[Text, int]]
    def __init__(self, filename: _Path, stream: Optional[IO[Text]] = ...) -> None: ...
    def make_grammar(self) -> PgenGrammar: ...
    def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: ...
    def make_label(self, c: PgenGrammar, label: Text) -> int: ...
    def addfirstsets(self) -> None: ...
    def calcfirst(self, name: Text) -> None: ...
    def parse(self) -> Tuple[Dict[Text, List[DFAState]], Text]: ...
    def make_dfa(self, start: NFAState, finish: NFAState) -> List[DFAState]: ...
    def dump_nfa(self, name: Text, start: NFAState, finish: NFAState) -> List[DFAState]: ...
    def dump_dfa(self, name: Text, dfa: Iterable[DFAState]) -> None: ...
    def simplify_dfa(self, dfa: List[DFAState]) -> None: ...
    def parse_rhs(self) -> Tuple[NFAState, NFAState]: ...
    def parse_alt(self) -> Tuple[NFAState, NFAState]: ...
    def parse_item(self) -> Tuple[NFAState, NFAState]: ...
    def parse_atom(self) -> Tuple[NFAState, NFAState]: ...
    def expect(self, type: int, value: Optional[Any] = ...) -> Text: ...
    def gettoken(self) -> None: ...
    def raise_error(self, msg: str, *args: Any) -> NoReturn: ...

class NFAState:
    arcs: List[Tuple[Optional[Text], NFAState]]
    def __init__(self) -> None: ...
    def addarc(self, next: NFAState, label: Optional[Text] = ...) -> None: ...

class DFAState:
    nfaset: Dict[NFAState, Any]
    isfinal: bool
    arcs: Dict[Text, DFAState]
    def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: ...
    def addarc(self, next: DFAState, label: Text) -> None: ...
    def unifystate(self, old: DFAState, new: DFAState) -> None: ...
    def __eq__(self, other: Any) -> bool: ...

def generate_grammar(filename: _Path = ...) -> PgenGrammar: ...
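
An illustrative sketch (not part of the diff) of the end-to-end entry point: generate_grammar() runs ParserGenerator over a grammar text file and returns a PgenGrammar. The path below assumes the Grammar.txt file shipped inside the installed lib2to3 package.

import os
import lib2to3
from lib2to3.pgen2.pgen import generate_grammar

# Assumed location of lib2to3's bundled grammar description.
grammar_txt = os.path.join(os.path.dirname(lib2to3.__file__), "Grammar.txt")
g = generate_grammar(grammar_txt)              # accepts a _Path
print(len(g.dfas), sorted(g.symbol2number)[:3])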

@@ -0,0 +1,73 @@
# Stubs for lib2to3.pgen2.token (Python 3.6)
import sys
from typing import Dict, Text
ENDMARKER: int
NAME: int
NUMBER: int
STRING: int
NEWLINE: int
INDENT: int
DEDENT: int
LPAR: int
RPAR: int
LSQB: int
RSQB: int
COLON: int
COMMA: int
SEMI: int
PLUS: int
MINUS: int
STAR: int
SLASH: int
VBAR: int
AMPER: int
LESS: int
GREATER: int
EQUAL: int
DOT: int
PERCENT: int
BACKQUOTE: int
LBRACE: int
RBRACE: int
EQEQUAL: int
NOTEQUAL: int
LESSEQUAL: int
GREATEREQUAL: int
TILDE: int
CIRCUMFLEX: int
LEFTSHIFT: int
RIGHTSHIFT: int
DOUBLESTAR: int
PLUSEQUAL: int
MINEQUAL: int
STAREQUAL: int
SLASHEQUAL: int
PERCENTEQUAL: int
AMPEREQUAL: int
VBAREQUAL: int
CIRCUMFLEXEQUAL: int
LEFTSHIFTEQUAL: int
RIGHTSHIFTEQUAL: int
DOUBLESTAREQUAL: int
DOUBLESLASH: int
DOUBLESLASHEQUAL: int
OP: int
COMMENT: int
NL: int
if sys.version_info >= (3,):
    RARROW: int
if sys.version_info >= (3, 5):
    AT: int
    ATEQUAL: int
    AWAIT: int
    ASYNC: int
ERRORTOKEN: int
N_TOKENS: int
NT_OFFSET: int
tok_name: Dict[int, Text]
def ISTERMINAL(x: int) -> bool: ...
def ISNONTERMINAL(x: int) -> bool: ...
def ISEOF(x: int) -> bool: ...
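
A short sketch (illustrative only): these constants mirror the stdlib token module; tok_name maps the codes back to names, and ISTERMINAL/ISNONTERMINAL split token codes from grammar symbols at NT_OFFSET.

from lib2to3.pgen2 import token

print(token.tok_name[token.NAME])             # 'NAME'
print(token.ISTERMINAL(token.NAME))           # True: below NT_OFFSET
print(token.ISNONTERMINAL(token.NT_OFFSET))   # True: grammar symbols start here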

@@ -0,0 +1,30 @@
# Stubs for lib2to3.pgen2.tokenize (Python 3.6)
# NOTE: Only elements from __all__ are present.
from typing import Callable, Iterable, Iterator, List, Text, Tuple
from lib2to3.pgen2.token import * # noqa
_Coord = Tuple[int, int]
_TokenEater = Callable[[int, Text, _Coord, _Coord, Text], None]
_TokenInfo = Tuple[int, Text, _Coord, _Coord, Text]
class TokenError(Exception): ...
class StopTokenizing(Exception): ...
def tokenize(readline: Callable[[], Text], tokeneater: _TokenEater = ...) -> None: ...

class Untokenizer:
    tokens: List[Text]
    prev_row: int
    prev_col: int
    def __init__(self) -> None: ...
    def add_whitespace(self, start: _Coord) -> None: ...
    def untokenize(self, iterable: Iterable[_TokenInfo]) -> Text: ...
    def compat(self, token: Tuple[int, Text], iterable: Iterable[_TokenInfo]) -> None: ...

def untokenize(iterable: Iterable[_TokenInfo]) -> Text: ...
def generate_tokens(
    readline: Callable[[], Text]
) -> Iterator[_TokenInfo]: ...
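
A usage sketch (not part of the diff) showing the _TokenInfo shape: generate_tokens() yields 5-tuples from a readline callable, and the module-level untokenize() reassembles them into source text.

import io
from lib2to3.pgen2 import token
from lib2to3.pgen2.tokenize import generate_tokens, untokenize

src = "x = 1\n"
toks = list(generate_tokens(io.StringIO(src).readline))
print([(token.tok_name[typ], val) for typ, val, _start, _end, _line in toks])
print(untokenize(toks) == src)   # round-trips for this simple input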

@@ -0,0 +1,116 @@
# Stubs for lib2to3.pygram (Python 3.6)
from typing import Any
from lib2to3.pgen2.grammar import Grammar

class Symbols:
    def __init__(self, grammar: Grammar) -> None: ...

class python_symbols(Symbols):
    and_expr: int
    and_test: int
    annassign: int
    arglist: int
    argument: int
    arith_expr: int
    assert_stmt: int
    async_funcdef: int
    async_stmt: int
    atom: int
    augassign: int
    break_stmt: int
    classdef: int
    comp_for: int
    comp_if: int
    comp_iter: int
    comp_op: int
    comparison: int
    compound_stmt: int
    continue_stmt: int
    decorated: int
    decorator: int
    decorators: int
    del_stmt: int
    dictsetmaker: int
    dotted_as_name: int
    dotted_as_names: int
    dotted_name: int
    encoding_decl: int
    eval_input: int
    except_clause: int
    exec_stmt: int
    expr: int
    expr_stmt: int
    exprlist: int
    factor: int
    file_input: int
    flow_stmt: int
    for_stmt: int
    funcdef: int
    global_stmt: int
    if_stmt: int
    import_as_name: int
    import_as_names: int
    import_from: int
    import_name: int
    import_stmt: int
    lambdef: int
    listmaker: int
    not_test: int
    old_lambdef: int
    old_test: int
    or_test: int
    parameters: int
    pass_stmt: int
    power: int
    print_stmt: int
    raise_stmt: int
    return_stmt: int
    shift_expr: int
    simple_stmt: int
    single_input: int
    sliceop: int
    small_stmt: int
    star_expr: int
    stmt: int
    subscript: int
    subscriptlist: int
    suite: int
    term: int
    test: int
    testlist: int
    testlist1: int
    testlist_gexp: int
    testlist_safe: int
    testlist_star_expr: int
    tfpdef: int
    tfplist: int
    tname: int
    trailer: int
    try_stmt: int
    typedargslist: int
    varargslist: int
    vfpdef: int
    vfplist: int
    vname: int
    while_stmt: int
    with_item: int
    with_stmt: int
    with_var: int
    xor_expr: int
    yield_arg: int
    yield_expr: int
    yield_stmt: int

class pattern_symbols(Symbols):
    Alternative: int
    Alternatives: int
    Details: int
    Matcher: int
    NegatedUnit: int
    Repeater: int
    Unit: int

python_grammar: Grammar
python_grammar_no_print_statement: Grammar
pattern_grammar: Grammar
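
For context, a small sketch (illustrative only): at runtime each symbols object exposes one int attribute per grammar nonterminal, and those numbers are exactly the entries of python_grammar.symbol2number, which is why the stub spells every name out.

from lib2to3.pygram import python_grammar, python_symbols

print(python_symbols.funcdef)
print(python_symbols.funcdef == python_grammar.symbol2number["funcdef"])   # True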

@@ -0,0 +1,86 @@
# Stubs for lib2to3.pytree (Python 3.6)
import sys
from typing import Any, Callable, Dict, Iterator, List, Optional, Text, Tuple, TypeVar, Union
from lib2to3.pgen2.grammar import Grammar
_P = TypeVar('_P')
_NL = Union[Node, Leaf]
_Context = Tuple[Text, int, int]
_Results = Dict[Text, _NL]
_RawNode = Tuple[int, Text, _Context, Optional[List[_NL]]]
_Convert = Callable[[Grammar, _RawNode], Any]
HUGE: int
def type_repr(type_num: int) -> Text: ...

class Base:
    type: int
    parent: Optional[Node]
    prefix: Text
    children: List[_NL]
    was_changed: bool
    was_checked: bool
    def __eq__(self, other: Any) -> bool: ...
    def _eq(self: _P, other: _P) -> bool: ...
    def clone(self: _P) -> _P: ...
    def post_order(self) -> Iterator[_NL]: ...
    def pre_order(self) -> Iterator[_NL]: ...
    def replace(self, new: Union[_NL, List[_NL]]) -> None: ...
    def get_lineno(self) -> int: ...
    def changed(self) -> None: ...
    def remove(self) -> Optional[int]: ...
    @property
    def next_sibling(self) -> Optional[_NL]: ...
    @property
    def prev_sibling(self) -> Optional[_NL]: ...
    def leaves(self) -> Iterator[Leaf]: ...
    def depth(self) -> int: ...
    def get_suffix(self) -> Text: ...
    if sys.version_info < (3,):
        def get_prefix(self) -> Text: ...
        def set_prefix(self, prefix: Text) -> None: ...

class Node(Base):
    fixers_applied: List[Any]
    def __init__(self, type: int, children: List[_NL], context: Optional[Any] = ..., prefix: Optional[Text] = ..., fixers_applied: Optional[List[Any]] = ...) -> None: ...
    def set_child(self, i: int, child: _NL) -> None: ...
    def insert_child(self, i: int, child: _NL) -> None: ...
    def append_child(self, child: _NL) -> None: ...

class Leaf(Base):
    lineno: int
    column: int
    value: Text
    fixers_applied: List[Any]
    def __init__(self, type: int, value: Text, context: Optional[_Context] = ..., prefix: Optional[Text] = ..., fixers_applied: List[Any] = ...) -> None: ...

def convert(gr: Grammar, raw_node: _RawNode) -> _NL: ...

class BasePattern:
    type: int
    content: Optional[Text]
    name: Optional[Text]
    def optimize(self) -> BasePattern: ...  # sic, subclasses are free to optimize themselves into different patterns
    def match(self, node: _NL, results: Optional[_Results] = ...) -> bool: ...
    def match_seq(self, nodes: List[_NL], results: Optional[_Results] = ...) -> bool: ...
    def generate_matches(self, nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...

class LeafPattern(BasePattern):
    def __init__(self, type: Optional[int] = ..., content: Optional[Text] = ..., name: Optional[Text] = ...) -> None: ...

class NodePattern(BasePattern):
    wildcards: bool
    def __init__(self, type: Optional[int] = ..., content: Optional[Text] = ..., name: Optional[Text] = ...) -> None: ...

class WildcardPattern(BasePattern):
    min: int
    max: int
    def __init__(self, content: Optional[Text] = ..., min: int = ..., max: int = ..., name: Optional[Text] = ...) -> None: ...

class NegatedPattern(BasePattern):
    def __init__(self, content: Optional[Text] = ...) -> None: ...

def generate_matches(patterns: List[BasePattern], nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...
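
Finally, an illustrative sketch (not part of the diff) tying the tree and pattern halves together: build a Leaf directly and match it with a LeafPattern, capturing it under a name in the _Results dict.

from lib2to3.pgen2 import token
from lib2to3.pytree import Leaf, LeafPattern

leaf = Leaf(token.NUMBER, "42")
pat = LeafPattern(token.NUMBER, name="num")   # content=None matches any value
results = {}
print(pat.match(leaf, results))   # True
print(results["num"] is leaf)     # True: captured under the pattern's name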