18 Commits

Author SHA1 Message Date
Dave Halter
59df3fab43 Some small changes to the changelog 2019-06-20 21:15:53 +02:00
Dave Halter
803cb5f25f Make parso work at least somewhat with an older Jedi version 2019-06-20 20:33:14 +02:00
Dave Halter
3fa8630ba9 Use an immutable map for used names, so that it can be used for hashing 2019-06-18 09:12:33 +02:00
Dave Halter
1ca5ae4008 Bump the version number to the next release: 0.5.0 2019-06-13 17:26:08 +02:00
Dave Halter
c3c16169b5 Ignore positional only arguments slash when listing params 2019-06-09 22:55:37 +02:00
Dave Halter
ecbe2b9926 Add positional only arguments to grammar 2019-06-09 21:15:03 +02:00
Dave Halter
1929c144dc Increase the _PICKLE_VERSION to avoid issues with the latest breaking change 2019-06-09 18:11:21 +02:00
Dave Halter
b5d50392a4 comp_for is now called sync_comp_for for all Python versions to be compatible with the Python 3.8 Grammar 2019-06-09 18:00:32 +02:00
Dave Halter
a7aa23a7f0 Parse named expressions 2019-06-02 23:34:37 +02:00
Dave Halter
5430415d44 Change a test, because it doesn't really matter
The test's behavior changed for Python 3.8, which now raises a syntax error of:

SyntaxError: unexpected EOF while parsing

instead of

SyntaxError: invalid syntax
2019-06-02 22:54:45 +02:00
Dave Halter
6cdd47fe2b f-string syntax in Python 3.8 was enhanced
See e.g. https://twitter.com/raymondh/status/1135253771846471680
2019-06-02 22:48:47 +02:00
Dave Halter
917b4421f3 Fix fstring format spec parsing, fixes #74 2019-06-02 15:18:42 +02:00
Dave Halter
4f5fdd5a70 Add release notes for the next release 0.4.1 2019-06-02 11:28:00 +02:00
prim
93ddf5322a parse long number notation (#72)
* parse long number notation

* parse long number notation
2019-06-02 11:14:15 +02:00
Dave Halter
a9b61149eb Fix get_decorators for async functions 2019-05-27 01:08:42 +02:00
Dave Halter
de416b082e Make it clear that get_last_modified should not raise an exception, but return None, if it cannot look up a file 2019-05-22 00:16:26 +02:00
Carl Meyer
4b440159b1 Fix __init__.pyi re-exports. 2019-05-10 09:12:32 +02:00
Carl Meyer
6f2d2362c9 Add type stubs. 2019-05-10 09:12:32 +02:00
29 changed files with 396 additions and 69 deletions

View File

@@ -3,6 +3,14 @@
 Changelog
 ---------

+0.5.0 (2019-06-20)
+++++++++++++++++++
+
+- **Breaking Change** comp_for is now called sync_comp_for for all Python
+  versions to be compatible with the Python 3.8 Grammar
+- Added .pyi stubs for a lot of the parso API
+- Small FileIO changes
+
 0.4.0 (2019-04-05)
 ++++++++++++++++++
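
A quick way to see the breaking rename from user code (a sketch; assumes parso 0.5.0 is installed):

    import parso

    def find_first(node, node_type):
        # Hypothetical helper: depth-first search for the first node of a type.
        if node.type == node_type:
            return node
        for child in getattr(node, 'children', []):
            result = find_first(child, node_type)
            if result is not None:
                return result
        return None

    module = parso.parse('[x for x in y]')
    # Code that looked nodes up by type must now use 'sync_comp_for':
    assert find_first(module, 'sync_comp_for') is not None
    assert find_first(module, 'comp_for') is None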

View File

@@ -14,7 +14,7 @@ from parso.utils import parse_version_string
 collect_ignore = ["setup.py"]

 VERSIONS_2 = '2.6', '2.7'
-VERSIONS_3 = '3.3', '3.4', '3.5', '3.6', '3.7'
+VERSIONS_3 = '3.3', '3.4', '3.5', '3.6', '3.7', '3.8'

 @pytest.fixture(scope='session')
@@ -155,3 +155,9 @@ def works_ge_py3(each_version):
 def works_ge_py35(each_version):
     version_info = parse_version_string(each_version)
     return Checker(each_version, version_info >= (3, 5))
+
+
+@pytest.fixture
+def works_ge_py38(each_version):
+    version_info = parse_version_string(each_version)
+    return Checker(each_version, version_info >= (3, 8))

View File

@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode

-__version__ = '0.4.0'
+__version__ = '0.5.0'

 def parse(code=None, **kwargs):

parso/__init__.pyi Normal file
View File

@@ -0,0 +1,19 @@
from typing import Any, Optional, Union
from parso.grammar import Grammar as Grammar, load_grammar as load_grammar
from parso.parser import ParserSyntaxError as ParserSyntaxError
from parso.utils import python_bytes_to_unicode as python_bytes_to_unicode, split_lines as split_lines
__version__: str = ...
def parse(
    code: Optional[Union[str, bytes]],
    *,
    version: Optional[str] = None,
    error_recovery: bool = True,
    path: Optional[str] = None,
    start_symbol: Optional[str] = None,
    cache: bool = False,
    diff_cache: bool = False,
    cache_path: Optional[str] = None,
) -> Any: ...
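
With this stub on the path, a type checker can verify calls against parse()'s signature; a hedged sketch (running mypy over it is assumed, not part of this change):

    import parso

    tree = parso.parse("x = 1", version="3.8")  # OK: matches the stub
    # parso.parse("x = 1", "3.8")  # rejected by the stub: version is keyword-only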

View File

@@ -18,7 +18,7 @@ from parso._compatibility import FileNotFoundError
 LOG = logging.getLogger(__name__)

-_PICKLE_VERSION = 31
+_PICKLE_VERSION = 32
 """
 Version number (integer) for file system cache.
@@ -82,9 +82,8 @@ def load_module(hashed_grammar, file_io, cache_path=None):
     """
     Returns a module or None, if it fails.
     """
-    try:
-        p_time = file_io.get_last_modified()
-    except FileNotFoundError:
+    p_time = file_io.get_last_modified()
+    if p_time is None:
         return None
     try:

View File

@@ -14,10 +14,13 @@ class FileIO(object):
     def get_last_modified(self):
         """
-        Returns float - timestamp
-        Might raise FileNotFoundError
+        Returns float - timestamp or None, if path doesn't exist.
         """
-        return os.path.getmtime(self.path)
+        try:
+            return os.path.getmtime(self.path)
+        except OSError:
+            # Might raise FileNotFoundError, OSError for Python 2
+            return None

     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self.path)
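
The relaxed contract matters for custom IO objects; a minimal sketch of a subclass honoring it (InMemoryFileIO is hypothetical, not part of parso):

    from parso.file_io import FileIO

    class InMemoryFileIO(FileIO):
        def __init__(self, path, content):
            super(InMemoryFileIO, self).__init__(path)
            self._content = content

        def read(self):
            return self._content

        def get_last_modified(self):
            # No file backs this object, so follow the new contract:
            # return None instead of raising FileNotFoundError.
            return None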

parso/grammar.pyi Normal file
View File

@@ -0,0 +1,38 @@
from typing import Any, Callable, Generic, Optional, Sequence, TypeVar, Union
from typing_extensions import Literal
from parso.utils import PythonVersionInfo

_Token = Any
_NodeT = TypeVar("_NodeT")

class Grammar(Generic[_NodeT]):
    _default_normalizer_config: Optional[Any] = ...
    _error_normalizer_config: Optional[Any] = None
    _start_nonterminal: str = ...
    _token_namespace: Optional[str] = None
    def __init__(
        self,
        text: str,
        tokenizer: Callable[[Sequence[str], int], Sequence[_Token]],
        parser: Any = ...,
        diff_parser: Any = ...,
    ) -> None: ...
    def parse(
        self,
        code: Union[str, bytes] = ...,
        error_recovery: bool = ...,
        path: Optional[str] = ...,
        start_symbol: Optional[str] = ...,
        cache: bool = ...,
        diff_cache: bool = ...,
        cache_path: Optional[str] = ...,
    ) -> _NodeT: ...

class PythonGrammar(Grammar):
    version_info: PythonVersionInfo
    def __init__(self, version_info: PythonVersionInfo, bnf_text: str) -> None: ...

def load_grammar(
    language: Literal["python"] = "python", version: Optional[str] = ..., path: str = ...
) -> Grammar: ...

View File

@@ -41,8 +41,8 @@ class Normalizer(use_metaclass(_NormalizerMeta)):
         except AttributeError:
             return self.visit_leaf(node)
         else:
             with self.visit_node(node):
                 return ''.join(self.visit(child) for child in children)

     @contextmanager
     def visit_node(self, node):
@@ -147,7 +147,6 @@ class Issue(object):
         return '<%s: %s>' % (self.__class__.__name__, self.code)

 class Rule(object):
     code = None
     message = None

parso/pgen2/__init__.pyi Normal file
View File

@@ -0,0 +1 @@
from parso.pgen2.generator import generate_grammar as generate_grammar

parso/pgen2/generator.pyi Normal file
View File

@@ -0,0 +1,38 @@
from typing import Any, Generic, Mapping, Sequence, Set, TypeVar, Union
from parso.pgen2.grammar_parser import NFAState

_TokenTypeT = TypeVar("_TokenTypeT")

class Grammar(Generic[_TokenTypeT]):
    nonterminal_to_dfas: Mapping[str, Sequence[DFAState[_TokenTypeT]]]
    reserved_syntax_strings: Mapping[str, ReservedString]
    start_nonterminal: str
    def __init__(
        self,
        start_nonterminal: str,
        rule_to_dfas: Mapping[str, Sequence[DFAState]],
        reserved_syntax_strings: Mapping[str, ReservedString],
    ) -> None: ...

class DFAPlan:
    next_dfa: DFAState
    dfa_pushes: Sequence[DFAState]

class DFAState(Generic[_TokenTypeT]):
    from_rule: str
    nfa_set: Set[NFAState]
    is_final: bool
    arcs: Mapping[str, DFAState]  # map from all terminals/nonterminals to DFAState
    nonterminal_arcs: Mapping[str, DFAState]
    transitions: Mapping[Union[_TokenTypeT, ReservedString], DFAPlan]
    def __init__(
        self, from_rule: str, nfa_set: Set[NFAState], final: NFAState
    ) -> None: ...

class ReservedString:
    value: str
    def __init__(self, value: str) -> None: ...
    def __repr__(self) -> str: ...

def generate_grammar(bnf_grammar: str, token_namespace: Any) -> Grammar[Any]: ...
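
These stubs describe pgen2's entry point; a hedged usage sketch (the one-rule BNF grammar here is made up for illustration):

    from parso.pgen2 import generate_grammar
    from parso.python.token import PythonTokenTypes

    bnf_text = 'file_input: NAME NEWLINE ENDMARKER\n'  # hypothetical toy grammar
    grammar = generate_grammar(bnf_text, PythonTokenTypes)
    print(grammar.start_nonterminal)  # expected: 'file_input', the first rule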

View File

@@ -0,0 +1,20 @@
from typing import Generator, List, Optional, Tuple
from parso.python.token import TokenType

class GrammarParser:
    generator: Generator[TokenType, None, None]
    def __init__(self, bnf_grammar: str) -> None: ...
    def parse(self) -> Generator[Tuple[NFAState, NFAState], None, None]: ...

class NFAArc:
    next: NFAState
    nonterminal_or_string: Optional[str]
    def __init__(
        self, next_: NFAState, nonterminal_or_string: Optional[str]
    ) -> None: ...

class NFAState:
    from_rule: str
    arcs: List[NFAArc]
    def __init__(self, from_rule: str) -> None: ...

View File

@@ -953,20 +953,17 @@ class _CheckAssignmentRule(SyntaxRule):
         self.add_issue(node, message=message)

-@ErrorFinder.register_rule(type='comp_for')
 @ErrorFinder.register_rule(type='sync_comp_for')
 class _CompForRule(_CheckAssignmentRule):
     message = "asynchronous comprehension outside of an asynchronous function"

     def is_issue(self, node):
-        # Some of the nodes here are already used, so no else if
-        if node.type != 'comp_for' or self._normalizer.version < (3, 8):
-            # comp_for was replaced by sync_comp_for in Python 3.8.
-            expr_list = node.children[1 + int(node.children[0] == 'async')]
-            if expr_list.type != 'expr_list':  # Already handled.
-                self._check_assignment(expr_list)
+        expr_list = node.children[1]
+        print(expr_list)
+        if expr_list.type != 'expr_list':  # Already handled.
+            self._check_assignment(expr_list)

-        return node.children[0] == 'async' \
+        return node.parent.children[0] == 'async' \
             and not self._normalizer.context.is_async_funcdef()
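
The rule above can be exercised through the public normalizer API; a short sketch (Grammar.iter_errors existed before this change):

    import parso

    grammar = parso.load_grammar(version='3.6')
    module = grammar.parse(
        'async def ok():\n'
        '    return [x async for x in y]\n'
        '[x async for x in y]\n'
    )
    for issue in grammar.iter_errors(module):
        print(issue.start_pos, issue.message)
    # Only the module-level comprehension should be reported as an
    # asynchronous comprehension outside of an asynchronous function.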

View File

@@ -107,7 +107,7 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        NAME | NUMBER | strings)
 strings: STRING+
 listmaker: test ( list_for | (',' test)* [','] )
-testlist_comp: test ( comp_for | (',' test)* [','] )
+testlist_comp: test ( sync_comp_for | (',' test)* [','] )
 lambdef: 'lambda' [varargslist] ':' test
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
@@ -115,8 +115,8 @@ subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: expr (',' expr)* [',']
 testlist: test (',' test)* [',']
-dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
-                  (test (comp_for | (',' test)* [','])) )
+dictorsetmaker: ( (test ':' test (sync_comp_for | (',' test ':' test)* [','])) |
+                  (test (sync_comp_for | (',' test)* [','])) )
 classdef: 'class' NAME ['(' [testlist] ')'] ':' suite
@@ -125,14 +125,14 @@ arglist: (argument ',')* (argument [','
                          |'**' test)
 # The reason that keywords are test nodes instead of NAME is that using NAME
 # results in an ambiguity. ast.c makes sure it's a NAME.
-argument: test [comp_for] | test '=' test
+argument: test [sync_comp_for] | test '=' test
 list_iter: list_for | list_if
 list_for: 'for' exprlist 'in' testlist_safe [list_iter]
 list_if: 'if' old_test [list_iter]
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' old_test [comp_iter]
 testlist1: test (',' test)*

View File

@@ -105,15 +105,15 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
 strings: STRING+
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (test|star_expr) ( sync_comp_for | (',' (test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
-dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
-                  (test (comp_for | (',' test)* [','])) )
+dictorsetmaker: ( (test ':' test (sync_comp_for | (',' test ':' test)* [','])) |
+                  (test (sync_comp_for | (',' test)* [','])) )
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
@@ -122,9 +122,9 @@ arglist: (argument ',')* (argument [','
                          |'**' test)
 # The reason that keywords are test nodes instead of NAME is that using NAME
 # results in an ambiguity. ast.c makes sure it's a NAME.
-argument: test [comp_for] | test '=' test  # Really [keyword '='] test
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+argument: test [sync_comp_for] | test '=' test  # Really [keyword '='] test
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' test_nocond [comp_iter]
 # not used in grammar, but may appear in "node" passed from Parser to Compiler

View File

@@ -105,15 +105,15 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
 strings: STRING+
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (test|star_expr) ( sync_comp_for | (',' (test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
-dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
-                  (test (comp_for | (',' test)* [','])) )
+dictorsetmaker: ( (test ':' test (sync_comp_for | (',' test ':' test)* [','])) |
+                  (test (sync_comp_for | (',' test)* [','])) )
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
@@ -122,9 +122,9 @@ arglist: (argument ',')* (argument [','
                          |'**' test)
 # The reason that keywords are test nodes instead of NAME is that using NAME
 # results in an ambiguity. ast.c makes sure it's a NAME.
-argument: test [comp_for] | test '=' test  # Really [keyword '='] test
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+argument: test [sync_comp_for] | test '=' test  # Really [keyword '='] test
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' test_nocond [comp_iter]
 # not used in grammar, but may appear in "node" passed from Parser to Compiler

View File

@@ -112,7 +112,7 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
 strings: STRING+
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (test|star_expr) ( sync_comp_for | (',' (test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
@@ -120,9 +120,9 @@ sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
 dictorsetmaker: ( ((test ':' test | '**' expr)
-                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                   (sync_comp_for | (',' (test ':' test | '**' expr))* [','])) |
                   ((test | star_expr)
-                   (comp_for | (',' (test | star_expr))* [','])) )
+                   (sync_comp_for | (',' (test | star_expr))* [','])) )
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
@@ -137,13 +137,13 @@ arglist: argument (',' argument)* [',']
 # Illegal combinations and orderings are blocked in ast.c:
 # multiple (test comp_for) arguments are blocked; keyword unpackings
 # that precede iterable unpackings are blocked; etc.
-argument: ( test [comp_for] |
+argument: ( test [sync_comp_for] |
             test '=' test |
             '**' test |
             '*' test )
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' test_nocond [comp_iter]
 # not used in grammar, but may appear in "node" passed from Parser to Compiler

View File

@@ -140,7 +140,8 @@ argument: ( test [comp_for] |
             '*' test )
 comp_iter: comp_for | comp_if
-comp_for: ['async'] 'for' exprlist 'in' or_test [comp_iter]
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
 comp_if: 'if' test_nocond [comp_iter]
 # not used in grammar, but may appear in "node" passed from Parser to Compiler

View File

@@ -138,7 +138,8 @@ argument: ( test [comp_for] |
             '*' test )
 comp_iter: comp_for | comp_if
-comp_for: ['async'] 'for' exprlist 'in' or_test [comp_iter]
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
 comp_if: 'if' test_nocond [comp_iter]
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
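
With comp_for reduced to ['async'] sync_comp_for, parso only keeps a comp_for node when 'async' is actually present; otherwise the single sync_comp_for child stands alone in the tree. A sketch (parso 0.5.0; find_first is a hypothetical helper):

    import parso

    def find_first(node, node_type):
        if node.type == node_type:
            return node
        for child in getattr(node, 'children', []):
            result = find_first(child, node_type)
            if result is not None:
                return result
        return None

    sync = find_first(parso.parse('[x for x in y]', version='3.7'), 'sync_comp_for')
    print(sync.parent.type)  # 'testlist_comp': no comp_for wrapper

    a = find_first(parso.parse('[x async for x in y]', version='3.7'), 'sync_comp_for')
    print(a.parent.type)     # 'comp_for': wraps the 'async' keyword and sync_comp_for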

View File

@@ -20,13 +20,25 @@ async_funcdef: 'async' funcdef
 funcdef: 'def' NAME parameters ['->' test] ':' suite
 parameters: '(' [typedargslist] ')'
-typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
+        ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+        | '**' tfpdef [',']]])
+  | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+  | '**' tfpdef [',']]] )
+| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
         '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
         | '**' tfpdef [',']]]
   | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
   | '**' tfpdef [','])
+)
 tfpdef: NAME [':' test]
-varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+        | '**' vfpdef [',']]]
+    | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+    | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
         '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
         | '**' vfpdef [',']]]
   | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
   | '**' vfpdef [','])
@@ -69,8 +81,8 @@ assert_stmt: 'assert' test [',' test]
 compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
 async_stmt: 'async' (funcdef | with_stmt | for_stmt)
-if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
-while_stmt: 'while' test ':' suite ['else' ':' suite]
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
 for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
 try_stmt: ('try' ':' suite
            ((except_clause ':' suite)+
@@ -83,6 +95,7 @@ with_item: test ['as' expr]
 except_clause: 'except' [test ['as' NAME]]
 suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+namedexpr_test: test [':=' test]
 test: or_test ['if' or_test 'else' test] | lambdef
 test_nocond: or_test | lambdef_nocond
 lambdef: 'lambda' [varargslist] ':' test
@@ -108,7 +121,7 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '[' [testlist_comp] ']' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
@@ -134,6 +147,7 @@ arglist: argument (',' argument)* [',']
 # multiple (test comp_for) arguments are blocked; keyword unpackings
 # that precede iterable unpackings are blocked; etc.
 argument: ( test [comp_for] |
+            test ':=' test |
             test '=' test |
             '**' test |
             '*' test )
@@ -153,5 +167,5 @@ strings: (STRING | fstring)+
 fstring: FSTRING_START fstring_content* FSTRING_END
 fstring_content: FSTRING_STRING | fstring_expr
 fstring_conversion: '!' NAME
-fstring_expr: '{' testlist [ fstring_conversion ] [ fstring_format_spec ] '}'
+fstring_expr: '{' testlist ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
 fstring_format_spec: ':' fstring_content*
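
A smoke test of the 3.8 additions from the public API (a sketch; assumes parso 0.5.0):

    import parso

    # Assignment expressions in an if-statement condition:
    module = parso.parse('if (n := 10) > 5:\n    pass\n', version='3.8')
    print(module.children[0].type)  # 'if_stmt'

    # The new f-string debugging form also parses and round-trips:
    module = parso.parse("f'{a=}'", version='3.8')
    print(module.get_code())  # f'{a=}'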

View File

@@ -39,13 +39,13 @@ class Parser(BaseParser):
         'for_stmt': tree.ForStmt,
         'while_stmt': tree.WhileStmt,
         'try_stmt': tree.TryStmt,
-        'comp_for': tree.CompFor,
+        'sync_comp_for': tree.SyncCompFor,
         # Not sure if this is the best idea, but IMO it's the easiest way to
         # avoid extreme amounts of work around the subtle difference of 2/3
         # grammar in list comoprehensions.
-        'list_for': tree.CompFor,
+        'list_for': tree.SyncCompFor,
         # Same here. This just exists in Python 2.6.
-        'gen_for': tree.CompFor,
+        'gen_for': tree.SyncCompFor,
         'decorator': tree.Decorator,
         'lambdef': tree.Lambda,
         'old_lambdef': tree.Lambda,

parso/python/token.pyi Normal file
View File

@@ -0,0 +1,30 @@
from typing import Container, Iterable

class TokenType:
    name: str
    contains_syntax: bool
    def __init__(self, name: str, contains_syntax: bool) -> None: ...

class TokenTypes:
    def __init__(
        self, names: Iterable[str], contains_syntax: Container[str]
    ) -> None: ...

# not an actual class in the source code, but we need this class to type the fields of
# PythonTokenTypes
class _FakePythonTokenTypesClass(TokenTypes):
    STRING: TokenType
    NUMBER: TokenType
    NAME: TokenType
    ERRORTOKEN: TokenType
    NEWLINE: TokenType
    INDENT: TokenType
    DEDENT: TokenType
    ERROR_DEDENT: TokenType
    FSTRING_STRING: TokenType
    FSTRING_START: TokenType
    FSTRING_END: TokenType
    OP: TokenType
    ENDMARKER: TokenType

PythonTokenTypes: _FakePythonTokenTypesClass = ...

View File

@@ -120,6 +120,8 @@ def _get_token_collection(version_info):
 fstring_string_single_line = _compile(r'(?:[^{}\r\n]+|\{\{|\}\})+')
 fstring_string_multi_line = _compile(r'(?:[^{}]+|\{\{|\}\})+')
+fstring_format_spec_single_line = _compile(r'[^{}\r\n]+')
+fstring_format_spec_multi_line = _compile(r'[^{}]+')

 def _create_token_collection(version_info):
@@ -151,6 +153,8 @@ def _create_token_collection(version_info):
     Octnumber = '0[oO]?[0-7]+'
     Decnumber = r'(?:0+|[1-9][0-9]*)'
     Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+    if version_info[0] < 3:
+        Intnumber += '[lL]?'
     Exponent = r'[eE][-+]?[0-9]+'
     Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
     Expfloat = r'[0-9]+' + Exponent
@@ -186,9 +190,13 @@ def _create_token_collection(version_info):
     Bracket = '[][(){}]'
-    special_args = [r'\r\n?', r'\n', r'[:;.,@]']
+    special_args = [r'\r\n?', r'\n', r'[;.,@]']
     if version_info >= (3, 0):
         special_args.insert(0, r'\.\.\.')
+    if version_info >= (3, 8):
+        special_args.insert(0, ":=?")
+    else:
+        special_args.insert(0, ":")
     Special = group(*special_args)
     Funny = group(Operator, Bracket, Special)
@@ -281,7 +289,10 @@ class FStringNode(object):
         return len(self.quote) == 3

     def is_in_expr(self):
-        return (self.parentheses_count - self.format_spec_count) > 0
+        return self.parentheses_count > self.format_spec_count
+
+    def is_in_format_spec(self):
+        return not self.is_in_expr() and self.format_spec_count

 def _close_fstring_if_necessary(fstring_stack, string, start_pos, additional_prefix):
@@ -303,10 +314,18 @@ def _close_fstring_if_necessary(fstring_stack, string, start_pos, additional_prefix):
 def _find_fstring_string(endpats, fstring_stack, line, lnum, pos):
     tos = fstring_stack[-1]
     allow_multiline = tos.allow_multiline()
-    if allow_multiline:
-        match = fstring_string_multi_line.match(line, pos)
+    if tos.is_in_format_spec():
+        if allow_multiline:
+            regex = fstring_format_spec_multi_line
+        else:
+            regex = fstring_format_spec_single_line
     else:
-        match = fstring_string_single_line.match(line, pos)
+        if allow_multiline:
+            regex = fstring_string_multi_line
+        else:
+            regex = fstring_string_single_line
+    match = regex.match(line, pos)
     if match is None:
         return tos.previous_lines, pos
@@ -575,7 +594,8 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                 if paren_level:
                     paren_level -= 1
             elif token == ':' and fstring_stack \
-                    and fstring_stack[-1].parentheses_count == 1:
+                    and fstring_stack[-1].parentheses_count \
+                        - fstring_stack[-1].format_spec_count == 1:
                 fstring_stack[-1].format_spec_count += 1
             yield PythonToken(OP, token, spos, prefix)
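
The ":=?" special pattern makes 3.8 emit the walrus as one OP token, while older versions still tokenize ':' and '=' separately; a sketch against the internal tokenizer (signatures as in the stubs below):

    from parso.python.tokenize import tokenize
    from parso.utils import parse_version_string

    def op_strings(code, version):
        return [token.string
                for token in tokenize(code, parse_version_string(version))
                if token.string in (':', '=', ':=')]

    print(op_strings('a := 1\n', '3.8'))  # [':=']
    print(op_strings('a := 1\n', '3.7'))  # [':', '=']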

parso/python/tokenize.pyi Normal file
View File

@@ -0,0 +1,24 @@
from typing import Generator, Iterable, NamedTuple, Tuple
from parso.python.token import TokenType
from parso.utils import PythonVersionInfo

class Token(NamedTuple):
    type: TokenType
    string: str
    start_pos: Tuple[int, int]
    prefix: str
    @property
    def end_pos(self) -> Tuple[int, int]: ...

class PythonToken(Token):
    def __repr__(self) -> str: ...

def tokenize(
    code: str, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0)
) -> Generator[PythonToken, None, None]: ...

def tokenize_lines(
    lines: Iterable[str],
    version_info: PythonVersionInfo,
    start_pos: Tuple[int, int] = (1, 0),
) -> Generator[PythonToken, None, None]: ...

View File

@@ -43,6 +43,7 @@ Parser Tree Classes
 """
 import re
+from collections import Mapping

 from parso._compatibility import utf8_repr, unicode
 from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, \
@@ -55,7 +56,7 @@ _FLOW_CONTAINERS = set(['if_stmt', 'while_stmt', 'for_stmt', 'try_stmt',
 _RETURN_STMT_CONTAINERS = set(['suite', 'simple_stmt']) | _FLOW_CONTAINERS
 _FUNC_CONTAINERS = set(['suite', 'simple_stmt', 'decorated']) | _FLOW_CONTAINERS
 _GET_DEFINITION_TYPES = set([
-    'expr_stmt', 'comp_for', 'with_stmt', 'for_stmt', 'import_name',
+    'expr_stmt', 'sync_comp_for', 'with_stmt', 'for_stmt', 'import_name',
     'import_from', 'param'
 ])
 _IMPORTS = set(['import_name', 'import_from'])
@@ -442,7 +443,7 @@ class Module(Scope):
                 recurse(child)
         recurse(self)
-        self._used_names = dct
+        self._used_names = UsedNamesMapping(dct)
         return self._used_names
@@ -466,6 +467,9 @@ class ClassOrFunc(Scope):
         :rtype: list of :class:`Decorator`
         """
         decorated = self.parent
+        if decorated.type == 'async_funcdef':
+            decorated = decorated.parent
+
         if decorated.type == 'decorated':
             if decorated.children[0].type == 'decorators':
                 return decorated.children[0].children
@@ -545,7 +549,8 @@ def _create_params(parent, argslist_list):
             if param_children[0] == '*' \
                     and (len(param_children) == 1
                          or param_children[1] == ',') \
-                    or check_python2_nested_param(param_children[0]):
+                    or check_python2_nested_param(param_children[0]) \
+                    or param_children[0] == '/':
                 for p in param_children:
                     p.parent = parent
                 new_children += param_children
@@ -1158,6 +1163,13 @@ class Param(PythonBaseNode):
                 index -= 2
             except ValueError:
                 pass
+            try:
+                keyword_only_index = self.parent.children.index('/')
+                if index > keyword_only_index:
+                    # Skip the ` /, `
+                    index -= 2
+            except ValueError:
+                pass
         return index - 1

     def get_parent_function(self):
@@ -1189,8 +1201,8 @@ class Param(PythonBaseNode):
         return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default)

-class CompFor(PythonBaseNode):
-    type = 'comp_for'
+class SyncCompFor(PythonBaseNode):
+    type = 'sync_comp_for'
     __slots__ = ()

     def get_defined_names(self):
@@ -1198,4 +1210,33 @@ class CompFor(PythonBaseNode):
         """
         Returns the a list of `Name` that the comprehension defines.
         """
         # allow async for
-        return _defined_names(self.children[self.children.index('for') + 1])
+        return _defined_names(self.children[1])
+
+
+# This is simply here so an older Jedi version can work with this new parso
+# version. Can be deleted in the next release.
+CompFor = SyncCompFor
+
+
+class UsedNamesMapping(Mapping):
+    """
+    This class exists for the sole purpose of creating an immutable dict.
+    """
+    def __init__(self, dct):
+        self._dict = dct
+
+    def __getitem__(self, key):
+        return self._dict[key]
+
+    def __len__(self):
+        return len(self._dict)
+
+    def __iter__(self):
+        return iter(self._dict)
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, other):
+        # Comparing these dicts does not make sense.
+        return self is other
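
The practical effect of UsedNamesMapping, seen from the public API (a sketch):

    import parso

    module = parso.parse('foo = 1\nfoo\n')
    used_names = module.get_used_names()
    print(sorted(used_names))      # ['foo']
    print(len(used_names['foo']))  # 2: the definition and the reference
    hash(used_names)               # now works; a plain dict would raise TypeError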

parso/utils.pyi Normal file
View File

@@ -0,0 +1,29 @@
from typing import NamedTuple, Optional, Sequence, Union

class Version(NamedTuple):
    major: int
    minor: int
    micro: int

def split_lines(string: str, keepends: bool = ...) -> Sequence[str]: ...
def python_bytes_to_unicode(
    source: Union[str, bytes], encoding: str = ..., errors: str = ...
) -> str: ...
def version_info() -> Version:
    """
    Returns a namedtuple of parso's version, similar to Python's
    ``sys.version_info``.
    """
    ...

class PythonVersionInfo(NamedTuple):
    major: int
    minor: int

def parse_version_string(version: Optional[str]) -> PythonVersionInfo:
    """
    Checks for a valid version number (e.g. `3.2` or `2.7.1` or `3`) and
    returns a corresponding version info that is always two characters long in
    decimal.
    """
    ...

View File

@@ -146,7 +146,7 @@ FAILING_EXAMPLES = [
     # Now nested parsing
     "f'{continue}'",
     "f'{1;1}'",
-    "f'{a=3}'",
+    "f'{a;}'",
     "f'{b\"\" \"\"}'",
 ]

View File

@@ -7,7 +7,7 @@ from parso.python.tokenize import tokenize
 @pytest.fixture
 def grammar():
-    return load_grammar(version='3.6')
+    return load_grammar(version='3.8')

 @pytest.mark.parametrize(
@@ -21,6 +21,9 @@ def grammar():
         '{1:1.{32}}',
         '{1::>4}',
         '{foo} {bar}',
+        '{x:{y}}',
+        '{x:{y:}}',
+        '{x:{y:1}}',

         # Escapes
         '{{}}',
@@ -28,6 +31,10 @@ def grammar():
         '{{{1}',
         '1{{2{{3',
         '}}',
+
+        # New Python 3.8 syntax f'{a=}'
+        '{a=}',
+        '{a()=}',
     ]
 )
 def test_valid(code, grammar):
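
The newly valid nested format specs round-trip through the parser; a small sketch:

    import parso

    module = parso.parse("f'{x:{y:1}}'", version='3.8')
    print(module.get_code())  # f'{x:{y:1}}'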

View File

@@ -189,3 +189,22 @@ def test_no_error_nodes(each_version):
             check(child)

     check(parse("if foo:\n bar", version=each_version))
+
+
+def test_named_expression(works_ge_py38):
+    works_ge_py38.parse("(a := 1, a + 1)")
+
+
+@pytest.mark.parametrize(
+    'param_code', [
+        'a=1, /',
+        'a, /',
+        'a=1, /, b=3',
+        'a, /, b',
+        'a, /, b',
+        'a, /, *, b',
+        'a, /, **kwargs',
+    ]
+)
+def test_positional_only_arguments(works_ge_py38, param_code):
+    works_ge_py38.parse("def x(%s): pass" % param_code)

View File

@@ -190,6 +190,19 @@ def test_old_octal_notation(works_in_py2):
     works_in_py2.parse("07")


+def test_long_notation(works_in_py2):
+    works_in_py2.parse("0xFl")
+    works_in_py2.parse("0xFL")
+    works_in_py2.parse("0b1l")
+    works_in_py2.parse("0B1L")
+    works_in_py2.parse("0o7l")
+    works_in_py2.parse("0O7L")
+    works_in_py2.parse("0l")
+    works_in_py2.parse("0L")
+    works_in_py2.parse("10l")
+    works_in_py2.parse("10L")
+
+
 def test_new_binary_notation(each_version):
     _parse("""0b101010""", each_version)
     _invalid_syntax("""0b0101021""", each_version)
_invalid_syntax("""0b0101021""", each_version) _invalid_syntax("""0b0101021""", each_version)