Mirror of https://github.com/davidhalter/parso.git (synced 2025-12-07 05:14:29 +08:00)

Compare commits

24 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | ee5edaf22f |  |
|  | 285492f4ed |  |
|  | da3a7488f9 |  |
|  | c5e8602cae |  |
|  | ae491cbf55 |  |
|  | 9f32dde163 |  |
|  | d26d0d57fe |  |
|  | 5570975a7d |  |
|  | e1523014e4 |  |
|  | 7652d3904b |  |
|  | ed47650fbe |  |
|  | 60fed7b9f8 |  |
|  | 7000dd24d7 |  |
|  | 86f3f1096b |  |
|  | f2b1ff9429 |  |
|  | cbb61fb819 |  |
|  | 966d5446eb |  |
|  | b42135fb1a |  |
|  | d76c890667 |  |
|  | 885f623c4b |  |
|  | b5429ccbdc |  |
|  | 60ec880422 |  |
|  | bd03b21446 |  |
|  | 8dee324d0c |  |
@@ -4,6 +4,8 @@ source = parso
 [report]
 # Regexes for lines to exclude from consideration
 exclude_lines =
     pragma: no cover
 
+    # Don't complain about missing debug-only code:
+    def __repr__
68 .github/workflows/build.yml (vendored, new file)

@@ -0,0 +1,68 @@
name: Build

on: [push, pull_request]

env:
  PYTEST_ADDOPTS: --color=yes

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install .[qa]
      - name: Run Flake8
        # Ignore F401, which are unused imports. flake8 is a primitive tool and is sometimes wrong.
        run: flake8 --extend-ignore F401 parso test/*.py setup.py scripts/
      - name: Run Mypy
        run: mypy parso

  test:
    runs-on: ubuntu-latest
    continue-on-error: ${{ matrix.experimental }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ['3.6', '3.7', '3.8', '3.9']
        experimental: [false]
        # include:
        #   - python-version: '3.10-dev'
        #     experimental: true
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install .[testing]
      - name: Run pytest
        run: pytest

  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install .[testing] coverage coveralls
      - name: Run pytest with coverage
        run: |
          coverage run -m pytest
          coverage report
      - name: Upload coverage report to Coveralls
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: coveralls --service=github
1 .gitignore (vendored)

@@ -11,3 +11,4 @@ parso.egg-info/
 /.pytest_cache
 test/fuzz-redo.pickle
 /venv/
+/htmlcov/
31 .travis.yml (deleted)

@@ -1,31 +0,0 @@
-dist: xenial
-language: python
-python:
-  - 3.6
-  - 3.7
-  - 3.8.2
-  - nightly
-matrix:
-  allow_failures:
-    - python: nightly
-  include:
-    - python: 3.8
-      install:
-        - 'pip install .[qa]'
-      script:
-        # Ignore F401, which are unused imports. flake8 is a primitive tool and is sometimes wrong.
-        - 'flake8 --extend-ignore F401 parso test/*.py setup.py scripts/'
-        - mypy parso
-    - python: 3.8.2
-      script:
-        - 'pip install coverage'
-        - 'coverage run -m pytest'
-        - 'coverage report'
-      after_script:
-        - |
-          pip install --quiet coveralls
-          coveralls
-install:
-  - pip install .[testing]
-script:
-  - pytest
@@ -6,6 +6,11 @@ Changelog
 Unreleased
 ++++++++++
 
+0.8.3 (2021-11-30)
+++++++++++++++++++
+
+- Add basic support for Python 3.11 and 3.12
+
 0.8.2 (2021-03-30)
 ++++++++++++++++++
@@ -3,9 +3,9 @@ parso - A Python Parser
 ###################################################################
 
 
-.. image:: https://travis-ci.org/davidhalter/parso.svg?branch=master
-    :target: https://travis-ci.org/davidhalter/parso
-    :alt: Travis CI build status
+.. image:: https://github.com/davidhalter/parso/workflows/Build/badge.svg?branch=master
+    :target: https://github.com/davidhalter/parso/actions
+    :alt: GitHub Actions build status
 
 .. image:: https://coveralls.io/repos/github/davidhalter/parso/badge.svg?branch=master
     :target: https://coveralls.io/github/davidhalter/parso?branch=master

@@ -34,5 +34,5 @@ easy as::
 
     python3.9 -m pytest
 
-Tests are also run automatically on `Travis CI
-<https://travis-ci.org/davidhalter/parso/>`_.
+Tests are also run automatically on `GitHub Actions
+<https://github.com/davidhalter/parso/actions>`_.
@@ -27,5 +27,5 @@ Resources
 ---------
 
 - `Source Code on Github <https://github.com/davidhalter/parso>`_
-- `Travis Testing <https://travis-ci.org/davidhalter/parso>`_
+- `GitHub Actions Testing <https://github.com/davidhalter/parso/actions>`_
 - `Python Package Index <http://pypi.python.org/pypi/parso/>`_
@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.8.2'
+__version__ = '0.8.3'
 
 
 def parse(code=None, **kwargs):
@@ -23,7 +23,7 @@ within the statement. This lowers memory usage and cpu time and reduces the
 complexity of the ``Parser`` (there's another parser sitting inside
 ``Statement``, which produces ``Array`` and ``Call``).
 """
-from typing import Dict
+from typing import Dict, Type
 
 from parso import tree
 from parso.pgen2.generator import ReservedString

@@ -110,10 +110,10 @@ class BaseParser:
     When a syntax error occurs, error_recovery() is called.
     """
 
-    node_map: Dict[str, type] = {}
+    node_map: Dict[str, Type[tree.BaseNode]] = {}
     default_node = tree.Node
 
-    leaf_map: Dict[str, type] = {}
+    leaf_map: Dict[str, Type[tree.Leaf]] = {}
     default_leaf = tree.Leaf
 
     def __init__(self, pgen_grammar, start_nonterminal='file_input', error_recovery=False):

@@ -156,8 +156,6 @@ class BaseParser:
             node = self.node_map[nonterminal](children)
         except KeyError:
             node = self.default_node(nonterminal, children)
-        for c in children:
-            c.parent = node
         return node
 
     def convert_leaf(self, type_, value, prefix, start_pos):
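The tighter annotations above only pay off at type-check time. A minimal, self-contained sketch of the same dispatch pattern — the class names here are hypothetical stand-ins, not parso's real node types:

from typing import Dict, List, Type


class DemoNode:  # hypothetical stand-in for parso's tree.BaseNode
    def __init__(self, type_: str, children: List[object]) -> None:
        self.type = type_
        self.children = children


class DemoIfStmt(DemoNode):  # hypothetical specialised node
    def __init__(self, children: List[object]) -> None:
        super().__init__('if_stmt', children)


# Dict[str, Type[DemoNode]] lets mypy reject non-node registrations that
# the old Dict[str, type] silently accepted.
node_map: Dict[str, Type[DemoNode]] = {'if_stmt': DemoIfStmt}


def convert_node(nonterminal: str, children: List[object]) -> DemoNode:
    # Mirrors BaseParser.convert_node: a specific class if one is mapped,
    # the generic node otherwise.
    try:
        return node_map[nonterminal](children)
    except KeyError:
        return DemoNode(nonterminal, children)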
@@ -5,7 +5,6 @@ import re
 from contextlib import contextmanager
 
 from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
-from parso.python.tree import search_ancestor
 from parso.python.tokenize import _get_token_collection
 
 _BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')

@@ -231,7 +230,7 @@ def _any_fstring_error(version, node):
     elif node.type == "fstring":
         return True
     else:
-        return search_ancestor(node, "fstring")
+        return node.search_ancestor("fstring")
 
 
 class _Context:

@@ -1265,7 +1264,7 @@ class _NamedExprRule(_CheckAssignmentRule):
         def search_all_comp_ancestors(node):
             has_ancestors = False
             while True:
-                node = search_ancestor(node, 'testlist_comp', 'dictorsetmaker')
+                node = node.search_ancestor('testlist_comp', 'dictorsetmaker')
                 if node is None:
                     break
                 for child in node.children:
@@ -97,9 +97,7 @@ suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
 
 namedexpr_test: test [':=' test]
 test: or_test ['if' or_test 'else' test] | lambdef
-test_nocond: or_test | lambdef_nocond
 lambdef: 'lambda' [varargslist] ':' test
-lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
 or_test: and_test ('or' and_test)*
 and_test: not_test ('and' not_test)*
 not_test: 'not' not_test | comparison

@@ -155,7 +153,7 @@ argument: ( test [comp_for] |
 comp_iter: comp_for | comp_if
 sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_for: ['async'] sync_comp_for
-comp_if: 'if' test_nocond [comp_iter]
+comp_if: 'if' or_test [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
 encoding_decl: NAME
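The practical effect of the comp_if change: with `'if' or_test` the grammar no longer derives a bare lambdef after `if` in a comprehension, so it must be parenthesized — mirroring the test_lambda_in_comp_if test added further down. A quick check against parso's public API:

import parso

grammar = parso.load_grammar(version='3.9')

bad = grammar.parse('[x for x in range(10) if lambda: 1]')
good = grammar.parse('[x for x in range(10) if (lambda: 1)]')

# The bare lambda is flagged; the parenthesized form parses cleanly.
assert len(list(grammar.iter_errors(bad))) > 0
assert len(list(grammar.iter_errors(good))) == 0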
169 parso/python/grammar311.txt (new file)

@@ -0,0 +1,169 @@
# Grammar for Python

# NOTE WELL: You should also follow all the steps listed at
# https://devguide.python.org/grammar/

# Start symbols for the grammar:
#       single_input is a single interactive statement;
#       file_input is a module or sequence of commands read from an input file;
#       eval_input is the input for the eval() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
file_input: stmt* ENDMARKER
eval_input: testlist NEWLINE* ENDMARKER

decorator: '@' namedexpr_test NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | async_funcdef)

async_funcdef: 'async' funcdef
funcdef: 'def' NAME parameters ['->' test] ':' suite

parameters: '(' [typedargslist] ')'
typedargslist: (
  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
        ',' tfpdef ['=' test])* ([',' [
        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
        | '**' tfpdef [',']]])
        | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
        | '**' tfpdef [',']]] )
  | (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
     '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
     | '**' tfpdef [',']]]
     | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
     | '**' tfpdef [','])
)
tfpdef: NAME [':' test]
varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']]]
        | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']]]
        | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']
)
vfpdef: NAME

stmt: simple_stmt | compound_stmt | NEWLINE
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
            '<<=' | '>>=' | '**=' | '//=')
# For normal and annotated assignments, additional restrictions enforced by the interpreter
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist_star_expr]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test]]
import_stmt: import_name | import_from
import_name: 'import' dotted_as_names
# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
              'import' ('*' | '(' import_as_names ')' | import_as_names))
import_as_name: NAME ['as' NAME]
dotted_as_name: dotted_name ['as' NAME]
import_as_names: import_as_name (',' import_as_name)* [',']
dotted_as_names: dotted_as_name (',' dotted_as_name)*
dotted_name: NAME ('.' NAME)*
global_stmt: 'global' NAME (',' NAME)*
nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
assert_stmt: 'assert' test [',' test]

compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
async_stmt: 'async' (funcdef | with_stmt | for_stmt)
if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite
           ((except_clause ':' suite)+
            ['else' ':' suite]
            ['finally' ':' suite] |
            'finally' ':' suite))
with_stmt: 'with' with_item (',' with_item)* ':' suite
with_item: test ['as' expr]
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test ['as' NAME]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT

namedexpr_test: test [':=' test]
test: or_test ['if' or_test 'else' test] | lambdef
lambdef: 'lambda' [varargslist] ':' test
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
# <> isn't actually a valid comparison operator in Python. It's here for the
# sake of a __future__ import described in PEP 401 (which really works :-)
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom_expr ['**' factor]
atom_expr: ['await'] atom trailer*
atom: ('(' [yield_expr|testlist_comp] ')' |
       '[' [testlist_comp] ']' |
       '{' [dictorsetmaker] '}' |
       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: subscript (',' subscript)* [',']
subscript: test [':=' test] | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictorsetmaker: ( ((test ':' test | '**' expr)
                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
                  ((test [':=' test] | star_expr)
                   (comp_for | (',' (test [':=' test] | star_expr))* [','])) )

classdef: 'class' NAME ['(' [arglist] ')'] ':' suite

arglist: argument (',' argument)* [',']

# The reason that keywords are test nodes instead of NAME is that using NAME
# results in an ambiguity. ast.c makes sure it's a NAME.
# "test '=' test" is really "keyword '=' test", but we have no such token.
# These need to be in a single rule to avoid grammar that is ambiguous
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
            test ':=' test |
            test '=' test |
            '**' test |
            '*' test )

comp_iter: comp_for | comp_if
sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
comp_for: ['async'] sync_comp_for
comp_if: 'if' or_test [comp_iter]

# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME

yield_expr: 'yield' [yield_arg]
yield_arg: 'from' test | testlist_star_expr

strings: (STRING | fstring)+
fstring: FSTRING_START fstring_content* FSTRING_END
fstring_content: FSTRING_STRING | fstring_expr
fstring_conversion: '!' NAME
fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
fstring_format_spec: ':' fstring_content*
169 parso/python/grammar312.txt (new file)

@@ -0,0 +1,169 @@
# Grammar for Python

# NOTE WELL: You should also follow all the steps listed at
# https://devguide.python.org/grammar/

# Start symbols for the grammar:
#       single_input is a single interactive statement;
#       file_input is a module or sequence of commands read from an input file;
#       eval_input is the input for the eval() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
file_input: stmt* ENDMARKER
eval_input: testlist NEWLINE* ENDMARKER

decorator: '@' namedexpr_test NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | async_funcdef)

async_funcdef: 'async' funcdef
funcdef: 'def' NAME parameters ['->' test] ':' suite

parameters: '(' [typedargslist] ')'
typedargslist: (
  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
        ',' tfpdef ['=' test])* ([',' [
        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
        | '**' tfpdef [',']]])
        | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
        | '**' tfpdef [',']]] )
  | (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
     '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
     | '**' tfpdef [',']]]
     | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
     | '**' tfpdef [','])
)
tfpdef: NAME [':' test]
varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']]]
        | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']]]
        | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']
)
vfpdef: NAME

stmt: simple_stmt | compound_stmt | NEWLINE
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
            '<<=' | '>>=' | '**=' | '//=')
# For normal and annotated assignments, additional restrictions enforced by the interpreter
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist_star_expr]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test]]
import_stmt: import_name | import_from
import_name: 'import' dotted_as_names
# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
              'import' ('*' | '(' import_as_names ')' | import_as_names))
import_as_name: NAME ['as' NAME]
dotted_as_name: dotted_name ['as' NAME]
import_as_names: import_as_name (',' import_as_name)* [',']
dotted_as_names: dotted_as_name (',' dotted_as_name)*
dotted_name: NAME ('.' NAME)*
global_stmt: 'global' NAME (',' NAME)*
nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
assert_stmt: 'assert' test [',' test]

compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
async_stmt: 'async' (funcdef | with_stmt | for_stmt)
if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite
           ((except_clause ':' suite)+
            ['else' ':' suite]
            ['finally' ':' suite] |
            'finally' ':' suite))
with_stmt: 'with' with_item (',' with_item)* ':' suite
with_item: test ['as' expr]
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test ['as' NAME]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT

namedexpr_test: test [':=' test]
test: or_test ['if' or_test 'else' test] | lambdef
lambdef: 'lambda' [varargslist] ':' test
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
# <> isn't actually a valid comparison operator in Python. It's here for the
# sake of a __future__ import described in PEP 401 (which really works :-)
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom_expr ['**' factor]
atom_expr: ['await'] atom trailer*
atom: ('(' [yield_expr|testlist_comp] ')' |
       '[' [testlist_comp] ']' |
       '{' [dictorsetmaker] '}' |
       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: subscript (',' subscript)* [',']
subscript: test [':=' test] | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictorsetmaker: ( ((test ':' test | '**' expr)
                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
                  ((test [':=' test] | star_expr)
                   (comp_for | (',' (test [':=' test] | star_expr))* [','])) )

classdef: 'class' NAME ['(' [arglist] ')'] ':' suite

arglist: argument (',' argument)* [',']

# The reason that keywords are test nodes instead of NAME is that using NAME
# results in an ambiguity. ast.c makes sure it's a NAME.
# "test '=' test" is really "keyword '=' test", but we have no such token.
# These need to be in a single rule to avoid grammar that is ambiguous
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
            test ':=' test |
            test '=' test |
            '**' test |
            '*' test )

comp_iter: comp_for | comp_if
sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
comp_for: ['async'] sync_comp_for
comp_if: 'if' or_test [comp_iter]

# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME

yield_expr: 'yield' [yield_arg]
yield_arg: 'from' test | testlist_star_expr

strings: (STRING | fstring)+
fstring: FSTRING_START fstring_content* FSTRING_END
fstring_content: FSTRING_STRING | fstring_expr
fstring_conversion: '!' NAME
fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
fstring_format_spec: ':' fstring_content*
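A rough sketch of what these two files buy. They replicate the 3.10-era rules, so this is only "basic" support, as the changelog says — syntax introduced in 3.11/3.12 is not described by these grammars, but the version strings now load:

import parso

# The new grammar files make these version strings loadable at all.
for version in ('3.11', '3.12'):
    module = parso.parse('x = 1\n', version=version)
    assert module.type == 'file_input'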
@@ -97,9 +97,7 @@ suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
 
 namedexpr_test: test [':=' test]
 test: or_test ['if' or_test 'else' test] | lambdef
-test_nocond: or_test | lambdef_nocond
 lambdef: 'lambda' [varargslist] ':' test
-lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
 or_test: and_test ('or' and_test)*
 and_test: not_test ('and' not_test)*
 not_test: 'not' not_test | comparison

@@ -155,7 +153,7 @@ argument: ( test [comp_for] |
 comp_iter: comp_for | comp_if
 sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_for: ['async'] sync_comp_for
-comp_if: 'if' test_nocond [comp_iter]
+comp_if: 'if' or_test [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
 encoding_decl: NAME
@@ -96,8 +96,6 @@ class Parser(BaseParser):
             # prefixes. Just ignore them.
             children = [children[0]] + children[2:-1]
         node = self.default_node(nonterminal, children)
-        for c in children:
-            c.parent = node
         return node
 
     def convert_leaf(self, type, value, prefix, start_pos):

@@ -185,8 +183,6 @@
 
         if all_nodes:
             node = tree.PythonErrorNode(all_nodes)
-            for n in all_nodes:
-                n.parent = node
             self.stack[start_index - 1].nodes.append(node)
 
         self.stack[start_index:] = []
@@ -4,7 +4,7 @@ from typing import Tuple
 
 from parso.python.errors import ErrorFinder, ErrorFinderConfig
 from parso.normalizer import Rule
-from parso.python.tree import search_ancestor, Flow, Scope
+from parso.python.tree import Flow, Scope
 
 
 _IMPORT_TYPES = ('import_name', 'import_from')

@@ -74,7 +74,7 @@ class BracketNode(IndentationNode):
                     parent_indentation = n.indentation
 
                 next_leaf = leaf.get_next_leaf()
-                if '\n' in next_leaf.prefix:
+                if '\n' in next_leaf.prefix or '\r' in next_leaf.prefix:
                     # This implies code like:
                     # foobarbaz(
                     #     a,

@@ -116,7 +116,7 @@ class ImplicitNode(BracketNode):
         self.type = IndentationTypes.IMPLICIT
 
         next_leaf = leaf.get_next_leaf()
-        if leaf == ':' and '\n' not in next_leaf.prefix:
+        if leaf == ':' and '\n' not in next_leaf.prefix and '\r' not in next_leaf.prefix:
             self.indentation += ' '

@@ -124,7 +124,7 @@ class BackslashNode(IndentationNode):
     type = IndentationTypes.BACKSLASH
 
     def __init__(self, config, parent_indentation, containing_leaf, spacing, parent=None):
-        expr_stmt = search_ancestor(containing_leaf, 'expr_stmt')
+        expr_stmt = containing_leaf.search_ancestor('expr_stmt')
         if expr_stmt is not None:
             equals = expr_stmt.children[-2]
@@ -216,8 +216,8 @@ class PEP8Normalizer(ErrorFinder):
             endmarker = node.children[-1]
             prev = endmarker.get_previous_leaf()
             prefix = endmarker.prefix
-            if (not prefix.endswith('\n') and (
-                    prefix or prev is None or prev.value != '\n')):
+            if (not prefix.endswith('\n') and not prefix.endswith('\r') and (
+                    prefix or prev is None or prev.value not in {'\n', '\r\n', '\r'})):
                 self.add_issue(endmarker, 292, "No newline at end of file")
 
         if typ in _IMPORT_TYPES:

@@ -465,7 +465,8 @@ class PEP8Normalizer(ErrorFinder):
                                 + self._config.indentation:
                             self.add_issue(part, 129, "Line with same indent as next logical block")
                 elif indentation != should_be_indentation:
-                    if not self._check_tabs_spaces(spacing) and part.value != '\n':
+                    if not self._check_tabs_spaces(spacing) and part.value not in \
+                            {'\n', '\r\n', '\r'}:
                         if value in '])}':
                             if node.type == IndentationTypes.VERTICAL_BRACKET:
                                 self.add_issue(

@@ -652,7 +653,8 @@ class PEP8Normalizer(ErrorFinder):
                 else:
                     prev_spacing = self._previous_spacing
                     if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
-                            and '\n' not in self._previous_leaf.prefix:
+                            and '\n' not in self._previous_leaf.prefix \
+                            and '\r' not in self._previous_leaf.prefix:
                         message = "Whitespace before operator doesn't match with whitespace after"
                         self.add_issue(spacing, 229, message)
@@ -724,11 +726,11 @@ class PEP8Normalizer(ErrorFinder):
 
     def add_issue(self, node, code, message):
         if self._previous_leaf is not None:
-            if search_ancestor(self._previous_leaf, 'error_node') is not None:
+            if self._previous_leaf.search_ancestor('error_node') is not None:
                 return
             if self._previous_leaf.type == 'error_leaf':
                 return
-        if search_ancestor(node, 'error_node') is not None:
+        if node.search_ancestor('error_node') is not None:
             return
         if code in (901, 903):
             # 901 and 903 are raised by the ErrorFinder.
@@ -18,7 +18,7 @@ class PrefixPart:
 
     @property
     def end_pos(self) -> Tuple[int, int]:
-        if self.value.endswith('\n'):
+        if self.value.endswith('\n') or self.value.endswith('\r'):
             return self.start_pos[0] + 1, 0
         if self.value == unicode_bom:
             # The bom doesn't have a length at the start of a Python file.

@@ -40,10 +40,18 @@ class PrefixPart:
             self.start_pos
         )
 
+    def search_ancestor(self, *node_types):
+        node = self.parent
+        while node is not None:
+            if node.type in node_types:
+                return node
+            node = node.parent
+        return None
+
 
 _comment = r'#[^\n\r\f]*'
-_backslash = r'\\\r?\n'
-_newline = r'\r?\n'
+_backslash = r'\\\r?\n|\\\r'
+_newline = r'\r?\n|\r'
 _form_feed = r'\f'
 _only_spacing = '$'
 _spacing = r'[ \t]*'

@@ -86,7 +94,7 @@ def split_prefix(leaf, start_pos):
             bom = True
 
         start = match.end(0)
-        if value.endswith('\n'):
+        if value.endswith('\n') or value.endswith('\r'):
            line += 1
            column = -start
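A small standalone sanity check of the widened patterns (plain re, nothing parso-specific): the extra `|\r` alternative is what admits old-style Mac line endings.

import re

_backslash = r'\\\r?\n|\\\r'
_newline = r'\r?\n|\r'

# All three line-ending conventions now match, including a lone '\r'.
for end in ('\n', '\r\n', '\r'):
    assert re.fullmatch(_newline, end)
    assert re.fullmatch(_backslash, '\\' + end)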
@@ -548,7 +548,7 @@ def tokenize_lines(
                 additional_prefix = prefix + token
                 new_line = True
             elif initial == '#':  # Comments
-                assert not token.endswith("\n")
+                assert not token.endswith("\n") and not token.endswith("\r")
                 if fstring_stack and fstring_stack[-1].is_in_expr():
                     # `#` is not allowed in f-string expressions
                     yield PythonToken(ERRORTOKEN, initial, spos, prefix)
@@ -49,8 +49,7 @@ except ImportError:
     from collections import Mapping
 from typing import Tuple
 
-from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, \
-    search_ancestor
+from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, search_ancestor  # noqa
 from parso.python.prefix import split_prefix
 from parso.utils import split_lines

@@ -549,7 +548,11 @@ class Function(ClassOrFunc):
     def __init__(self, children):
         super().__init__(children)
         parameters = self.children[2]  # After `def foo`
-        parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1])
+        parameters_children = parameters.children[1:-1]
+        # If input parameters list already has Param objects, keep it as is;
+        # otherwise, convert it to a list of Param objects.
+        if not any(isinstance(child, Param) for child in parameters_children):
+            parameters.children[1:-1] = _create_params(parameters, parameters_children)
 
     def _get_param_nodes(self):
         return self.children[2].children

@@ -652,7 +655,11 @@ class Lambda(Function):
         # We don't want to call the Function constructor, call its parent.
         super(Function, self).__init__(children)
         # Everything between `lambda` and the `:` operator is a parameter.
-        self.children[1:-2] = _create_params(self, self.children[1:-2])
+        parameters_children = self.children[1:-2]
+        # If input children list already has Param objects, keep it as is;
+        # otherwise, convert it to a list of Param objects.
+        if not any(isinstance(child, Param) for child in parameters_children):
+            self.children[1:-2] = _create_params(self, parameters_children)
 
     @property
     def name(self):

@@ -776,7 +783,7 @@ class WithStmt(Flow):
         return names
 
     def get_test_node_from_name(self, name):
-        node = search_ancestor(name, "with_item")
+        node = name.search_ancestor("with_item")
         if node is None:
             raise ValueError('The name is not actually part of a with statement.')
         return node.children[0]

@@ -1080,11 +1087,9 @@ class Param(PythonBaseNode):
     """
     type = 'param'
 
-    def __init__(self, children, parent):
+    def __init__(self, children, parent=None):
         super().__init__(children)
         self.parent = parent
-        for child in children:
-            child.parent = self
 
     @property
     def star_count(self):

@@ -1171,7 +1176,7 @@ class Param(PythonBaseNode):
         """
         Returns the function/lambda of a parameter.
         """
-        return search_ancestor(self, 'funcdef', 'lambdef')
+        return self.search_ancestor('funcdef', 'lambdef')
 
     def get_code(self, include_prefix=True, include_comma=True):
         """
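Why the isinstance guard matters: eval of NodeOrLeaf.dump() output (added in parso/tree.py below) calls these constructors with children that are already Param instances, and re-wrapping them would corrupt the tree. A sketch of the round trip, assuming parso 0.8.3:

from parso import parse
# Star imports bring Module, Function, Param, ... into scope for eval(),
# the same trick the new test/test_dump_tree.py uses.
from parso.python.tree import *  # noqa: F403
from parso.tree import *  # noqa: F403

code = 'def foo(x, y): return x + y'
module = parse(code)
# dump() emits nested constructor calls; eval'ing them drives
# Function.__init__ and Param.__init__ with ready-made Param children.
rebuilt = eval(module.dump())
assert rebuilt.get_code() == code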
138 parso/tree.py

@@ -1,33 +1,41 @@
 from abc import abstractmethod, abstractproperty
-from typing import List, Optional, Tuple
+from typing import List, Optional, Tuple, Union
 
 from parso.utils import split_lines
 
 
-def search_ancestor(node, *node_types):
+def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 'Optional[BaseNode]':
     """
     Recursively looks at the parents of a node and returns the first found node
-    that matches node_types. Returns ``None`` if no matching node is found.
+    that matches ``node_types``. Returns ``None`` if no matching node is found.
+
+    This function is deprecated, use :meth:`NodeOrLeaf.search_ancestor` instead.
 
     :param node: The ancestors of this node will be checked.
     :param node_types: type names that are searched for.
     :type node_types: tuple of str
     """
-    while True:
-        node = node.parent
-        if node is None or node.type in node_types:
-            return node
+    n = node.parent
+    while n is not None:
+        if n.type in node_types:
+            return n
+        n = n.parent
+    return None
 
 
 class NodeOrLeaf:
     """
     The base class for nodes and leaves.
     """
-    __slots__ = ()
+    __slots__ = ('parent',)
     type: str
     '''
     The type is a string that typically matches the types of the grammar file.
     '''
+    parent: 'Optional[BaseNode]'
+    '''
+    The parent :class:`BaseNode` of this node or leaf.
+    None if this is the root node.
+    '''
 
     def get_root_node(self):
         """

@@ -173,13 +181,117 @@ class NodeOrLeaf:
         e.g. a statement.
         """
 
+    def search_ancestor(self, *node_types: str) -> 'Optional[BaseNode]':
+        """
+        Recursively looks at the parents of this node or leaf and returns the
+        first found node that matches ``node_types``. Returns ``None`` if no
+        matching node is found.
+
+        :param node_types: type names that are searched for.
+        """
+        node = self.parent
+        while node is not None:
+            if node.type in node_types:
+                return node
+            node = node.parent
+        return None
+
+    def dump(self, *, indent: Optional[Union[int, str]] = 4) -> str:
+        """
+        Returns a formatted dump of the parser tree rooted at this node or leaf. This is
+        mainly useful for debugging purposes.
+
+        The ``indent`` parameter is interpreted in a similar way as :py:func:`ast.dump`.
+        If ``indent`` is a non-negative integer or string, then the tree will be
+        pretty-printed with that indent level. An indent level of 0, negative, or ``""``
+        will only insert newlines. ``None`` selects the single line representation.
+        Using a positive integer indent indents that many spaces per level. If
+        ``indent`` is a string (such as ``"\\t"``), that string is used to indent each
+        level.
+
+        :param indent: Indentation style as described above. The default indentation is
+            4 spaces, which yields a pretty-printed dump.
+
+        >>> import parso
+        >>> print(parso.parse("lambda x, y: x + y").dump())
+        Module([
+            Lambda([
+                Keyword('lambda', (1, 0)),
+                Param([
+                    Name('x', (1, 7), prefix=' '),
+                    Operator(',', (1, 8)),
+                ]),
+                Param([
+                    Name('y', (1, 10), prefix=' '),
+                ]),
+                Operator(':', (1, 11)),
+                PythonNode('arith_expr', [
+                    Name('x', (1, 13), prefix=' '),
+                    Operator('+', (1, 15), prefix=' '),
+                    Name('y', (1, 17), prefix=' '),
+                ]),
+            ]),
+            EndMarker('', (1, 18)),
+        ])
+        """
+        if indent is None:
+            newline = False
+            indent_string = ''
+        elif isinstance(indent, int):
+            newline = True
+            indent_string = ' ' * indent
+        elif isinstance(indent, str):
+            newline = True
+            indent_string = indent
+        else:
+            raise TypeError(f"expect 'indent' to be int, str or None, got {indent!r}")
+
+        def _format_dump(node: NodeOrLeaf, indent: str = '', top_level: bool = True) -> str:
+            result = ''
+            node_type = type(node).__name__
+            if isinstance(node, Leaf):
+                result += f'{indent}{node_type}('
+                if isinstance(node, ErrorLeaf):
+                    result += f'{node.token_type!r}, '
+                elif isinstance(node, TypedLeaf):
+                    result += f'{node.type!r}, '
+                result += f'{node.value!r}, {node.start_pos!r}'
+                if node.prefix:
+                    result += f', prefix={node.prefix!r}'
+                result += ')'
+            elif isinstance(node, BaseNode):
+                result += f'{indent}{node_type}('
+                if isinstance(node, Node):
+                    result += f'{node.type!r}, '
+                result += '['
+                if newline:
+                    result += '\n'
+                for child in node.children:
+                    result += _format_dump(child, indent=indent + indent_string, top_level=False)
+                result += f'{indent}])'
+            else:  # pragma: no cover
+                # We shouldn't ever reach here, unless:
+                # - `NodeOrLeaf` is incorrectly subclassed else where
+                # - or a node's children list contains invalid nodes or leafs
+                # Both are unexpected internal errors.
+                raise TypeError(f'unsupported node encountered: {node!r}')
+            if not top_level:
+                if newline:
+                    result += ',\n'
+                else:
+                    result += ', '
+            return result
+
+        return _format_dump(self)
+
 
 class Leaf(NodeOrLeaf):
     '''
     Leafs are basically tokens with a better API. Leafs exactly know where they
     were defined and what text preceeds them.
     '''
-    __slots__ = ('value', 'parent', 'line', 'column', 'prefix')
+    __slots__ = ('value', 'line', 'column', 'prefix')
     prefix: str
 
     def __init__(self, value: str, start_pos: Tuple[int, int], prefix: str = '') -> None:
         self.value = value

@@ -257,7 +369,7 @@ class BaseNode(NodeOrLeaf):
     The super class for all nodes.
     A node has children, a type and possibly a parent node.
     """
-    __slots__ = ('children', 'parent')
+    __slots__ = ('children',)
 
     def __init__(self, children: List[NodeOrLeaf]) -> None:
         self.children = children

@@ -266,9 +378,11 @@ class BaseNode(NodeOrLeaf):
         """
         self.parent: Optional[BaseNode] = None
         '''
-        The parent :class:`BaseNode` of this leaf.
+        The parent :class:`BaseNode` of this node.
         None if this is the root node.
         '''
+        for child in children:
+            child.parent = self
 
     @property
     def start_pos(self) -> Tuple[int, int]:
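Usage, old and new — the module-level function stays importable but is now a deprecated equivalent of the method (grounded in the test_search_ancestor test added below):

import parso
from parso.tree import search_ancestor

module = parso.parse('x + y')
arith_expr = module.children[0]
name_leaf = arith_expr.children[0]  # Name('x', (1, 0))

# Preferred since 0.8.3: the method on any node or leaf.
assert name_leaf.search_ancestor('arith_expr') is arith_expr
# Deprecated module-level form, same result.
assert search_ancestor(name_leaf, 'arith_expr') is arith_expr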
@@ -92,7 +92,7 @@ def python_bytes_to_unicode(
             # UTF-8 byte-order mark
             return 'utf-8'
 
-        first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0)
+        first_two_lines = re.match(br'(?:[^\r\n]*(?:\r\n|\r|\n)){0,2}', source).group(0)
         possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)",
                                       first_two_lines)
         if possible_encoding:
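A standalone check of the new first-two-lines pattern (plain re; byte strings as in the source) — the coding cookie is now located regardless of line-ending style:

import re

pattern = br'(?:[^\r\n]*(?:\r\n|\r|\n)){0,2}'
for source in (b'# coding: latin-1\nx\ny',
               b'# coding: latin-1\r\nx\r\ny',
               b'# coding: latin-1\rx\ry'):
    first_two_lines = re.match(pattern, source).group(0)
    # The cookie search sees the first line whatever the terminator is.
    assert re.search(br'coding[=:]\s*([-\w.]+)', first_two_lines)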
@@ -137,7 +137,7 @@ def test_cache_last_used_update(diff_cache, use_file_io):
     parse('somecode', cache=True, path=p)
     node_cache_item = next(iter(parser_cache.values()))[p]
     now = time.time()
-    assert node_cache_item.last_used < now
+    assert node_cache_item.last_used <= now
 
     if use_file_io:
         f = _FixedTimeFileIO(p, 'code', node_cache_item.last_used - 10)

@@ -185,6 +185,9 @@ def test_permission_error(monkeypatch):
         was_called = False
 
     monkeypatch.setattr(cache, '_save_to_file_system', save)
-    with pytest.warns(Warning):
-        parse(path=__file__, cache=True, diff_cache=True)
-    assert was_called
+    try:
+        with pytest.warns(Warning):
+            parse(path=__file__, cache=True, diff_cache=True)
+        assert was_called
+    finally:
+        parser_cache.clear()
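The `<` to `<=` relaxation reflects clock granularity: two close time.time() reads can return the same value, so the strict comparison was flaky. Illustration only:

import time

earlier = time.time()
now = time.time()
# May be equal on coarse clocks or fast machines; '<' would then fail.
assert earlier <= now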
182 test/test_dump_tree.py (new file)

@@ -0,0 +1,182 @@
from textwrap import dedent

import pytest

from parso import parse
# Using star import for easier eval testing below.
from parso.python.tree import *  # noqa: F403
from parso.tree import *  # noqa: F403
from parso.tree import ErrorLeaf, TypedLeaf


@pytest.mark.parametrize(
    'indent,expected_dump', [
        (None, "Module(["
               "Lambda(["
               "Keyword('lambda', (1, 0)), "
               "Param(["
               "Name('x', (1, 7), prefix=' '), "
               "Operator(',', (1, 8)), "
               "]), "
               "Param(["
               "Name('y', (1, 10), prefix=' '), "
               "]), "
               "Operator(':', (1, 11)), "
               "PythonNode('arith_expr', ["
               "Name('x', (1, 13), prefix=' '), "
               "Operator('+', (1, 15), prefix=' '), "
               "Name('y', (1, 17), prefix=' '), "
               "]), "
               "]), "
               "EndMarker('', (1, 18)), "
               "])"),
        (0, dedent('''\
            Module([
            Lambda([
            Keyword('lambda', (1, 0)),
            Param([
            Name('x', (1, 7), prefix=' '),
            Operator(',', (1, 8)),
            ]),
            Param([
            Name('y', (1, 10), prefix=' '),
            ]),
            Operator(':', (1, 11)),
            PythonNode('arith_expr', [
            Name('x', (1, 13), prefix=' '),
            Operator('+', (1, 15), prefix=' '),
            Name('y', (1, 17), prefix=' '),
            ]),
            ]),
            EndMarker('', (1, 18)),
            ])''')),
        (4, dedent('''\
            Module([
                Lambda([
                    Keyword('lambda', (1, 0)),
                    Param([
                        Name('x', (1, 7), prefix=' '),
                        Operator(',', (1, 8)),
                    ]),
                    Param([
                        Name('y', (1, 10), prefix=' '),
                    ]),
                    Operator(':', (1, 11)),
                    PythonNode('arith_expr', [
                        Name('x', (1, 13), prefix=' '),
                        Operator('+', (1, 15), prefix=' '),
                        Name('y', (1, 17), prefix=' '),
                    ]),
                ]),
                EndMarker('', (1, 18)),
            ])''')),
        ('\t', dedent('''\
            Module([
            \tLambda([
            \t\tKeyword('lambda', (1, 0)),
            \t\tParam([
            \t\t\tName('x', (1, 7), prefix=' '),
            \t\t\tOperator(',', (1, 8)),
            \t\t]),
            \t\tParam([
            \t\t\tName('y', (1, 10), prefix=' '),
            \t\t]),
            \t\tOperator(':', (1, 11)),
            \t\tPythonNode('arith_expr', [
            \t\t\tName('x', (1, 13), prefix=' '),
            \t\t\tOperator('+', (1, 15), prefix=' '),
            \t\t\tName('y', (1, 17), prefix=' '),
            \t\t]),
            \t]),
            \tEndMarker('', (1, 18)),
            ])''')),
    ]
)
def test_dump_parser_tree(indent, expected_dump):
    code = "lambda x, y: x + y"
    module = parse(code)
    assert module.dump(indent=indent) == expected_dump

    # Check that dumped tree can be eval'd to recover the parser tree and original code.
    recovered_code = eval(expected_dump).get_code()
    assert recovered_code == code


@pytest.mark.parametrize(
    'node,expected_dump,expected_code', [
        (  # Dump intermediate node (not top level module)
            parse("def foo(x, y): return x + y").children[0], dedent('''\
                Function([
                    Keyword('def', (1, 0)),
                    Name('foo', (1, 4), prefix=' '),
                    PythonNode('parameters', [
                        Operator('(', (1, 7)),
                        Param([
                            Name('x', (1, 8)),
                            Operator(',', (1, 9)),
                        ]),
                        Param([
                            Name('y', (1, 11), prefix=' '),
                        ]),
                        Operator(')', (1, 12)),
                    ]),
                    Operator(':', (1, 13)),
                    ReturnStmt([
                        Keyword('return', (1, 15), prefix=' '),
                        PythonNode('arith_expr', [
                            Name('x', (1, 22), prefix=' '),
                            Operator('+', (1, 24), prefix=' '),
                            Name('y', (1, 26), prefix=' '),
                        ]),
                    ]),
                ])'''),
            "def foo(x, y): return x + y",
        ),
        (  # Dump leaf
            parse("def foo(x, y): return x + y").children[0].children[0],
            "Keyword('def', (1, 0))",
            'def',
        ),
        (  # Dump ErrorLeaf
            ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' '),
            "ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' ')",
            ' error_code',
        ),
        (  # Dump TypedLeaf
            TypedLeaf('type', 'value', (1, 1)),
            "TypedLeaf('type', 'value', (1, 1))",
            'value',
        ),
    ]
)
def test_dump_parser_tree_not_top_level_module(node, expected_dump, expected_code):
    dump_result = node.dump()
    assert dump_result == expected_dump

    # Check that dumped tree can be eval'd to recover the parser tree and original code.
    recovered_code = eval(dump_result).get_code()
    assert recovered_code == expected_code


def test_dump_parser_tree_invalid_args():
    module = parse("lambda x, y: x + y")

    with pytest.raises(TypeError):
        module.dump(indent=1.1)


def test_eval_dump_recovers_parent():
    module = parse("lambda x, y: x + y")
    module2 = eval(module.dump())
    assert module2.parent is None
    lambda_node = module2.children[0]
    assert lambda_node.parent is module2
    assert module2.children[1].parent is module2
    assert lambda_node.children[0].parent is lambda_node
    param_node = lambda_node.children[1]
    assert param_node.parent is lambda_node
    assert param_node.children[0].parent is param_node
    assert param_node.children[1].parent is param_node
    arith_expr_node = lambda_node.children[-1]
    assert arith_expr_node.parent is lambda_node
    assert arith_expr_node.children[0].parent is arith_expr_node
@@ -6,6 +6,7 @@ import pytest
 
 from parso import parse
 from parso.python import tree
+from parso.tree import search_ancestor
 
 
 class TestsFunctionAndLambdaParsing:

@@ -239,3 +240,27 @@ def test_with_stmt_get_test_node_from_name():
         for name in with_stmt.get_defined_names(include_setitem=True)
     ]
     assert tests == ["A", "B", "C", "D"]
+
+
+sample_module = parse('x + y')
+sample_node = sample_module.children[0]
+sample_leaf = sample_node.children[0]
+
+
+@pytest.mark.parametrize(
+    'node,node_types,expected_ancestor', [
+        (sample_module, ('file_input',), None),
+        (sample_node, ('arith_expr',), None),
+        (sample_node, ('file_input', 'eval_input'), sample_module),
+        (sample_leaf, ('name',), None),
+        (sample_leaf, ('arith_expr',), sample_node),
+        (sample_leaf, ('file_input',), sample_module),
+        (sample_leaf, ('file_input', 'arith_expr'), sample_node),
+        (sample_leaf, ('shift_expr',), None),
+        (sample_leaf, ('name', 'shift_expr',), None),
+        (sample_leaf, (), None),
+    ]
+)
+def test_search_ancestor(node, node_types, expected_ancestor):
+    assert node.search_ancestor(*node_types) is expected_ancestor
+    assert search_ancestor(node, *node_types) is expected_ancestor  # deprecated
@@ -15,6 +15,8 @@ def test_eof_newline():
         assert issue.code == 292
 
     assert not issues('asdf = 1\n')
+    assert not issues('asdf = 1\r\n')
+    assert not issues('asdf = 1\r')
     assert_issue('asdf = 1')
     assert_issue('asdf = 1\n# foo')
     assert_issue('# foobar')
@@ -339,7 +339,7 @@ def test_left_recursion():
 @pytest.mark.parametrize(
     'grammar, error_match', [
         ['foo: bar | baz\nbar: NAME\nbaz: NAME\n',
-         r"foo is ambiguous.*given a PythonTokenTypes\.NAME.*bar or baz"],
+         r"foo is ambiguous.*given a (PythonTokenTypes\.)?NAME.*bar or baz"],
        ['''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''',
         r"foo is ambiguous.*given a ReservedString\(x\).*bar or baz"],
        ['''foo: bar | 'x'\nbar: 'x'\n''',
@@ -19,6 +19,7 @@ unicode_bom = BOM_UTF8.decode('utf-8')
     (' \f ', ['\f', ' ']),
     (' \f ', ['\f', ' ']),
     (' \r\n', ['\r\n', '']),
+    (' \r', ['\r', '']),
     ('\\\n', ['\\\n', '']),
     ('\\\r\n', ['\\\r\n', '']),
     ('\t\t\n\t', ['\n', '\t']),

@@ -34,7 +35,7 @@ def test_simple_prefix_splitting(string, tokens):
         assert pt.value == expected
 
         # Calculate the estimated end_pos
-        if expected.endswith('\n'):
+        if expected.endswith('\n') or expected.endswith('\r'):
             end_pos = start_pos[0] + 1, 0
         else:
             end_pos = start_pos[0], start_pos[1] + len(expected) + len(pt.spacing)
@@ -57,10 +57,10 @@ def test_non_async_in_async():
         error, = errors
         actual = error.message
         assert actual in wanted
-        if sys.version_info[:2] < (3, 8):
+        if sys.version_info[:2] not in ((3, 8), (3, 9)):
             assert line_nr == error.start_pos[0]
         else:
-            assert line_nr == 0  # For whatever reason this is zero in Python 3.8+
+            assert line_nr == 0  # For whatever reason this is zero in Python 3.8/3.9
 
 
 @pytest.mark.parametrize(
@@ -140,13 +140,16 @@ def _get_actual_exception(code):
 
 
 def test_default_except_error_postition():
-    # For this error the position seemed to be one line off, but that doesn't
-    # really matter.
+    # For this error the position seemed to be one line off in Python < 3.10,
+    # but that doesn't really matter.
     code = 'try: pass\nexcept: pass\nexcept X: pass'
     wanted, line_nr = _get_actual_exception(code)
     error, = _get_error_list(code)
     assert error.message in wanted
-    assert line_nr != error.start_pos[0]
+    if sys.version_info[:2] >= (3, 10):
+        assert line_nr == error.start_pos[0]
+    else:
+        assert line_nr != error.start_pos[0]
     # I think this is the better position.
     assert error.start_pos[0] == 2
@@ -494,3 +497,14 @@ def test_valid_empty_assignment(code):
     )
 def test_valid_del(code):
     assert not _get_error_list(code)
+
+
+@pytest.mark.parametrize(
+    ('source', 'version', 'no_errors'), [
+        ('[x for x in range(10) if lambda: 1]', '3.8', True),
+        ('[x for x in range(10) if lambda: 1]', '3.9', False),
+        ('[x for x in range(10) if (lambda: 1)]', '3.9', True),
+    ]
+)
+def test_lambda_in_comp_if(source, version, no_errors):
+    assert bool(_get_error_list(source, version=version)) ^ no_errors
@@ -74,6 +74,10 @@ def test_utf8_bom():
     ('code', 'errors'), [
         (b'# coding: wtf-12\nfoo', 'strict'),
         (b'# coding: wtf-12\nfoo', 'replace'),
+        (b'# coding: wtf-12\r\nfoo', 'strict'),
+        (b'# coding: wtf-12\r\nfoo', 'replace'),
+        (b'# coding: wtf-12\rfoo', 'strict'),
+        (b'# coding: wtf-12\rfoo', 'replace'),
     ]
 )
 def test_bytes_to_unicode_failing_encoding(code, errors):