Mirror of https://github.com/davidhalter/parso.git, synced 2025-12-07 05:14:29 +08:00

Compare commits
75 commits (SHA1):

be9f5a401f, 7e4777b775, e99dbdd536, e22dc67aa1, baa3c90d85, 23b1cdf73d, a73af5c709,
9328cffce3, f670e6e7dc, 338a576027, 9ddffca4da, 06db036e23, c792ae546c, 1c01dafc2b,
1ca6b1f3e8, 5a9349ae58, 279fd6903e, e255b69cb8, 744f2ac39e, 3c04eef132, f7bea28bcc,
27af7ef106, 171fd33cb6, 4eba7d697f, cf240c7d2b, ffadfca81b, 378e645bbc, df34112b5b,
e0a1caecc4, 7d43001f9d, cf5969d7a1, 6b6b59f6d7, 7af5259159, 8ee84d005e, 0740450899,
ee5edaf22f, 285492f4ed, da3a7488f9, c5e8602cae, ae491cbf55, 9f32dde163, d26d0d57fe,
5570975a7d, e1523014e4, 7652d3904b, ed47650fbe, 60fed7b9f8, 7000dd24d7, 86f3f1096b,
f2b1ff9429, cbb61fb819, 966d5446eb, b42135fb1a, d76c890667, 885f623c4b, b5429ccbdc,
60ec880422, bd03b21446, 8dee324d0c, 5edab0407a, c4f297a57a, 5bba083af8, 2799a7a3c2,
fac5c089ff, e5d6663721, e5731d3932, 93206f6eba, cbe0b91d6a, 771fe6bf33, 1139e53429,
0e20c33c21, 14c88c1f4b, 257ac768fb, 79aeb2a801, ef90bba3b3
.coveragerc

@@ -4,6 +4,8 @@ source = parso
 [report]
 # Regexes for lines to exclude from consideration
 exclude_lines =
     pragma: no cover
 
+    # Don't complain about missing debug-only code:
+    def __repr__
.github/workflows/build.yml (new file, 65 lines, vendored)

name: Build

on: [push, pull_request]

env:
  PYTEST_ADDOPTS: --color=yes

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install .[qa]
      - name: Run Flake8
        # Ignore F401, which are unused imports. flake8 is a primitive tool and is sometimes wrong.
        run: flake8 --extend-ignore F401 parso test/*.py setup.py scripts/
      - name: Run Mypy
        run: mypy parso setup.py
  test:
    runs-on: ubuntu-latest
    continue-on-error: ${{ matrix.experimental }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13']
        experimental: [false]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install .[testing]
      - name: Run pytest
        run: pytest
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install .[testing] coverage coveralls
      - name: Run pytest with coverage
        run: |
          coverage run -m pytest
          coverage report
      - name: Upload coverage report to Coveralls
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: coveralls --service=github
.gitignore (vendored)

@@ -11,3 +11,4 @@ parso.egg-info/
 /.pytest_cache
 test/fuzz-redo.pickle
 /venv/
+/htmlcov/
.readthedocs.yml (new file, 16 lines)

version: 2

python:
  install:
    - method: pip
      path: .
      extra_requirements:
        - docs

submodules:
  include: all

build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
.travis.yml (deleted, 31 lines)

dist: xenial
language: python
python:
  - 3.6
  - 3.7
  - 3.8.2
  - nightly
matrix:
  allow_failures:
    - python: nightly
  include:
    - python: 3.8
      install:
        - 'pip install .[qa]'
      script:
        # Ignore F401, which are unused imports. flake8 is a primitive tool and is sometimes wrong.
        - 'flake8 --extend-ignore F401 parso test/*.py setup.py scripts/'
        - mypy parso
    - python: 3.8.2
      script:
        - 'pip install coverage'
        - 'coverage run -m pytest'
        - 'coverage report'
      after_script:
        - |
          pip install --quiet coveralls
          coveralls
install:
  - pip install .[testing]
script:
  - pytest
AUTHORS.txt

@@ -6,6 +6,7 @@ David Halter (@davidhalter) <davidhalter88@gmail.com>
 Code Contributors
 =================
 Alisdair Robertson (@robodair)
+Bryan Forbes (@bryanforbes) <bryan@reigndropsfall.net>
 
 
 Code Contributors (to Jedi and therefore possibly to this library)
CHANGELOG.rst

@@ -6,6 +6,26 @@ Changelog
 Unreleased
 ++++++++++
 
+0.8.5 (2025-08-23)
+++++++++++++++++++
+
+- Add a fallback grammar for Python 3.14+
+
+0.8.4 (2024-04-05)
+++++++++++++++++++
+
+- Add basic support for Python 3.13
+
+0.8.3 (2021-11-30)
+++++++++++++++++++
+
+- Add basic support for Python 3.11 and 3.12
+
 0.8.2 (2021-03-30)
 ++++++++++++++++++
 
 - Various small bugfixes
 
 0.8.1 (2020-12-10)
 ++++++++++++++++++
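The entries above are easy to sanity-check from the public API. A minimal sketch, assuming only `parso.parse` and its `version` parameter:

```python
import parso

# Each release above extends the set of accepted grammar versions; the same
# snippet should parse under any of them.
for version in ("3.11", "3.12", "3.13"):
    module = parso.parse("x = 1\n", version=version)
    assert module.type == "file_input"
```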
README.rst (11 changed lines)

@@ -3,9 +3,9 @@ parso - A Python Parser
 ###################################################################
 
-.. image:: https://travis-ci.org/davidhalter/parso.svg?branch=master
-    :target: https://travis-ci.org/davidhalter/parso
-    :alt: Travis CI build status
+.. image:: https://github.com/davidhalter/parso/workflows/Build/badge.svg?branch=master
+    :target: https://github.com/davidhalter/parso/actions
+    :alt: GitHub Actions build status
 
 .. image:: https://coveralls.io/repos/github/davidhalter/parso/badge.svg?branch=master
     :target: https://coveralls.io/github/davidhalter/parso?branch=master
@@ -68,6 +68,8 @@ Resources
 Installation
 ============
 
+.. code-block:: bash
+
     pip install parso
 
 Future
@@ -88,8 +90,7 @@ Acknowledgements
 - Guido van Rossum (@gvanrossum) for creating the parser generator pgen2
   (originally used in lib2to3).
-- `Salome Schneider <https://www.crepes-schnaegg.ch/cr%C3%AApes-schn%C3%A4gg/kunst-f%C3%BCrs-cr%C3%AApes-mobil/>`_
-  for the extremely awesome parso logo.
+- Salome Schneider for the extremely awesome parso logo.
 
 .. _jedi: https://github.com/davidhalter/jedi
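For context, a minimal usage sketch after installation, mirroring the example style used in parso's documentation:

```python
import parso

# Parse a snippet against a pinned grammar version and walk the tree.
module = parso.parse('hello + 1', version="3.9")
expr = module.children[0]
print(expr.get_code())  # hello + 1
```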
SECURITY.md (new file, 9 lines)

# Security Policy

If security issues arise, we will try to fix them as soon as possible.

Due to Parso's nature, security issues will probably be extremely rare, but we will of course treat them seriously.

## Reporting Security Problems

If you need to report a security vulnerability, please send an email to davidhalter88@gmail.com. Typically, I will respond within a few business days.
conftest.py

@@ -13,7 +13,7 @@ from parso.utils import parse_version_string
 
 collect_ignore = ["setup.py"]
 
-_SUPPORTED_VERSIONS = '3.6', '3.7', '3.8', '3.9', '3.10'
+_SUPPORTED_VERSIONS = '3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13', '3.14'
 
 
 @pytest.fixture(scope='session')
docs: running the tests

@@ -34,5 +34,5 @@ easy as::
 
     python3.9 -m pytest
 
-Tests are also run automatically on `Travis CI
-<https://travis-ci.org/davidhalter/parso/>`_.
+Tests are also run automatically on `GitHub Actions
+<https://github.com/davidhalter/parso/actions>`_.
docs: installation

@@ -16,7 +16,7 @@ From git
 --------
 If you want to install the current development version (master branch)::
 
-    sudo pip install -e git://github.com/davidhalter/parso.git#egg=parso
+    sudo pip install -e git+https://github.com/davidhalter/parso.git#egg=parso
 
 
 Manual installation from a downloaded package (not recommended)
docs: resources

@@ -27,5 +27,5 @@ Resources
 ---------
 
 - `Source Code on Github <https://github.com/davidhalter/parso>`_
-- `Travis Testing <https://travis-ci.org/davidhalter/parso>`_
+- `GitHub Actions Testing <https://github.com/davidhalter/parso/actions>`_
 - `Python Package Index <http://pypi.python.org/pypi/parso/>`_
parso/__init__.py

@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.8.1'
+__version__ = '0.8.5'
 
 
 def parse(code=None, **kwargs):
parso/cache.py

@@ -187,7 +187,7 @@ def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, ca
         # file system. It's still in RAM in that case. However we should
         # still warn the user that this is happening.
         warnings.warn(
-            'Tried to save a file to %s, but got permission denied.',
+            'Tried to save a file to %s, but got permission denied.' % path,
            Warning
         )
     else:
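This fix matters because `warnings.warn`, unlike the `logging` APIs, performs no `%` interpolation of extra arguments: the message must be fully formatted before the call. A minimal sketch (the path is hypothetical):

```python
import warnings

path = "/tmp/example.pkl"  # hypothetical cache path
# Correct: interpolate first, then warn.
warnings.warn("Tried to save a file to %s, but got permission denied." % path, Warning)
```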
parso/grammar.py

@@ -106,14 +106,14 @@ class Grammar(Generic[_NodeT]):
 
         if file_io is None:
             if code is None:
-                file_io = FileIO(path)  # type: ignore
+                file_io = FileIO(path)  # type: ignore[arg-type]
             else:
                 file_io = KnownContentFileIO(path, code)
 
         if cache and file_io.path is not None:
             module_node = load_module(self._hashed, file_io, cache_path=cache_path)
             if module_node is not None:
-                return module_node  # type: ignore
+                return module_node  # type: ignore[no-any-return]
 
         if code is None:
             code = file_io.read()
@@ -132,7 +132,7 @@ class Grammar(Generic[_NodeT]):
             module_node = module_cache_item.node
             old_lines = module_cache_item.lines
             if old_lines == lines:
-                return module_node  # type: ignore
+                return module_node  # type: ignore[no-any-return]
 
             new_node = self._diff_parser(
                 self._pgen_grammar, self._tokenizer, module_node
@@ -144,7 +144,7 @@ class Grammar(Generic[_NodeT]):
                 # Never pickle in pypy, it's slow as hell.
                 pickling=cache and not is_pypy,
                 cache_path=cache_path)
-            return new_node  # type: ignore
+            return new_node  # type: ignore[no-any-return]
 
         tokens = self._tokenizer(lines)
 
@@ -160,7 +160,7 @@ class Grammar(Generic[_NodeT]):
             # Never pickle in pypy, it's slow as hell.
             pickling=cache and not is_pypy,
             cache_path=cache_path)
-        return root_node  # type: ignore
+        return root_node  # type: ignore[no-any-return]
 
     def _get_token_namespace(self):
         ns = self._token_namespace
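Narrowing a bare `# type: ignore` to `# type: ignore[code]` silences only the named mypy error and keeps every other check active on that line. A self-contained illustration with hypothetical names:

```python
from typing import Any

def load() -> Any:  # hypothetical untyped helper
    return 1

def cached() -> int:
    # Only the no-any-return diagnostic is suppressed; any other error on
    # this line would still be reported by mypy.
    return load()  # type: ignore[no-any-return]
```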
@@ -239,7 +239,16 @@ def load_grammar(*, version: str = None, path: str = None):
     :param str version: A python version string, e.g. ``version='3.8'``.
     :param str path: A path to a grammar file
     """
-    version_info = parse_version_string(version)
+    # NOTE: this (3, 14) should be updated to the latest version parso supports.
+    # (if this doesn't happen, users will get older syntaxes and spurious warnings)
+    passed_version_info = parse_version_string(version)
+    version_info = min(passed_version_info, PythonVersionInfo(3, 14))
+
+    # # NOTE: this is commented out until parso properly supports newer Python grammars.
+    # if passed_version_info != version_info:
+    #     warnings.warn('parso does not support %s.%s yet.' % (
+    #         passed_version_info.major, passed_version_info.minor
+    #     ))
 
     file = path or os.path.join(
         'python',
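With the `min()` clamp above, requesting a version newer than the bundled grammars should quietly fall back to the 3.14 grammar. A sketch of the expected behaviour (the '3.15' string is hypothetical):

```python
import parso

grammar = parso.load_grammar(version="3.15")  # clamped to the 3.14 grammar
module = grammar.parse("x = 1\n")
assert module.type == "file_input"
```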
parso/parser.py

@@ -23,7 +23,7 @@ within the statement. This lowers memory usage and cpu time and reduces the
 complexity of the ``Parser`` (there's another parser sitting inside
 ``Statement``, which produces ``Array`` and ``Call``).
 """
-from typing import Dict
+from typing import Dict, Type
 
 from parso import tree
 from parso.pgen2.generator import ReservedString
@@ -110,10 +110,10 @@ class BaseParser:
     When a syntax error occurs, error_recovery() is called.
     """
 
-    node_map: Dict[str, type] = {}
+    node_map: Dict[str, Type[tree.BaseNode]] = {}
     default_node = tree.Node
 
-    leaf_map: Dict[str, type] = {}
+    leaf_map: Dict[str, Type[tree.Leaf]] = {}
     default_leaf = tree.Leaf
 
     def __init__(self, pgen_grammar, start_nonterminal='file_input', error_recovery=False):
@@ -156,8 +156,6 @@ class BaseParser:
             node = self.node_map[nonterminal](children)
         except KeyError:
             node = self.default_node(nonterminal, children)
-        for c in children:
-            c.parent = node
         return node
 
     def convert_leaf(self, type_, value, prefix, start_pos):
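Typing the registries as `Dict[str, Type[...]]` lets mypy verify that only node or leaf classes are registered and that instantiating an entry yields the expected base type. A self-contained sketch of the pattern (names hypothetical):

```python
from typing import Dict, Type

class BaseNode:
    pass

class IfStmt(BaseNode):  # hypothetical node class
    pass

# With Type[BaseNode] as the value type, registering a non-node class is a
# type error, and calling an entry is inferred to produce a BaseNode.
node_map: Dict[str, Type[BaseNode]] = {"if_stmt": IfStmt}
node = node_map["if_stmt"]()
```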
parso/pgen2/generator.py

@@ -276,7 +276,7 @@ def generate_grammar(bnf_grammar: str, token_namespace) -> Grammar:
         dfa_state.transitions[transition] = DFAPlan(next_dfa)
 
     _calculate_tree_traversal(rule_to_dfas)
-    return Grammar(start_nonterminal, rule_to_dfas, reserved_strings)  # type: ignore
+    return Grammar(start_nonterminal, rule_to_dfas, reserved_strings)  # type: ignore[arg-type]
 
 
 def _make_transition(token_namespace, reserved_syntax_strings, label):
parso/python/errors.py

@@ -1,11 +1,11 @@
 # -*- coding: utf-8 -*-
 import codecs
+import sys
 import warnings
 import re
 from contextlib import contextmanager
 
 from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
-from parso.python.tree import search_ancestor
 from parso.python.tokenize import _get_token_collection
 
 _BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
@@ -34,7 +34,10 @@ def _get_rhs_name(node, version):
             return "literal"
         else:
             if second.children[1] == ":" or second.children[0] == "**":
-                return "dict display"
+                if version < (3, 10):
+                    return "dict display"
+                else:
+                    return "dict literal"
             else:
                 return "set display"
     elif (
@@ -48,7 +51,10 @@ def _get_rhs_name(node, version):
     elif first == "[":
         return "list"
     elif first == "{" and second == "}":
-        return "dict display"
+        if version < (3, 10):
+            return "dict display"
+        else:
+            return "dict literal"
     elif first == "{" and len(node.children) > 2:
         return "set display"
     elif type_ == "keyword":
@@ -59,7 +65,10 @@ def _get_rhs_name(node, version):
         else:
             return str(node.value)
     elif type_ == "operator" and node.value == "...":
-        return "Ellipsis"
+        if version < (3, 10):
+            return "Ellipsis"
+        else:
+            return "ellipsis"
     elif type_ == "comparison":
         return "comparison"
     elif type_ in ("string", "number", "strings"):
@@ -84,7 +93,10 @@ def _get_rhs_name(node, version):
         or "_test" in type_
         or type_ in ("term", "factor")
     ):
-        return "operator"
+        if version < (3, 10):
+            return "operator"
+        else:
+            return "expression"
     elif type_ == "star_expr":
         return "starred"
     elif type_ == "testlist_star_expr":
@@ -147,8 +159,20 @@ def _remove_parens(atom):
     return atom
 
 
+def _skip_parens_bottom_up(node):
+    """
+    Returns an ancestor node of an expression, skipping all levels of parens
+    bottom-up.
+    """
+    while node.parent is not None:
+        node = node.parent
+        if node.type != 'atom' or node.children[0] != '(':
+            return node
+    return None
+
+
 def _iter_params(parent_node):
-    return (n for n in parent_node.children if n.type == 'param')
+    return (n for n in parent_node.children if n.type == 'param' or n.type == 'operator')
 
 
 def _is_future_import_first(import_from):
@@ -219,7 +243,7 @@ def _any_fstring_error(version, node):
     elif node.type == "fstring":
         return True
     else:
-        return search_ancestor(node, "fstring")
+        return node.search_ancestor("fstring")
 
 
 class _Context:
@@ -229,6 +253,7 @@ class _Context:
         self.parent_context = parent_context
         self._used_name_dict = {}
         self._global_names = []
+        self._local_params_names = []
         self._nonlocal_names = []
         self._nonlocal_names_in_subscopes = []
         self._add_syntax_error = add_syntax_error
@@ -252,6 +277,10 @@ class _Context:
             self._global_names.append(name)
         elif parent_type == 'nonlocal_stmt':
             self._nonlocal_names.append(name)
+        elif parent_type == 'funcdef':
+            self._local_params_names.extend(
+                [param.name.value for param in name.parent.get_params()]
+            )
         else:
             self._used_name_dict.setdefault(name.value, []).append(name)
@@ -279,6 +308,8 @@ class _Context:
         nonlocals_not_handled = []
         for nonlocal_name in self._nonlocal_names_in_subscopes:
             search = nonlocal_name.value
+            if search in self._local_params_names:
+                continue
             if search in global_name_strs or self.parent_context is None:
                 message = "no binding for nonlocal '%s' found" % nonlocal_name.value
                 self._add_syntax_error(nonlocal_name, message)
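The `_local_params_names` bookkeeping prevents a false positive for `nonlocal` names that are bound as parameters of the enclosing function. A hedged check through the public API:

```python
import parso

code = (
    "def outer(x):\n"
    "    def inner():\n"
    "        nonlocal x\n"
    "        x = 1\n"
)
grammar = parso.load_grammar(version="3.9")
# Valid Python: the nonlocal binds to the parameter x, so no
# "no binding for nonlocal 'x' found" issue should be reported.
print([issue.message for issue in grammar.iter_errors(grammar.parse(code))])
```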
@@ -592,7 +623,10 @@ class _NameChecks(SyntaxRule):
 
 @ErrorFinder.register_rule(type='string')
 class _StringChecks(SyntaxRule):
-    message = "bytes can only contain ASCII literal characters."
+    if sys.version_info < (3, 10):
+        message = "bytes can only contain ASCII literal characters."
+    else:
+        message = "bytes can only contain ASCII literal characters"
 
     def is_issue(self, leaf):
         string_prefix = leaf.string_prefix.lower()
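The only change is the trailing period, matching CPython's wording change in 3.10. The check itself is easy to exercise; a hedged sketch:

```python
import parso

grammar = parso.load_grammar(version="3.9")
# A non-ASCII character inside a bytes literal should trigger the rule above.
errors = list(grammar.iter_errors(grammar.parse("b'é'\n")))
# Expected: one issue whose message matches the version-gated text.
print(errors[0].message if errors else None)
```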
@@ -730,9 +764,34 @@ class _FutureImportRule(SyntaxRule):
 @ErrorFinder.register_rule(type='star_expr')
 class _StarExprRule(SyntaxRule):
     message_iterable_unpacking = "iterable unpacking cannot be used in comprehension"
-    message_assignment = "can use starred expression only as assignment target"
 
     def is_issue(self, node):
+        def check_delete_starred(node):
+            while node.parent is not None:
+                node = node.parent
+                if node.type == 'del_stmt':
+                    return True
+                if node.type not in (*_STAR_EXPR_PARENTS, 'atom'):
+                    return False
+            return False
+
+        if self._normalizer.version >= (3, 9):
+            ancestor = node.parent
+        else:
+            ancestor = _skip_parens_bottom_up(node)
+        # starred expression not in tuple/list/set
+        if ancestor.type not in (*_STAR_EXPR_PARENTS, 'dictorsetmaker') \
+                and not (ancestor.type == 'atom' and ancestor.children[0] != '('):
+            self.add_issue(node, message="can't use starred expression here")
+            return
+
+        if check_delete_starred(node):
+            if self._normalizer.version >= (3, 9):
+                self.add_issue(node, message="cannot delete starred")
+            else:
+                self.add_issue(node, message="can't use starred expression here")
+            return
+
         if node.parent.type == 'testlist_comp':
             # [*[] for a in [1]]
             if node.parent.children[1].type in _COMP_FOR_TYPES:
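A hedged check of the new `check_delete_starred` path; CPython 3.9 reports `cannot delete starred` for this input:

```python
import parso

grammar = parso.load_grammar(version="3.9")
issues = list(grammar.iter_errors(grammar.parse("del *a,\n")))
# Expected: one issue whose message mentions "cannot delete starred".
print([i.message for i in issues])
```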
@@ -742,39 +801,33 @@ class _StarExprRule(SyntaxRule):
 @ErrorFinder.register_rule(types=_STAR_EXPR_PARENTS)
 class _StarExprParentRule(SyntaxRule):
     def is_issue(self, node):
-        if node.parent.type == 'del_stmt':
-            if self._normalizer.version >= (3, 9):
-                self.add_issue(node.parent, message="cannot delete starred")
-            else:
-                self.add_issue(node.parent, message="can't use starred expression here")
-        else:
-            def is_definition(node, ancestor):
-                if ancestor is None:
-                    return False
+        def is_definition(node, ancestor):
+            if ancestor is None:
+                return False
 
-                type_ = ancestor.type
-                if type_ == 'trailer':
-                    return False
+            type_ = ancestor.type
+            if type_ == 'trailer':
+                return False
 
-                if type_ == 'expr_stmt':
-                    return node.start_pos < ancestor.children[-1].start_pos
+            if type_ == 'expr_stmt':
+                return node.start_pos < ancestor.children[-1].start_pos
 
-                return is_definition(node, ancestor.parent)
+            return is_definition(node, ancestor.parent)
 
-            if is_definition(node, node.parent):
-                args = [c for c in node.children if c != ',']
-                starred = [c for c in args if c.type == 'star_expr']
-                if len(starred) > 1:
-                    if self._normalizer.version < (3, 9):
-                        message = "two starred expressions in assignment"
-                    else:
-                        message = "multiple starred expressions in assignment"
-                    self.add_issue(starred[1], message=message)
-                elif starred:
-                    count = args.index(starred[0])
-                    if count >= 256:
-                        message = "too many expressions in star-unpacking assignment"
-                        self.add_issue(starred[0], message=message)
+        if is_definition(node, node.parent):
+            args = [c for c in node.children if c != ',']
+            starred = [c for c in args if c.type == 'star_expr']
+            if len(starred) > 1:
+                if self._normalizer.version < (3, 9):
+                    message = "two starred expressions in assignment"
+                else:
+                    message = "multiple starred expressions in assignment"
+                self.add_issue(starred[1], message=message)
+            elif starred:
+                count = args.index(starred[0])
+                if count >= 256:
+                    message = "too many expressions in star-unpacking assignment"
+                    self.add_issue(starred[0], message=message)
 
 
 @ErrorFinder.register_rule(type='annassign')
@@ -911,17 +964,28 @@ class _ParameterRule(SyntaxRule):
     def is_issue(self, node):
         param_names = set()
         default_only = False
+        star_seen = False
         for p in _iter_params(node):
+            if p.type == 'operator':
+                if p.value == '*':
+                    star_seen = True
+                    default_only = False
+                continue
+
             if p.name.value in param_names:
                 message = "duplicate argument '%s' in function definition"
                 self.add_issue(p.name, message=message % p.name.value)
             param_names.add(p.name.value)
 
-            if p.default is None and not p.star_count:
-                if default_only:
-                    return True
-            else:
-                default_only = True
+            if not star_seen:
+                if p.default is None and not p.star_count:
+                    if default_only:
+                        return True
+                elif p.star_count:
+                    star_seen = True
+                    default_only = False
+                else:
+                    default_only = True
 
 
 @ErrorFinder.register_rule(type='try_stmt')
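The `star_seen` flag stops the "non-default after default" logic at a bare `*`, since keyword-only parameters may legitimately lack defaults. A hedged check:

```python
import parso

grammar = parso.load_grammar(version="3.8")
tree = grammar.parse("def f(a=1, *, b): pass\n")
# With the bare-'*' handling above, no issue should be reported for the
# keyword-only parameter b even though it has no default.
print([i.message for i in grammar.iter_errors(tree)])  # expected: []
```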
@@ -995,14 +1059,20 @@ class _CheckAssignmentRule(SyntaxRule):
                     error = 'literal'
                 else:
                     if second.children[1] == ':':
-                        error = 'dict display'
+                        if self._normalizer.version < (3, 10):
+                            error = 'dict display'
+                        else:
+                            error = 'dict literal'
                     else:
                         error = 'set display'
             elif first == "{" and second == "}":
                 if self._normalizer.version < (3, 8):
                     error = 'literal'
                 else:
-                    error = "dict display"
+                    if self._normalizer.version < (3, 10):
+                        error = "dict display"
+                    else:
+                        error = "dict literal"
             elif first == "{" and len(node.children) > 2:
                 if self._normalizer.version < (3, 8):
                     error = 'literal'
@@ -1035,7 +1105,10 @@ class _CheckAssignmentRule(SyntaxRule):
             error = str(node.value)
         elif type_ == 'operator':
             if node.value == '...':
-                error = 'Ellipsis'
+                if self._normalizer.version < (3, 10):
+                    error = 'Ellipsis'
+                else:
+                    error = 'ellipsis'
         elif type_ == 'comparison':
             error = 'comparison'
         elif type_ in ('string', 'number', 'strings'):
@@ -1050,7 +1123,10 @@ class _CheckAssignmentRule(SyntaxRule):
             if node.children[0] == 'await':
                 error = 'await expression'
             elif node.children[-2] == '**':
-                error = 'operator'
+                if self._normalizer.version < (3, 10):
+                    error = 'operator'
+                else:
+                    error = 'expression'
             else:
                 # Has a trailer
                 trailer = node.children[-1]
@@ -1072,15 +1148,25 @@ class _CheckAssignmentRule(SyntaxRule):
         elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
               or '_test' in type_
               or type_ in ('term', 'factor')):
-            error = 'operator'
+            if self._normalizer.version < (3, 10):
+                error = 'operator'
+            else:
+                error = 'expression'
         elif type_ == "star_expr":
             if is_deletion:
                 if self._normalizer.version >= (3, 9):
                     error = "starred"
                 else:
                     self.add_issue(node, message="can't use starred expression here")
-            elif not search_ancestor(node, *_STAR_EXPR_PARENTS) and not is_aug_assign:
-                self.add_issue(node, message="starred assignment target must be in a list or tuple")
+            else:
+                if self._normalizer.version >= (3, 9):
+                    ancestor = node.parent
+                else:
+                    ancestor = _skip_parens_bottom_up(node)
+                if ancestor.type not in _STAR_EXPR_PARENTS and not is_aug_assign \
+                        and not (ancestor.type == 'atom' and ancestor.children[0] == '['):
+                    message = "starred assignment target must be in a list or tuple"
+                    self.add_issue(node, message=message)
 
             self._check_assignment(node.children[1])
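A hedged check of the starred-target branch; CPython rejects a bare starred target but accepts it inside a tuple target:

```python
import parso

grammar = parso.load_grammar(version="3.9")
bad = grammar.parse("*a = [1, 2]\n")
# Expected: one issue mentioning "starred assignment target must be in a
# list or tuple".
print([i.message for i in grammar.iter_errors(bad)])

ok = grammar.parse("*a, b = [1, 2]\n")
print(list(grammar.iter_errors(ok)))  # expected: []
```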
@@ -1209,7 +1295,7 @@ class _NamedExprRule(_CheckAssignmentRule):
         def search_all_comp_ancestors(node):
             has_ancestors = False
             while True:
-                node = search_ancestor(node, 'testlist_comp', 'dictorsetmaker')
+                node = node.search_ancestor('testlist_comp', 'dictorsetmaker')
                 if node is None:
                     break
                 for child in node.children:
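This ancestor walk backs the walrus-operator checks; CPython forbids rebinding a comprehension iteration variable with `:=`. A hedged sketch:

```python
import parso

grammar = parso.load_grammar(version="3.8")
code = "[i := 0 for i in range(3)]\n"
# Expected: an issue about rebinding the comprehension iteration variable i.
print([i.message for i in grammar.iter_errors(grammar.parse(code))])
```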
One of the bundled Python grammar files (parso/python/grammar3*.txt):

@@ -97,9 +97,7 @@ suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
 
 namedexpr_test: test [':=' test]
 test: or_test ['if' or_test 'else' test] | lambdef
-test_nocond: or_test | lambdef_nocond
 lambdef: 'lambda' [varargslist] ':' test
-lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
 or_test: and_test ('or' and_test)*
 and_test: not_test ('and' not_test)*
 not_test: 'not' not_test | comparison
@@ -155,7 +153,7 @@ argument: ( test [comp_for] |
 comp_iter: comp_for | comp_if
 sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_for: ['async'] sync_comp_for
-comp_if: 'if' test_nocond [comp_iter]
+comp_if: 'if' or_test [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
 encoding_decl: NAME
parso/python/grammar311.txt (new file, 169 lines)

# Grammar for Python

# NOTE WELL: You should also follow all the steps listed at
# https://devguide.python.org/grammar/

# Start symbols for the grammar:
# single_input is a single interactive statement;
# file_input is a module or sequence of commands read from an input file;
# eval_input is the input for the eval() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
file_input: stmt* ENDMARKER
eval_input: testlist NEWLINE* ENDMARKER

decorator: '@' namedexpr_test NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | async_funcdef)

async_funcdef: 'async' funcdef
funcdef: 'def' NAME parameters ['->' test] ':' suite

parameters: '(' [typedargslist] ')'
typedargslist: (
  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
        ',' tfpdef ['=' test])* ([',' [
        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
        | '**' tfpdef [',']]])
      | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
      | '**' tfpdef [',']]] )
  | (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
        | '**' tfpdef [',']]]
      | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
      | '**' tfpdef [','])
)
tfpdef: NAME [':' test]
varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']]]
      | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
      | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
        | '**' vfpdef [',']]]
      | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
      | '**' vfpdef [',']
)
vfpdef: NAME

stmt: simple_stmt | compound_stmt | NEWLINE
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
            '<<=' | '>>=' | '**=' | '//=')
# For normal and annotated assignments, additional restrictions enforced by the interpreter
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist_star_expr]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test]]
import_stmt: import_name | import_from
import_name: 'import' dotted_as_names
# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
              'import' ('*' | '(' import_as_names ')' | import_as_names))
import_as_name: NAME ['as' NAME]
dotted_as_name: dotted_name ['as' NAME]
import_as_names: import_as_name (',' import_as_name)* [',']
dotted_as_names: dotted_as_name (',' dotted_as_name)*
dotted_name: NAME ('.' NAME)*
global_stmt: 'global' NAME (',' NAME)*
nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
assert_stmt: 'assert' test [',' test]

compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
async_stmt: 'async' (funcdef | with_stmt | for_stmt)
if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite
           ((except_clause ':' suite)+
            ['else' ':' suite]
            ['finally' ':' suite] |
            'finally' ':' suite))
with_stmt: 'with' with_item (',' with_item)* ':' suite
with_item: test ['as' expr]
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test ['as' NAME]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT

namedexpr_test: test [':=' test]
test: or_test ['if' or_test 'else' test] | lambdef
lambdef: 'lambda' [varargslist] ':' test
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
# <> isn't actually a valid comparison operator in Python. It's here for the
# sake of a __future__ import described in PEP 401 (which really works :-)
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom_expr ['**' factor]
atom_expr: ['await'] atom trailer*
atom: ('(' [yield_expr|testlist_comp] ')' |
       '[' [testlist_comp] ']' |
       '{' [dictorsetmaker] '}' |
       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: subscript (',' subscript)* [',']
subscript: test [':=' test] | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictorsetmaker: ( ((test ':' test | '**' expr)
                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
                  ((test [':=' test] | star_expr)
                   (comp_for | (',' (test [':=' test] | star_expr))* [','])) )

classdef: 'class' NAME ['(' [arglist] ')'] ':' suite

arglist: argument (',' argument)* [',']

# The reason that keywords are test nodes instead of NAME is that using NAME
# results in an ambiguity. ast.c makes sure it's a NAME.
# "test '=' test" is really "keyword '=' test", but we have no such token.
# These need to be in a single rule to avoid grammar that is ambiguous
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
            test ':=' test |
            test '=' test |
            '**' test |
            '*' test )

comp_iter: comp_for | comp_if
sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
comp_for: ['async'] sync_comp_for
comp_if: 'if' or_test [comp_iter]

# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME

yield_expr: 'yield' [yield_arg]
yield_arg: 'from' test | testlist_star_expr

strings: (STRING | fstring)+
fstring: FSTRING_START fstring_content* FSTRING_END
fstring_content: FSTRING_STRING | fstring_expr
fstring_conversion: '!' NAME
fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
fstring_format_spec: ':' fstring_content*
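The `fstring_expr` rule above includes the optional `'='` of self-documenting f-string expressions (Python 3.8+). A quick parse check, assuming the bundled tokenizer handles the f-string tokens:

```python
import parso

module = parso.parse("f'{value=}'\n", version="3.11")
assert module.type == "file_input"
```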
parso/python/grammar312.txt (new file, 169 lines; content identical to parso/python/grammar311.txt above)
parso/python/grammar313.txt (new file, 169 lines; content identical to parso/python/grammar311.txt above)
parso/python/grammar314.txt (new file, 169 lines; content identical to parso/python/grammar311.txt above)
|
||||
# These need to be in a single rule to avoid grammar that is ambiguous
|
||||
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
|
||||
# we explicitly match '*' here, too, to give it proper precedence.
|
||||
# Illegal combinations and orderings are blocked in ast.c:
|
||||
# multiple (test comp_for) arguments are blocked; keyword unpackings
|
||||
# that precede iterable unpackings are blocked; etc.
|
||||
argument: ( test [comp_for] |
|
||||
test ':=' test |
|
||||
test '=' test |
|
||||
'**' test |
|
||||
'*' test )
|
||||
|
||||
comp_iter: comp_for | comp_if
|
||||
sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
|
||||
comp_for: ['async'] sync_comp_for
|
||||
comp_if: 'if' or_test [comp_iter]
|
||||
|
||||
# not used in grammar, but may appear in "node" passed from Parser to Compiler
|
||||
encoding_decl: NAME
|
||||
|
||||
yield_expr: 'yield' [yield_arg]
|
||||
yield_arg: 'from' test | testlist_star_expr
|
||||
|
||||
strings: (STRING | fstring)+
|
||||
fstring: FSTRING_START fstring_content* FSTRING_END
|
||||
fstring_content: FSTRING_STRING | fstring_expr
|
||||
fstring_conversion: '!' NAME
|
||||
fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
|
||||
fstring_format_spec: ':' fstring_content*
|
||||
@@ -97,9 +97,7 @@ suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
|
||||
|
||||
namedexpr_test: test [':=' test]
|
||||
test: or_test ['if' or_test 'else' test] | lambdef
|
||||
test_nocond: or_test | lambdef_nocond
|
||||
lambdef: 'lambda' [varargslist] ':' test
|
||||
lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
|
||||
or_test: and_test ('or' and_test)*
|
||||
and_test: not_test ('and' not_test)*
|
||||
not_test: 'not' not_test | comparison
|
||||
@@ -155,7 +153,7 @@ argument: ( test [comp_for] |
|
||||
comp_iter: comp_for | comp_if
|
||||
sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
|
||||
comp_for: ['async'] sync_comp_for
|
||||
comp_if: 'if' test_nocond [comp_iter]
|
||||
comp_if: 'if' or_test [comp_iter]
|
||||
|
||||
# not used in grammar, but may appear in "node" passed from Parser to Compiler
|
||||
encoding_decl: NAME
|
||||
|
||||
@@ -96,8 +96,6 @@ class Parser(BaseParser):
|
||||
# prefixes. Just ignore them.
|
||||
children = [children[0]] + children[2:-1]
|
||||
node = self.default_node(nonterminal, children)
|
||||
for c in children:
|
||||
c.parent = node
|
||||
return node
|
||||
|
||||
def convert_leaf(self, type, value, prefix, start_pos):
|
||||
@@ -185,8 +183,6 @@ class Parser(BaseParser):
|
||||
|
||||
if all_nodes:
|
||||
node = tree.PythonErrorNode(all_nodes)
|
||||
for n in all_nodes:
|
||||
n.parent = node
|
||||
self.stack[start_index - 1].nodes.append(node)
|
||||
|
||||
self.stack[start_index:] = []
|
||||
|
||||
@@ -4,7 +4,7 @@ from typing import Tuple
|
||||
|
||||
from parso.python.errors import ErrorFinder, ErrorFinderConfig
|
||||
from parso.normalizer import Rule
|
||||
from parso.python.tree import search_ancestor, Flow, Scope
|
||||
from parso.python.tree import Flow, Scope
|
||||
|
||||
|
||||
_IMPORT_TYPES = ('import_name', 'import_from')
|
||||
@@ -74,7 +74,7 @@ class BracketNode(IndentationNode):
|
||||
parent_indentation = n.indentation
|
||||
|
||||
next_leaf = leaf.get_next_leaf()
|
||||
if '\n' in next_leaf.prefix:
|
||||
if '\n' in next_leaf.prefix or '\r' in next_leaf.prefix:
|
||||
# This implies code like:
|
||||
# foobarbaz(
|
||||
# a,
|
||||
@@ -116,7 +116,7 @@ class ImplicitNode(BracketNode):
|
||||
self.type = IndentationTypes.IMPLICIT
|
||||
|
||||
next_leaf = leaf.get_next_leaf()
|
||||
if leaf == ':' and '\n' not in next_leaf.prefix:
|
||||
if leaf == ':' and '\n' not in next_leaf.prefix and '\r' not in next_leaf.prefix:
|
||||
self.indentation += ' '
|
||||
|
||||
|
||||
@@ -124,7 +124,7 @@ class BackslashNode(IndentationNode):
|
||||
type = IndentationTypes.BACKSLASH
|
||||
|
||||
def __init__(self, config, parent_indentation, containing_leaf, spacing, parent=None):
|
||||
expr_stmt = search_ancestor(containing_leaf, 'expr_stmt')
|
||||
expr_stmt = containing_leaf.search_ancestor('expr_stmt')
|
||||
if expr_stmt is not None:
|
||||
equals = expr_stmt.children[-2]
|
||||
|
||||
@@ -216,8 +216,8 @@ class PEP8Normalizer(ErrorFinder):
|
||||
endmarker = node.children[-1]
|
||||
prev = endmarker.get_previous_leaf()
|
||||
prefix = endmarker.prefix
|
||||
if (not prefix.endswith('\n') and (
|
||||
prefix or prev is None or prev.value != '\n')):
|
||||
if (not prefix.endswith('\n') and not prefix.endswith('\r') and (
|
||||
prefix or prev is None or prev.value not in {'\n', '\r\n', '\r'})):
|
||||
self.add_issue(endmarker, 292, "No newline at end of file")
|
||||
|
||||
if typ in _IMPORT_TYPES:
|
||||
@@ -465,7 +465,8 @@ class PEP8Normalizer(ErrorFinder):
|
||||
+ self._config.indentation:
|
||||
self.add_issue(part, 129, "Line with same indent as next logical block")
|
||||
elif indentation != should_be_indentation:
|
||||
if not self._check_tabs_spaces(spacing) and part.value != '\n':
|
||||
if not self._check_tabs_spaces(spacing) and part.value not in \
|
||||
{'\n', '\r\n', '\r'}:
|
||||
if value in '])}':
|
||||
if node.type == IndentationTypes.VERTICAL_BRACKET:
|
||||
self.add_issue(
|
||||
@@ -652,7 +653,8 @@ class PEP8Normalizer(ErrorFinder):
|
||||
else:
|
||||
prev_spacing = self._previous_spacing
|
||||
if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
|
||||
and '\n' not in self._previous_leaf.prefix:
|
||||
and '\n' not in self._previous_leaf.prefix \
|
||||
and '\r' not in self._previous_leaf.prefix:
|
||||
message = "Whitespace before operator doesn't match with whitespace after"
|
||||
self.add_issue(spacing, 229, message)
|
||||
|
||||
@@ -724,11 +726,11 @@ class PEP8Normalizer(ErrorFinder):
|
||||
|
||||
def add_issue(self, node, code, message):
|
||||
if self._previous_leaf is not None:
|
||||
if search_ancestor(self._previous_leaf, 'error_node') is not None:
|
||||
if self._previous_leaf.search_ancestor('error_node') is not None:
|
||||
return
|
||||
if self._previous_leaf.type == 'error_leaf':
|
||||
return
|
||||
if search_ancestor(node, 'error_node') is not None:
|
||||
if node.search_ancestor('error_node') is not None:
|
||||
return
|
||||
if code in (901, 903):
|
||||
# 901 and 903 are raised by the ErrorFinder.
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import re
|
||||
from codecs import BOM_UTF8
|
||||
from typing import Tuple
|
||||
|
||||
from parso.python.tokenize import group
|
||||
|
||||
@@ -13,11 +14,11 @@ class PrefixPart:
|
||||
self.type = typ
|
||||
self.value = value
|
||||
self.spacing = spacing
|
||||
self.start_pos = start_pos
|
||||
self.start_pos: Tuple[int, int] = start_pos
|
||||
|
||||
@property
|
||||
def end_pos(self):
|
||||
if self.value.endswith('\n'):
|
||||
def end_pos(self) -> Tuple[int, int]:
|
||||
if self.value.endswith('\n') or self.value.endswith('\r'):
|
||||
return self.start_pos[0] + 1, 0
|
||||
if self.value == unicode_bom:
|
||||
# The bom doesn't have a length at the start of a Python file.
|
||||
@@ -39,10 +40,18 @@ class PrefixPart:
|
||||
self.start_pos
|
||||
)
|
||||
|
||||
def search_ancestor(self, *node_types):
|
||||
node = self.parent
|
||||
while node is not None:
|
||||
if node.type in node_types:
|
||||
return node
|
||||
node = node.parent
|
||||
return None
|
||||
|
||||
|
||||
_comment = r'#[^\n\r\f]*'
|
||||
_backslash = r'\\\r?\n'
|
||||
_newline = r'\r?\n'
|
||||
_backslash = r'\\\r?\n|\\\r'
|
||||
_newline = r'\r?\n|\r'
|
||||
_form_feed = r'\f'
|
||||
_only_spacing = '$'
|
||||
_spacing = r'[ \t]*'
|
||||
@@ -85,7 +94,7 @@ def split_prefix(leaf, start_pos):
|
||||
bom = True
|
||||
|
||||
start = match.end(0)
|
||||
if value.endswith('\n'):
|
||||
if value.endswith('\n') or value.endswith('\r'):
|
||||
line += 1
|
||||
column = -start
|
||||
|
||||
|
||||
@@ -110,10 +110,11 @@ def _get_token_collection(version_info):
|
||||
_create_token_collection(version_info)
|
||||
return result
|
||||
|
||||
|
||||
unicode_character_name = r'[A-Za-z0-9\-]+(?: [A-Za-z0-9\-]+)*'
|
||||
fstring_string_single_line = _compile(
|
||||
r'(?:\{\{|\}\}|\\N\{' + unicode_character_name +
|
||||
r'\}|\\(?:\r\n?|\n)|\\[^\r\nN]|[^{}\r\n\\])+'
|
||||
r'(?:\{\{|\}\}|\\N\{' + unicode_character_name
|
||||
+ r'\}|\\(?:\r\n?|\n)|\\[^\r\nN]|[^{}\r\n\\])+'
|
||||
)
|
||||
fstring_string_multi_line = _compile(
|
||||
r'(?:\{\{|\}\}|\\N\{' + unicode_character_name + r'\}|\\[^N]|[^{}\\])+'
|
||||
@@ -547,7 +548,7 @@ def tokenize_lines(
|
||||
additional_prefix = prefix + token
|
||||
new_line = True
|
||||
elif initial == '#': # Comments
|
||||
assert not token.endswith("\n")
|
||||
assert not token.endswith("\n") and not token.endswith("\r")
|
||||
if fstring_stack and fstring_stack[-1].is_in_expr():
|
||||
# `#` is not allowed in f-string expressions
|
||||
yield PythonToken(ERRORTOKEN, initial, spos, prefix)
|
||||
|
||||
@@ -47,9 +47,9 @@ try:
|
||||
from collections.abc import Mapping
|
||||
except ImportError:
|
||||
from collections import Mapping
|
||||
from typing import Tuple
|
||||
|
||||
from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, \
|
||||
search_ancestor
|
||||
from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, search_ancestor # noqa
|
||||
from parso.python.prefix import split_prefix
|
||||
from parso.utils import split_lines
|
||||
|
||||
@@ -149,7 +149,7 @@ class _LeafWithoutNewlines(PythonLeaf):
|
||||
__slots__ = ()
|
||||
|
||||
@property
|
||||
def end_pos(self):
|
||||
def end_pos(self) -> Tuple[int, int]:
|
||||
return self.line, self.column + len(self.value)
|
||||
|
||||
|
||||
@@ -295,6 +295,8 @@ class FStringEnd(PythonLeaf):
|
||||
|
||||
|
||||
class _StringComparisonMixin:
|
||||
__slots__ = ()
|
||||
|
||||
def __eq__(self, other):
|
||||
"""
|
||||
Make comparisons with strings easy.
|
||||
@@ -544,11 +546,16 @@ class Function(ClassOrFunc):
|
||||
4. annotation (if present)
|
||||
"""
|
||||
type = 'funcdef'
|
||||
__slots__ = ()
|
||||
|
||||
def __init__(self, children):
|
||||
super().__init__(children)
|
||||
parameters = self.children[2] # After `def foo`
|
||||
parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1])
|
||||
parameters_children = parameters.children[1:-1]
|
||||
# If input parameters list already has Param objects, keep it as is;
|
||||
# otherwise, convert it to a list of Param objects.
|
||||
if not any(isinstance(child, Param) for child in parameters_children):
|
||||
parameters.children[1:-1] = _create_params(parameters, parameters_children)
|
||||
|
||||
def _get_param_nodes(self):
|
||||
return self.children[2].children
|
||||
@@ -651,7 +658,11 @@ class Lambda(Function):
|
||||
# We don't want to call the Function constructor, call its parent.
|
||||
super(Function, self).__init__(children)
|
||||
# Everything between `lambda` and the `:` operator is a parameter.
|
||||
self.children[1:-2] = _create_params(self, self.children[1:-2])
|
||||
parameters_children = self.children[1:-2]
|
||||
# If input children list already has Param objects, keep it as is;
|
||||
# otherwise, convert it to a list of Param objects.
|
||||
if not any(isinstance(child, Param) for child in parameters_children):
|
||||
self.children[1:-2] = _create_params(self, parameters_children)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
@@ -775,7 +786,7 @@ class WithStmt(Flow):
|
||||
return names
|
||||
|
||||
def get_test_node_from_name(self, name):
|
||||
node = search_ancestor(name, "with_item")
|
||||
node = name.search_ancestor("with_item")
|
||||
if node is None:
|
||||
raise ValueError('The name is not actually part of a with statement.')
|
||||
return node.children[0]
|
||||
@@ -1079,11 +1090,9 @@ class Param(PythonBaseNode):
|
||||
"""
|
||||
type = 'param'
|
||||
|
||||
def __init__(self, children, parent):
|
||||
def __init__(self, children, parent=None):
|
||||
super().__init__(children)
|
||||
self.parent = parent
|
||||
for child in children:
|
||||
child.parent = self
|
||||
|
||||
@property
|
||||
def star_count(self):
|
||||
@@ -1170,7 +1179,7 @@ class Param(PythonBaseNode):
|
||||
"""
|
||||
Returns the function/lambda of a parameter.
|
||||
"""
|
||||
return search_ancestor(self, 'funcdef', 'lambdef')
|
||||
return self.search_ancestor('funcdef', 'lambdef')
|
||||
|
||||
def get_code(self, include_prefix=True, include_comma=True):
|
||||
"""
|
||||
|
||||
159
parso/tree.py
159
parso/tree.py
@@ -1,32 +1,41 @@
|
||||
from abc import abstractmethod, abstractproperty
|
||||
from typing import List, Optional, Tuple, Union
|
||||
|
||||
from parso.utils import split_lines
|
||||
|
||||
|
||||
def search_ancestor(node, *node_types):
|
||||
def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 'Optional[BaseNode]':
|
||||
"""
|
||||
Recursively looks at the parents of a node and returns the first found node
|
||||
that matches node_types. Returns ``None`` if no matching node is found.
|
||||
that matches ``node_types``. Returns ``None`` if no matching node is found.
|
||||
|
||||
This function is deprecated, use :meth:`NodeOrLeaf.search_ancestor` instead.
|
||||
|
||||
:param node: The ancestors of this node will be checked.
|
||||
:param node_types: type names that are searched for.
|
||||
:type node_types: tuple of str
|
||||
"""
|
||||
while True:
|
||||
node = node.parent
|
||||
if node is None or node.type in node_types:
|
||||
return node
|
||||
n = node.parent
|
||||
while n is not None:
|
||||
if n.type in node_types:
|
||||
return n
|
||||
n = n.parent
|
||||
return None
|
||||
|
||||
|
||||
class NodeOrLeaf:
|
||||
"""
|
||||
The base class for nodes and leaves.
|
||||
"""
|
||||
__slots__ = ()
|
||||
__slots__ = ('parent',)
|
||||
type: str
|
||||
'''
|
||||
The type is a string that typically matches the types of the grammar file.
|
||||
'''
|
||||
parent: 'Optional[BaseNode]'
|
||||
'''
|
||||
The parent :class:`BaseNode` of this node or leaf.
|
||||
None if this is the root node.
|
||||
'''
|
||||
|
||||
def get_root_node(self):
|
||||
"""
|
||||
@@ -125,7 +134,7 @@ class NodeOrLeaf:
|
||||
return node
|
||||
|
||||
@abstractproperty
|
||||
def start_pos(self):
|
||||
def start_pos(self) -> Tuple[int, int]:
|
||||
"""
|
||||
Returns the starting position of the prefix as a tuple, e.g. `(3, 4)`.
|
||||
|
||||
@@ -133,7 +142,7 @@ class NodeOrLeaf:
|
||||
"""
|
||||
|
||||
@abstractproperty
|
||||
def end_pos(self):
|
||||
def end_pos(self) -> Tuple[int, int]:
|
||||
"""
|
||||
Returns the end position of the prefix as a tuple, e.g. `(3, 4)`.
|
||||
|
||||
@@ -172,15 +181,119 @@ class NodeOrLeaf:
|
||||
e.g. a statement.
|
||||
"""
|
||||
|
||||
def search_ancestor(self, *node_types: str) -> 'Optional[BaseNode]':
|
||||
"""
|
||||
Recursively looks at the parents of this node or leaf and returns the
|
||||
first found node that matches ``node_types``. Returns ``None`` if no
|
||||
matching node is found.
|
||||
|
||||
:param node_types: type names that are searched for.
|
||||
"""
|
||||
node = self.parent
|
||||
while node is not None:
|
||||
if node.type in node_types:
|
||||
return node
|
||||
node = node.parent
|
||||
return None
|
||||
|
||||
def dump(self, *, indent: Optional[Union[int, str]] = 4) -> str:
|
||||
"""
|
||||
Returns a formatted dump of the parser tree rooted at this node or leaf. This is
|
||||
mainly useful for debugging purposes.
|
||||
|
||||
The ``indent`` parameter is interpreted in a similar way as :py:func:`ast.dump`.
|
||||
If ``indent`` is a non-negative integer or string, then the tree will be
|
||||
pretty-printed with that indent level. An indent level of 0, negative, or ``""``
|
||||
will only insert newlines. ``None`` selects the single line representation.
|
||||
Using a positive integer indent indents that many spaces per level. If
|
||||
``indent`` is a string (such as ``"\\t"``), that string is used to indent each
|
||||
level.
|
||||
|
||||
:param indent: Indentation style as described above. The default indentation is
|
||||
4 spaces, which yields a pretty-printed dump.
|
||||
|
||||
>>> import parso
|
||||
>>> print(parso.parse("lambda x, y: x + y").dump())
|
||||
Module([
|
||||
Lambda([
|
||||
Keyword('lambda', (1, 0)),
|
||||
Param([
|
||||
Name('x', (1, 7), prefix=' '),
|
||||
Operator(',', (1, 8)),
|
||||
]),
|
||||
Param([
|
||||
Name('y', (1, 10), prefix=' '),
|
||||
]),
|
||||
Operator(':', (1, 11)),
|
||||
PythonNode('arith_expr', [
|
||||
Name('x', (1, 13), prefix=' '),
|
||||
Operator('+', (1, 15), prefix=' '),
|
||||
Name('y', (1, 17), prefix=' '),
|
||||
]),
|
||||
]),
|
||||
EndMarker('', (1, 18)),
|
||||
])
|
||||
"""
|
||||
if indent is None:
|
||||
newline = False
|
||||
indent_string = ''
|
||||
elif isinstance(indent, int):
|
||||
newline = True
|
||||
indent_string = ' ' * indent
|
||||
elif isinstance(indent, str):
|
||||
newline = True
|
||||
indent_string = indent
|
||||
else:
|
||||
raise TypeError(f"expect 'indent' to be int, str or None, got {indent!r}")
|
||||
|
||||
def _format_dump(node: NodeOrLeaf, indent: str = '', top_level: bool = True) -> str:
|
||||
result = ''
|
||||
node_type = type(node).__name__
|
||||
if isinstance(node, Leaf):
|
||||
result += f'{indent}{node_type}('
|
||||
if isinstance(node, ErrorLeaf):
|
||||
result += f'{node.token_type!r}, '
|
||||
elif isinstance(node, TypedLeaf):
|
||||
result += f'{node.type!r}, '
|
||||
result += f'{node.value!r}, {node.start_pos!r}'
|
||||
if node.prefix:
|
||||
result += f', prefix={node.prefix!r}'
|
||||
result += ')'
|
||||
elif isinstance(node, BaseNode):
|
||||
result += f'{indent}{node_type}('
|
||||
if isinstance(node, Node):
|
||||
result += f'{node.type!r}, '
|
||||
result += '['
|
||||
if newline:
|
||||
result += '\n'
|
||||
for child in node.children:
|
||||
result += _format_dump(child, indent=indent + indent_string, top_level=False)
|
||||
result += f'{indent}])'
|
||||
else: # pragma: no cover
|
||||
# We shouldn't ever reach here, unless:
|
||||
# - `NodeOrLeaf` is incorrectly subclassed else where
|
||||
# - or a node's children list contains invalid nodes or leafs
|
||||
# Both are unexpected internal errors.
|
||||
raise TypeError(f'unsupported node encountered: {node!r}')
|
||||
if not top_level:
|
||||
if newline:
|
||||
result += ',\n'
|
||||
else:
|
||||
result += ', '
|
||||
return result
|
||||
|
||||
return _format_dump(self)
|
||||
|
||||
|
||||
class Leaf(NodeOrLeaf):
|
||||
'''
|
||||
Leafs are basically tokens with a better API. Leafs exactly know where they
|
||||
were defined and what text preceeds them.
|
||||
'''
|
||||
__slots__ = ('value', 'parent', 'line', 'column', 'prefix')
|
||||
__slots__ = ('value', 'line', 'column', 'prefix')
|
||||
prefix: str
|
||||
|
||||
def __init__(self, value, start_pos, prefix=''):
|
||||
def __init__(self, value: str, start_pos: Tuple[int, int], prefix: str = '') -> None:
|
||||
self.value = value
|
||||
'''
|
||||
:py:func:`str` The value of the current token.
|
||||
@@ -191,17 +304,17 @@ class Leaf(NodeOrLeaf):
|
||||
:py:func:`str` Typically a mixture of whitespace and comments. Stuff
|
||||
that is syntactically irrelevant for the syntax tree.
|
||||
'''
|
||||
self.parent = None
|
||||
self.parent: Optional[BaseNode] = None
|
||||
'''
|
||||
The parent :class:`BaseNode` of this leaf.
|
||||
'''
|
||||
|
||||
@property
|
||||
def start_pos(self):
|
||||
def start_pos(self) -> Tuple[int, int]:
|
||||
return self.line, self.column
|
||||
|
||||
@start_pos.setter
|
||||
def start_pos(self, value):
|
||||
def start_pos(self, value: Tuple[int, int]) -> None:
|
||||
self.line = value[0]
|
||||
self.column = value[1]
|
||||
|
||||
@@ -226,7 +339,7 @@ class Leaf(NodeOrLeaf):
|
||||
return self.value
|
||||
|
||||
@property
|
||||
def end_pos(self):
|
||||
def end_pos(self) -> Tuple[int, int]:
|
||||
lines = split_lines(self.value)
|
||||
end_pos_line = self.line + len(lines) - 1
|
||||
# Check for multiline token
|
||||
@@ -256,28 +369,30 @@ class BaseNode(NodeOrLeaf):
|
||||
The super class for all nodes.
|
||||
A node has children, a type and possibly a parent node.
|
||||
"""
|
||||
__slots__ = ('children', 'parent')
|
||||
__slots__ = ('children',)
|
||||
|
||||
def __init__(self, children):
|
||||
def __init__(self, children: List[NodeOrLeaf]) -> None:
|
||||
self.children = children
|
||||
"""
|
||||
A list of :class:`NodeOrLeaf` child nodes.
|
||||
"""
|
||||
self.parent = None
|
||||
self.parent: Optional[BaseNode] = None
|
||||
'''
|
||||
The parent :class:`BaseNode` of this leaf.
|
||||
The parent :class:`BaseNode` of this node.
|
||||
None if this is the root node.
|
||||
'''
|
||||
for child in children:
|
||||
child.parent = self
|
||||
|
||||
@property
|
||||
def start_pos(self):
|
||||
def start_pos(self) -> Tuple[int, int]:
|
||||
return self.children[0].start_pos
|
||||
|
||||
def get_start_pos_of_prefix(self):
|
||||
return self.children[0].get_start_pos_of_prefix()
|
||||
|
||||
@property
|
||||
def end_pos(self):
|
||||
def end_pos(self) -> Tuple[int, int]:
|
||||
return self.children[-1].end_pos
|
||||
|
||||
def _get_code_for_children(self, children, include_prefix):
|
||||
|
||||
@@ -92,7 +92,7 @@ def python_bytes_to_unicode(
|
||||
# UTF-8 byte-order mark
|
||||
return 'utf-8'
|
||||
|
||||
first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0)
|
||||
first_two_lines = re.match(br'(?:[^\r\n]*(?:\r\n|\r|\n)){0,2}', source).group(0)
|
||||
possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)",
|
||||
first_two_lines)
|
||||
if possible_encoding:
|
||||
|
||||
@@ -10,3 +10,6 @@ norecursedirs = .* docs scripts normalizer_issue_files build
|
||||
# fine as long as we are using `clean_jedi_cache` as a session scoped
|
||||
# fixture.
|
||||
usefixtures = clean_parso_cache
|
||||
|
||||
# Disallow warnings
|
||||
filterwarnings = error
|
||||
|
||||
@@ -13,6 +13,9 @@ ignore =
|
||||
|
||||
|
||||
[mypy]
|
||||
show_error_codes = true
|
||||
enable_error_code = ignore-without-code
|
||||
|
||||
disallow_subclassing_any = True
|
||||
|
||||
# Avoid creating future gotchas emerging from bad typing
|
||||
@@ -23,3 +26,4 @@ warn_unused_configs = True
|
||||
warn_unreachable = True
|
||||
|
||||
strict_equality = True
|
||||
no_implicit_optional = False
|
||||
|
||||
15
setup.py
15
setup.py
@@ -40,6 +40,11 @@ setup(
|
||||
'Programming Language :: Python :: 3.7',
|
||||
'Programming Language :: Python :: 3.8',
|
||||
'Programming Language :: Python :: 3.9',
|
||||
'Programming Language :: Python :: 3.10',
|
||||
'Programming Language :: Python :: 3.11',
|
||||
'Programming Language :: Python :: 3.12',
|
||||
'Programming Language :: Python :: 3.13',
|
||||
'Programming Language :: Python :: 3.14',
|
||||
'Topic :: Software Development :: Libraries :: Python Modules',
|
||||
'Topic :: Text Editors :: Integrated Development Environments (IDE)',
|
||||
'Topic :: Utilities',
|
||||
@@ -47,12 +52,16 @@ setup(
|
||||
],
|
||||
extras_require={
|
||||
'testing': [
|
||||
'pytest<6.0.0',
|
||||
'pytest',
|
||||
'docopt',
|
||||
],
|
||||
'qa': [
|
||||
'flake8==3.8.3',
|
||||
'mypy==0.782',
|
||||
# Latest version which supports Python 3.6
|
||||
'flake8==5.0.4',
|
||||
# Latest version which supports Python 3.6
|
||||
'mypy==0.971',
|
||||
# Arbitrary pins, latest at the time of pinning
|
||||
'types-setuptools==67.2.0.1',
|
||||
],
|
||||
},
|
||||
)
|
||||
|
||||
@@ -29,7 +29,6 @@ FAILING_EXAMPLES = [
|
||||
'from foo import a,',
|
||||
'from __future__ import whatever',
|
||||
'from __future__ import braces',
|
||||
'from .__future__ import whatever',
|
||||
'def f(x=3, y): pass',
|
||||
'lambda x=3, y: x',
|
||||
'__debug__ = 1',
|
||||
@@ -145,6 +144,44 @@ FAILING_EXAMPLES = [
|
||||
'([False], a) = x',
|
||||
'def x(): from math import *',
|
||||
|
||||
# invalid del statements
|
||||
'del x + y',
|
||||
'del x(y)',
|
||||
'async def foo(): del await x',
|
||||
'def foo(): del (yield x)',
|
||||
'del [x for x in range(10)]',
|
||||
'del *x',
|
||||
'del *x,',
|
||||
'del (*x,)',
|
||||
'del [*x]',
|
||||
'del x, *y',
|
||||
'del *x.y,',
|
||||
'del *x[y],',
|
||||
'del *x[y::], z',
|
||||
'del x, (y, *z)',
|
||||
'del (x, *[y, z])',
|
||||
'del [x, *(y, [*z])]',
|
||||
'del {}',
|
||||
'del {x}',
|
||||
'del {x, y}',
|
||||
'del {x, *y}',
|
||||
|
||||
# invalid starred expressions
|
||||
'*x',
|
||||
'(*x)',
|
||||
'((*x))',
|
||||
'1 + (*x)',
|
||||
'*x; 1',
|
||||
'1; *x',
|
||||
'1\n*x',
|
||||
'x = *y',
|
||||
'x: int = *y',
|
||||
'def foo(): return *x',
|
||||
'def foo(): yield *x',
|
||||
'f"{*x}"',
|
||||
'for *x in 1: pass',
|
||||
'[1 for *x in 1]',
|
||||
|
||||
# str/bytes combinations
|
||||
'"s" b""',
|
||||
'"s" b"" ""',
|
||||
@@ -178,7 +215,6 @@ FAILING_EXAMPLES = [
|
||||
'f"{\'\\\'}"',
|
||||
'f"{#}"',
|
||||
"f'{1!b}'",
|
||||
"f'{1:{5:{3}}}'",
|
||||
"f'{'",
|
||||
"f'{'",
|
||||
"f'}'",
|
||||
@@ -189,8 +225,6 @@ FAILING_EXAMPLES = [
|
||||
"f'{1;1}'",
|
||||
"f'{a;}'",
|
||||
"f'{b\"\" \"\"}'",
|
||||
# f-string expression part cannot include a backslash
|
||||
r'''f"{'\n'}"''',
|
||||
|
||||
'async def foo():\n yield x\n return 1',
|
||||
'async def foo():\n yield x\n return 1',
|
||||
@@ -198,6 +232,9 @@ FAILING_EXAMPLES = [
|
||||
'[*[] for a in [1]]',
|
||||
'async def bla():\n def x(): await bla()',
|
||||
'del None',
|
||||
'del True',
|
||||
'del False',
|
||||
'del ...',
|
||||
|
||||
# Errors of global / nonlocal
|
||||
dedent('''
|
||||
@@ -296,6 +333,13 @@ FAILING_EXAMPLES = [
|
||||
def z():
|
||||
nonlocal a
|
||||
'''),
|
||||
# Name is assigned before nonlocal declaration
|
||||
dedent('''
|
||||
def x(a):
|
||||
def y():
|
||||
a = 10
|
||||
nonlocal a
|
||||
'''),
|
||||
]
|
||||
|
||||
if sys.version_info[:2] >= (3, 7):
|
||||
@@ -365,3 +409,17 @@ if sys.version_info[:2] >= (3, 8):
|
||||
FAILING_EXAMPLES += [
|
||||
"f'{1=!b}'",
|
||||
]
|
||||
|
||||
if sys.version_info[:2] < (3, 12):
|
||||
FAILING_EXAMPLES += [
|
||||
# f-string expression part cannot include a backslash before 3.12
|
||||
r'''f"{'\n'}"''',
|
||||
# this compiles successfully but fails when evaluated in 3.12
|
||||
"f'{1:{5:{3}}}'",
|
||||
]
|
||||
|
||||
if sys.version_info[:2] < (3, 13):
|
||||
# this compiles successfully but fails when evaluated in 3.13
|
||||
FAILING_EXAMPLES += [
|
||||
'from .__future__ import whatever',
|
||||
]
|
||||
|
||||
@@ -46,6 +46,28 @@ def x(b=a):
|
||||
global a
|
||||
|
||||
|
||||
def x(*args, c=2, d):
|
||||
pass
|
||||
|
||||
|
||||
def x(*, c=2, d):
|
||||
pass
|
||||
|
||||
|
||||
def x(a, b=1, *args, c=2, d):
|
||||
pass
|
||||
|
||||
|
||||
def x(a, b=1, *, c=2, d):
|
||||
pass
|
||||
|
||||
|
||||
lambda *args, c=2, d: (c, d)
|
||||
lambda *, c=2, d: (c, d)
|
||||
lambda a, b=1, *args, c=2, d: (c, d)
|
||||
lambda a, b=1, *, c=2, d: (c, d)
|
||||
|
||||
|
||||
*foo, a = (1,)
|
||||
*foo[0], a = (1,)
|
||||
*[], a = (1,)
|
||||
@@ -113,6 +135,29 @@ def x():
|
||||
nonlocal a
|
||||
|
||||
|
||||
def x(a):
|
||||
def y():
|
||||
nonlocal a
|
||||
|
||||
|
||||
def x(a, b):
|
||||
def y():
|
||||
nonlocal b
|
||||
nonlocal a
|
||||
|
||||
|
||||
def x(a):
|
||||
def y():
|
||||
def z():
|
||||
nonlocal a
|
||||
|
||||
|
||||
def x():
|
||||
def y(a):
|
||||
def z():
|
||||
nonlocal a
|
||||
|
||||
|
||||
a = *args, *args
|
||||
error[(*args, *args)] = 3
|
||||
*args, *args
|
||||
|
||||
@@ -137,7 +137,7 @@ def test_cache_last_used_update(diff_cache, use_file_io):
|
||||
parse('somecode', cache=True, path=p)
|
||||
node_cache_item = next(iter(parser_cache.values()))[p]
|
||||
now = time.time()
|
||||
assert node_cache_item.last_used < now
|
||||
assert node_cache_item.last_used <= now
|
||||
|
||||
if use_file_io:
|
||||
f = _FixedTimeFileIO(p, 'code', node_cache_item.last_used - 10)
|
||||
@@ -146,7 +146,7 @@ def test_cache_last_used_update(diff_cache, use_file_io):
|
||||
parse('somecode2', cache=True, path=p, diff_cache=diff_cache)
|
||||
|
||||
node_cache_item = next(iter(parser_cache.values()))[p]
|
||||
assert now < node_cache_item.last_used < time.time()
|
||||
assert now <= node_cache_item.last_used <= time.time()
|
||||
|
||||
|
||||
@skip_pypy
|
||||
@@ -185,6 +185,9 @@ def test_permission_error(monkeypatch):
|
||||
was_called = False
|
||||
|
||||
monkeypatch.setattr(cache, '_save_to_file_system', save)
|
||||
with pytest.warns(Warning):
|
||||
parse(path=__file__, cache=True, diff_cache=True)
|
||||
assert was_called
|
||||
try:
|
||||
with pytest.warns(Warning):
|
||||
parse(path=__file__, cache=True, diff_cache=True)
|
||||
assert was_called
|
||||
finally:
|
||||
parser_cache.clear()
|
||||
|
||||
182
test/test_dump_tree.py
Normal file
182
test/test_dump_tree.py
Normal file
@@ -0,0 +1,182 @@
|
||||
from textwrap import dedent
|
||||
|
||||
import pytest
|
||||
|
||||
from parso import parse
|
||||
# Using star import for easier eval testing below.
|
||||
from parso.python.tree import * # noqa: F403
|
||||
from parso.tree import * # noqa: F403
|
||||
from parso.tree import ErrorLeaf, TypedLeaf
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'indent,expected_dump', [
|
||||
(None, "Module(["
|
||||
"Lambda(["
|
||||
"Keyword('lambda', (1, 0)), "
|
||||
"Param(["
|
||||
"Name('x', (1, 7), prefix=' '), "
|
||||
"Operator(',', (1, 8)), "
|
||||
"]), "
|
||||
"Param(["
|
||||
"Name('y', (1, 10), prefix=' '), "
|
||||
"]), "
|
||||
"Operator(':', (1, 11)), "
|
||||
"PythonNode('arith_expr', ["
|
||||
"Name('x', (1, 13), prefix=' '), "
|
||||
"Operator('+', (1, 15), prefix=' '), "
|
||||
"Name('y', (1, 17), prefix=' '), "
|
||||
"]), "
|
||||
"]), "
|
||||
"EndMarker('', (1, 18)), "
|
||||
"])"),
|
||||
(0, dedent('''\
|
||||
Module([
|
||||
Lambda([
|
||||
Keyword('lambda', (1, 0)),
|
||||
Param([
|
||||
Name('x', (1, 7), prefix=' '),
|
||||
Operator(',', (1, 8)),
|
||||
]),
|
||||
Param([
|
||||
Name('y', (1, 10), prefix=' '),
|
||||
]),
|
||||
Operator(':', (1, 11)),
|
||||
PythonNode('arith_expr', [
|
||||
Name('x', (1, 13), prefix=' '),
|
||||
Operator('+', (1, 15), prefix=' '),
|
||||
Name('y', (1, 17), prefix=' '),
|
||||
]),
|
||||
]),
|
||||
EndMarker('', (1, 18)),
|
||||
])''')),
|
||||
(4, dedent('''\
|
||||
Module([
|
||||
Lambda([
|
||||
Keyword('lambda', (1, 0)),
|
||||
Param([
|
||||
Name('x', (1, 7), prefix=' '),
|
||||
Operator(',', (1, 8)),
|
||||
]),
|
||||
Param([
|
||||
Name('y', (1, 10), prefix=' '),
|
||||
]),
|
||||
Operator(':', (1, 11)),
|
||||
PythonNode('arith_expr', [
|
||||
Name('x', (1, 13), prefix=' '),
|
||||
Operator('+', (1, 15), prefix=' '),
|
||||
Name('y', (1, 17), prefix=' '),
|
||||
]),
|
||||
]),
|
||||
EndMarker('', (1, 18)),
|
||||
])''')),
|
||||
('\t', dedent('''\
|
||||
Module([
|
||||
\tLambda([
|
||||
\t\tKeyword('lambda', (1, 0)),
|
||||
\t\tParam([
|
||||
\t\t\tName('x', (1, 7), prefix=' '),
|
||||
\t\t\tOperator(',', (1, 8)),
|
||||
\t\t]),
|
||||
\t\tParam([
|
||||
\t\t\tName('y', (1, 10), prefix=' '),
|
||||
\t\t]),
|
||||
\t\tOperator(':', (1, 11)),
|
||||
\t\tPythonNode('arith_expr', [
|
||||
\t\t\tName('x', (1, 13), prefix=' '),
|
||||
\t\t\tOperator('+', (1, 15), prefix=' '),
|
||||
\t\t\tName('y', (1, 17), prefix=' '),
|
||||
\t\t]),
|
||||
\t]),
|
||||
\tEndMarker('', (1, 18)),
|
||||
])''')),
|
||||
]
|
||||
)
|
||||
def test_dump_parser_tree(indent, expected_dump):
|
||||
code = "lambda x, y: x + y"
|
||||
module = parse(code)
|
||||
assert module.dump(indent=indent) == expected_dump
|
||||
|
||||
# Check that dumped tree can be eval'd to recover the parser tree and original code.
|
||||
recovered_code = eval(expected_dump).get_code()
|
||||
assert recovered_code == code
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'node,expected_dump,expected_code', [
|
||||
( # Dump intermediate node (not top level module)
|
||||
parse("def foo(x, y): return x + y").children[0], dedent('''\
|
||||
Function([
|
||||
Keyword('def', (1, 0)),
|
||||
Name('foo', (1, 4), prefix=' '),
|
||||
PythonNode('parameters', [
|
||||
Operator('(', (1, 7)),
|
||||
Param([
|
||||
Name('x', (1, 8)),
|
||||
Operator(',', (1, 9)),
|
||||
]),
|
||||
Param([
|
||||
Name('y', (1, 11), prefix=' '),
|
||||
]),
|
||||
Operator(')', (1, 12)),
|
||||
]),
|
||||
Operator(':', (1, 13)),
|
||||
ReturnStmt([
|
||||
Keyword('return', (1, 15), prefix=' '),
|
||||
PythonNode('arith_expr', [
|
||||
Name('x', (1, 22), prefix=' '),
|
||||
Operator('+', (1, 24), prefix=' '),
|
||||
Name('y', (1, 26), prefix=' '),
|
||||
]),
|
||||
]),
|
||||
])'''),
|
||||
"def foo(x, y): return x + y",
|
||||
),
|
||||
( # Dump leaf
|
||||
parse("def foo(x, y): return x + y").children[0].children[0],
|
||||
"Keyword('def', (1, 0))",
|
||||
'def',
|
||||
),
|
||||
( # Dump ErrorLeaf
|
||||
ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' '),
|
||||
"ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' ')",
|
||||
' error_code',
|
||||
),
|
||||
( # Dump TypedLeaf
|
||||
TypedLeaf('type', 'value', (1, 1)),
|
||||
"TypedLeaf('type', 'value', (1, 1))",
|
||||
'value',
|
||||
),
|
||||
]
|
||||
)
|
||||
def test_dump_parser_tree_not_top_level_module(node, expected_dump, expected_code):
|
||||
dump_result = node.dump()
|
||||
assert dump_result == expected_dump
|
||||
|
||||
# Check that dumped tree can be eval'd to recover the parser tree and original code.
|
||||
recovered_code = eval(dump_result).get_code()
|
||||
assert recovered_code == expected_code
|
||||
|
||||
|
||||
def test_dump_parser_tree_invalid_args():
|
||||
module = parse("lambda x, y: x + y")
|
||||
|
||||
with pytest.raises(TypeError):
|
||||
module.dump(indent=1.1)
|
||||
|
||||
|
||||
def test_eval_dump_recovers_parent():
|
||||
module = parse("lambda x, y: x + y")
|
||||
module2 = eval(module.dump())
|
||||
assert module2.parent is None
|
||||
lambda_node = module2.children[0]
|
||||
assert lambda_node.parent is module2
|
||||
assert module2.children[1].parent is module2
|
||||
assert lambda_node.children[0].parent is lambda_node
|
||||
param_node = lambda_node.children[1]
|
||||
assert param_node.parent is lambda_node
|
||||
assert param_node.children[0].parent is param_node
|
||||
assert param_node.children[1].parent is param_node
|
||||
arith_expr_node = lambda_node.children[-1]
|
||||
assert arith_expr_node.parent is lambda_node
|
||||
assert arith_expr_node.children[0].parent is arith_expr_node
|
||||
@@ -4,15 +4,20 @@ from parso import utils
|
||||
|
||||
|
||||
def test_load_inexisting_grammar():
|
||||
# This version shouldn't be out for a while, but if we ever do, wow!
|
||||
with pytest.raises(NotImplementedError):
|
||||
load_grammar(version='15.8')
|
||||
# The same is true for very old grammars (even though this is probably not
|
||||
# going to be an issue.
|
||||
# We support future grammars assuming future compatibility,
|
||||
# but we don't know how to parse old grammars.
|
||||
with pytest.raises(NotImplementedError):
|
||||
load_grammar(version='1.5')
|
||||
|
||||
|
||||
def test_load_grammar_uses_older_syntax():
|
||||
load_grammar(version='4.0')
|
||||
|
||||
|
||||
def test_load_grammar_doesnt_warn(each_version):
|
||||
load_grammar(version=each_version)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(('string', 'result'), [
|
||||
('2', (2, 7)), ('3', (3, 6)), ('1.1', (1, 1)), ('1.1.1', (1, 1)), ('300.1.31', (300, 1))
|
||||
])
|
||||
|
||||
@@ -6,6 +6,7 @@ tests of pydocstyle.
|
||||
import difflib
|
||||
import re
|
||||
from functools import total_ordering
|
||||
from typing import Iterator, Tuple
|
||||
|
||||
import parso
|
||||
from parso.utils import python_bytes_to_unicode
|
||||
@@ -13,7 +14,7 @@ from parso.utils import python_bytes_to_unicode
|
||||
|
||||
@total_ordering
|
||||
class WantedIssue:
|
||||
def __init__(self, code, line, column):
|
||||
def __init__(self, code: str, line: int, column: int) -> None:
|
||||
self.code = code
|
||||
self._line = line
|
||||
self._column = column
|
||||
@@ -21,18 +22,18 @@ class WantedIssue:
|
||||
def __eq__(self, other):
|
||||
return self.code == other.code and self.start_pos == other.start_pos
|
||||
|
||||
def __lt__(self, other):
|
||||
def __lt__(self, other: 'WantedIssue') -> bool:
|
||||
return self.start_pos < other.start_pos or self.code < other.code
|
||||
|
||||
def __hash__(self):
|
||||
def __hash__(self) -> int:
|
||||
return hash(str(self.code) + str(self._line) + str(self._column))
|
||||
|
||||
@property
|
||||
def start_pos(self):
|
||||
def start_pos(self) -> Tuple[int, int]:
|
||||
return self._line, self._column
|
||||
|
||||
|
||||
def collect_errors(code):
|
||||
def collect_errors(code: str) -> Iterator[WantedIssue]:
|
||||
for line_nr, line in enumerate(code.splitlines(), 1):
|
||||
match = re.match(r'(\s*)#: (.*)$', line)
|
||||
if match is not None:
|
||||
|
||||
@@ -6,6 +6,7 @@ import pytest
|
||||
|
||||
from parso import parse
|
||||
from parso.python import tree
|
||||
from parso.tree import search_ancestor
|
||||
|
||||
|
||||
class TestsFunctionAndLambdaParsing:
|
||||
@@ -239,3 +240,27 @@ def test_with_stmt_get_test_node_from_name():
|
||||
for name in with_stmt.get_defined_names(include_setitem=True)
|
||||
]
|
||||
assert tests == ["A", "B", "C", "D"]
|
||||
|
||||
|
||||
sample_module = parse('x + y')
|
||||
sample_node = sample_module.children[0]
|
||||
sample_leaf = sample_node.children[0]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'node,node_types,expected_ancestor', [
|
||||
(sample_module, ('file_input',), None),
|
||||
(sample_node, ('arith_expr',), None),
|
||||
(sample_node, ('file_input', 'eval_input'), sample_module),
|
||||
(sample_leaf, ('name',), None),
|
||||
(sample_leaf, ('arith_expr',), sample_node),
|
||||
(sample_leaf, ('file_input',), sample_module),
|
||||
(sample_leaf, ('file_input', 'arith_expr'), sample_node),
|
||||
(sample_leaf, ('shift_expr',), None),
|
||||
(sample_leaf, ('name', 'shift_expr',), None),
|
||||
(sample_leaf, (), None),
|
||||
]
|
||||
)
|
||||
def test_search_ancestor(node, node_types, expected_ancestor):
|
||||
assert node.search_ancestor(*node_types) is expected_ancestor
|
||||
assert search_ancestor(node, *node_types) is expected_ancestor # deprecated
|
||||
|
||||
@@ -15,6 +15,8 @@ def test_eof_newline():
|
||||
assert issue.code == 292
|
||||
|
||||
assert not issues('asdf = 1\n')
|
||||
assert not issues('asdf = 1\r\n')
|
||||
assert not issues('asdf = 1\r')
|
||||
assert_issue('asdf = 1')
|
||||
assert_issue('asdf = 1\n# foo')
|
||||
assert_issue('# foobar')
|
||||
|
||||
@@ -339,7 +339,7 @@ def test_left_recursion():
|
||||
@pytest.mark.parametrize(
|
||||
'grammar, error_match', [
|
||||
['foo: bar | baz\nbar: NAME\nbaz: NAME\n',
|
||||
r"foo is ambiguous.*given a PythonTokenTypes\.NAME.*bar or baz"],
|
||||
r"foo is ambiguous.*given a (PythonTokenTypes\.)?NAME.*bar or baz"],
|
||||
['''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''',
|
||||
r"foo is ambiguous.*given a ReservedString\(x\).*bar or baz"],
|
||||
['''foo: bar | 'x'\nbar: 'x'\n''',
|
||||
|
||||
@@ -19,6 +19,7 @@ unicode_bom = BOM_UTF8.decode('utf-8')
|
||||
(' \f ', ['\f', ' ']),
|
||||
(' \f ', ['\f', ' ']),
|
||||
(' \r\n', ['\r\n', '']),
|
||||
(' \r', ['\r', '']),
|
||||
('\\\n', ['\\\n', '']),
|
||||
('\\\r\n', ['\\\r\n', '']),
|
||||
('\t\t\n\t', ['\n', '\t']),
|
||||
@@ -34,7 +35,7 @@ def test_simple_prefix_splitting(string, tokens):
|
||||
assert pt.value == expected
|
||||
|
||||
# Calculate the estimated end_pos
|
||||
if expected.endswith('\n'):
|
||||
if expected.endswith('\n') or expected.endswith('\r'):
|
||||
end_pos = start_pos[0] + 1, 0
|
||||
else:
|
||||
end_pos = start_pos[0], start_pos[1] + len(expected) + len(pt.spacing)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""
|
||||
Testing if parso finds syntax errors and indentation errors.
|
||||
"""
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
@@ -57,10 +58,10 @@ def test_non_async_in_async():
|
||||
error, = errors
|
||||
actual = error.message
|
||||
assert actual in wanted
|
||||
if sys.version_info[:2] < (3, 8):
|
||||
if sys.version_info[:2] not in ((3, 8), (3, 9)):
|
||||
assert line_nr == error.start_pos[0]
|
||||
else:
|
||||
assert line_nr == 0 # For whatever reason this is zero in Python 3.8+
|
||||
assert line_nr == 0 # For whatever reason this is zero in Python 3.8/3.9
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -117,36 +118,103 @@ def _get_actual_exception(code):
|
||||
assert False, "The piece of code should raise an exception."
|
||||
|
||||
# SyntaxError
|
||||
if wanted == 'SyntaxError: assignment to keyword':
|
||||
# Some errors have changed error message in later versions of Python,
|
||||
# and we give a translation table here. We deal with special cases
|
||||
# below.
|
||||
translations = {
|
||||
'SyntaxError: f-string: unterminated string':
|
||||
'SyntaxError: EOL while scanning string literal',
|
||||
"SyntaxError: f-string: expecting '}'":
|
||||
'SyntaxError: EOL while scanning string literal',
|
||||
'SyntaxError: f-string: empty expression not allowed':
|
||||
'SyntaxError: invalid syntax',
|
||||
"SyntaxError: f-string expression part cannot include '#'":
|
||||
'SyntaxError: invalid syntax',
|
||||
"SyntaxError: f-string: single '}' is not allowed":
|
||||
'SyntaxError: invalid syntax',
|
||||
'SyntaxError: cannot use starred expression here':
|
||||
"SyntaxError: can't use starred expression here",
|
||||
'SyntaxError: f-string: cannot use starred expression here':
|
||||
"SyntaxError: f-string: can't use starred expression here",
|
||||
'SyntaxError: unterminated string literal':
|
||||
'SyntaxError: EOL while scanning string literal',
|
||||
'SyntaxError: parameter without a default follows parameter with a default':
|
||||
'SyntaxError: non-default argument follows default argument',
|
||||
"SyntaxError: 'yield from' outside function":
|
||||
"SyntaxError: 'yield' outside function",
|
||||
"SyntaxError: f-string: valid expression required before '}'":
|
||||
'SyntaxError: invalid syntax',
|
||||
"SyntaxError: '{' was never closed":
|
||||
'SyntaxError: invalid syntax',
|
||||
"SyntaxError: f-string: invalid conversion character 'b': expected 's', 'r', or 'a'":
|
||||
"SyntaxError: f-string: invalid conversion character: expected 's', 'r', or 'a'",
|
||||
"SyntaxError: (value error) Invalid format specifier ' 5' for object of type 'int'":
|
||||
'SyntaxError: f-string: expressions nested too deeply',
|
||||
"SyntaxError: f-string: expecting a valid expression after '{'":
|
||||
'SyntaxError: f-string: invalid syntax',
|
||||
"SyntaxError: f-string: expecting '=', or '!', or ':', or '}'":
|
||||
'SyntaxError: f-string: invalid syntax',
|
||||
"SyntaxError: f-string: expecting '=', or '!', or ':', or '}'":
|
||||
'SyntaxError: f-string: invalid syntax',
|
||||
}
|
||||
|
||||
if wanted in translations:
|
||||
wanted = translations[wanted]
|
||||
elif wanted == 'SyntaxError: assignment to keyword':
|
||||
return [wanted, "SyntaxError: can't assign to keyword",
|
||||
'SyntaxError: cannot assign to __debug__'], line_nr
|
||||
elif wanted == 'SyntaxError: f-string: unterminated string':
|
||||
wanted = 'SyntaxError: EOL while scanning string literal'
|
||||
elif wanted == 'SyntaxError: f-string expression part cannot include a backslash':
|
||||
return [
|
||||
wanted,
|
||||
"SyntaxError: EOL while scanning string literal",
|
||||
"SyntaxError: unexpected character after line continuation character",
|
||||
], line_nr
|
||||
elif wanted == "SyntaxError: f-string: expecting '}'":
|
||||
wanted = 'SyntaxError: EOL while scanning string literal'
|
||||
elif wanted == 'SyntaxError: f-string: empty expression not allowed':
|
||||
wanted = 'SyntaxError: invalid syntax'
|
||||
elif wanted == "SyntaxError: f-string expression part cannot include '#'":
|
||||
wanted = 'SyntaxError: invalid syntax'
|
||||
elif wanted == "SyntaxError: f-string: single '}' is not allowed":
|
||||
wanted = 'SyntaxError: invalid syntax'
|
||||
return [wanted], line_nr
|
||||
elif "Maybe you meant '==' instead of '='?" in wanted:
|
||||
wanted = wanted.removesuffix(" here. Maybe you meant '==' instead of '='?")
|
||||
elif re.match(
|
||||
r"SyntaxError: unterminated string literal \(detected at line \d+\)", wanted
|
||||
):
|
||||
wanted = "SyntaxError: EOL while scanning string literal"
|
||||
elif re.match(
|
||||
r"SyntaxError: unterminated triple-quoted string literal \(detected at line \d+\)",
|
||||
wanted,
|
||||
):
|
||||
wanted = 'SyntaxError: EOF while scanning triple-quoted string literal'
|
||||
elif re.match(
|
||||
r"IndentationError: expected an indented block after '[^']*' statement on line \d",
|
||||
wanted,
|
||||
):
|
||||
wanted = 'IndentationError: expected an indented block'
|
||||
# The following two errors are produced for both some f-strings and
|
||||
# some non-f-strings in Python 3.13:
|
||||
elif wanted == "SyntaxError: can't use starred expression here":
|
||||
wanted = [
|
||||
"SyntaxError: can't use starred expression here",
|
||||
"SyntaxError: f-string: can't use starred expression here"
|
||||
]
|
||||
elif wanted == 'SyntaxError: cannot mix bytes and nonbytes literals':
|
||||
wanted = [
|
||||
'SyntaxError: cannot mix bytes and nonbytes literals',
|
||||
'SyntaxError: f-string: cannot mix bytes and nonbytes literals'
|
||||
]
|
||||
|
||||
if isinstance(wanted, list):
|
||||
return wanted, line_nr
|
||||
else:
|
||||
return [wanted], line_nr
|
||||
|
||||
|
||||
def test_default_except_error_postition():
|
||||
# For this error the position seemed to be one line off, but that doesn't
|
||||
# really matter.
|
||||
# For this error the position seemed to be one line off in Python < 3.10,
|
||||
# but that doesn't really matter.
|
||||
code = 'try: pass\nexcept: pass\nexcept X: pass'
|
||||
wanted, line_nr = _get_actual_exception(code)
|
||||
error, = _get_error_list(code)
|
||||
assert error.message in wanted
|
||||
assert line_nr != error.start_pos[0]
|
||||
if sys.version_info[:2] >= (3, 10):
|
||||
assert line_nr == error.start_pos[0]
|
||||
else:
|
||||
assert line_nr != error.start_pos[0]
|
||||
# I think this is the better position.
|
||||
assert error.start_pos[0] == 2
|
||||
|
||||
@@ -415,11 +483,28 @@ def test_unparenthesized_genexp(source, no_errors):
|
||||
('*x = 2', False),
|
||||
('(*y) = 1', False),
|
||||
('((*z)) = 1', False),
|
||||
('*a,', True),
|
||||
('*a, = 1', True),
|
||||
('(*a,)', True),
|
||||
('(*a,) = 1', True),
|
||||
('[*a]', True),
|
||||
('[*a] = 1', True),
|
||||
('a, *b', True),
|
||||
('a, *b = 1', True),
|
||||
('a, *b, c', True),
|
||||
('a, *b, c = 1', True),
|
||||
('a, (*b), c = 1', True),
|
||||
('a, ((*b)), c = 1', True),
|
||||
('a, (*b, c), d', True),
|
||||
('a, (*b, c), d = 1', True),
|
||||
('*a.b,', True),
|
||||
('*a.b, = 1', True),
|
||||
('*a[b],', True),
|
||||
('*a[b], = 1', True),
|
||||
('*a[b::], c', True),
|
||||
('*a[b::], c = 1', True),
|
||||
('(a, *[b, c])', True),
|
||||
('(a, *[b, c]) = 1', True),
|
||||
('[a, *(b, [*c])]', True),
|
||||
('[a, *(b, [*c])] = 1', True),
|
||||
('[*(1,2,3)]', True),
|
||||
('{*(1,2,3)}', True),
|
||||
('[*(1,2,3),]', True),
|
||||
@@ -432,3 +517,59 @@ def test_unparenthesized_genexp(source, no_errors):
|
||||
)
|
||||
def test_starred_expr(source, no_errors):
|
||||
assert bool(_get_error_list(source, version="3")) ^ no_errors
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'code', [
|
||||
'a, (*b), c',
|
||||
'a, (*b), c = 1',
|
||||
'a, ((*b)), c',
|
||||
'a, ((*b)), c = 1',
|
||||
]
|
||||
)
|
||||
def test_parenthesized_single_starred_expr(code):
|
||||
assert not _get_error_list(code, version='3.8')
|
||||
assert _get_error_list(code, version='3.9')
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'code', [
|
||||
'() = ()',
|
||||
'() = []',
|
||||
'[] = ()',
|
||||
'[] = []',
|
||||
]
|
||||
)
|
||||
def test_valid_empty_assignment(code):
|
||||
assert not _get_error_list(code)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'code', [
|
||||
'del ()',
|
||||
'del []',
|
||||
'del x',
|
||||
'del x,',
|
||||
'del x, y',
|
||||
'del (x, y)',
|
||||
'del [x, y]',
|
||||
'del (x, [y, z])',
|
||||
'del x.y, x[y]',
|
||||
'del f(x)[y::]',
|
||||
'del x[[*y]]',
|
||||
'del x[[*y]::]',
|
||||
]
|
||||
)
|
||||
def test_valid_del(code):
|
||||
assert not _get_error_list(code)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
('source', 'version', 'no_errors'), [
|
||||
('[x for x in range(10) if lambda: 1]', '3.8', True),
|
||||
('[x for x in range(10) if lambda: 1]', '3.9', False),
|
||||
('[x for x in range(10) if (lambda: 1)]', '3.9', True),
|
||||
]
|
||||
)
|
||||
def test_lambda_in_comp_if(source, version, no_errors):
|
||||
assert bool(_get_error_list(source, version=version)) ^ no_errors
|
||||
|
||||
@@ -74,6 +74,10 @@ def test_utf8_bom():
|
||||
('code', 'errors'), [
|
||||
(b'# coding: wtf-12\nfoo', 'strict'),
|
||||
(b'# coding: wtf-12\nfoo', 'replace'),
|
||||
(b'# coding: wtf-12\r\nfoo', 'strict'),
|
||||
(b'# coding: wtf-12\r\nfoo', 'replace'),
|
||||
(b'# coding: wtf-12\rfoo', 'strict'),
|
||||
(b'# coding: wtf-12\rfoo', 'replace'),
|
||||
]
|
||||
)
|
||||
def test_bytes_to_unicode_failing_encoding(code, errors):
|
||||
|
||||
Reference in New Issue
Block a user