Mirror of https://github.com/davidhalter/parso.git, synced 2025-12-08 13:45:01 +08:00

Compare commits (59 commits)
Commits in this comparison (SHA1):

3b263f0a0d, f52103f236, c53321a440, d8a70abf19, c19d7c4e6d, d42c0f1b3b,
40e78ff7e0, c88a2675b0, 88874a5a9f, 1e4076f9d9, 73796f309d, 1cacdf366e,
d352bede13, 572be783f3, 31171d7ae6, 7e0586b0b9, cc347b1d3b, 841a5d96b3,
d68b4e0cab, d55b4f08dc, 58790c119e, 3923ecf12f, bd33e4ef7e, 891bfdaa04,
5e1828b3f0, 6daf91880b, 44cf64a5f7, fe24f0dc1b, 450e9d0a19, 93b5e6dffc,
4403b5cac5, 6f29c551fd, d6b1d19d87, e0dc415bbc, 4c2c0ad077, 5daa8b1db6,
c05e14c24e, 846513584e, 6b0e01c220, 92396a9a16, fe54800cdd, 6ecd975516,
27a7c16803, a06521d912, 216a77dce5, 8bb211fafb, 342e308f57, 8f46481aaf,
00621977b7, 077e34be84, a3f851d8f6, 261132e74c, 345374d040, f8709852e3,
2dcc0d3770, 34b8b7dd79, caadf3bf4c, 1b4c75608a, 15403fd998
@@ -7,9 +7,12 @@ python:
   - 3.6
   - 3.7
   - 3.8.2
+  - nightly
   - pypy2.7-6.0
   - pypy3.5-6.0
 matrix:
+  allow_failures:
+    - python: nightly
   include:
     - python: 3.5
       env: TOXENV=py35-coverage
@@ -50,6 +50,8 @@ Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>
 Simon Ruggier (@sruggier)
 Élie Gouzien (@ElieGouzien)
 Tim Gates (@timgates42) <tim.gates@iress.com>
+Batuhan Taskaya (@isidentical) <isidentical@gmail.com>
+Jocelyn Boullier (@Kazy) <jocelyn@boullier.bzh>
 
 Note: (@user) means a github user name.
@@ -3,6 +3,14 @@
 Changelog
 ---------
 
+0.7.1 (2020-07-24)
+++++++++++++++++++
+
+- Fixed a couple of smaller bugs (mostly syntax error detection in
+  ``Grammar.iter_errors``)
+
+This is going to be the last release that supports Python 2.7, 3.4 and 3.5.
+
 0.7.0 (2020-04-13)
 ++++++++++++++++++
 
@@ -11,6 +11,10 @@ parso - A Python Parser
     :target: https://coveralls.io/github/davidhalter/parso?branch=master
     :alt: Coverage Status
 
+.. image:: https://pepy.tech/badge/parso
+    :target: https://pepy.tech/project/parso
+    :alt: PyPI Downloads
+
 .. image:: https://raw.githubusercontent.com/davidhalter/parso/master/docs/_static/logo_characters.png
 
 Parso is a Python parser that supports error recovery and round-trip parsing
@@ -158,8 +158,17 @@ def works_ge_py35(each_version):
     version_info = parse_version_string(each_version)
     return Checker(each_version, version_info >= (3, 5))
 
+@pytest.fixture
+def works_ge_py36(each_version):
+    version_info = parse_version_string(each_version)
+    return Checker(each_version, version_info >= (3, 6))
+
 @pytest.fixture
 def works_ge_py38(each_version):
     version_info = parse_version_string(each_version)
     return Checker(each_version, version_info >= (3, 8))
 
+@pytest.fixture
+def works_ge_py39(each_version):
+    version_info = parse_version_string(each_version)
+    return Checker(each_version, version_info >= (3, 9))
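The new fixtures follow the existing works_ge_py35/works_ge_py38 pattern: each wraps a Checker that parses an example under every supported grammar version and expects a syntax error exactly where the feature is unsupported. A hypothetical test using one of them (illustrative only; it assumes Checker.parse asserts the expected outcome, as the existing fixtures' tests do):

    def test_fstring_supported_since_36(works_ge_py36):
        # Should parse cleanly with 3.6+ grammars and produce an
        # error with older ones.
        works_ge_py36.parse('f"{x}"')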
@@ -26,7 +26,7 @@ git checkout $BRANCH
 tox
 
 # Create tag
-tag=v$(python -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)")
+tag=v$(python3 -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)")
 
 master_ref=$(git show-ref -s heads/$BRANCH)
 tag_ref=$(git show-ref -s $tag || true)

@@ -43,7 +43,7 @@ fi
 # Package and upload to PyPI
 #rm -rf dist/ - Not needed anymore, because the folder is never reused.
 echo `pwd`
-python setup.py sdist bdist_wheel
+python3 setup.py sdist bdist_wheel
 # Maybe do a pip install twine before.
 twine upload dist/*
 
@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.7.0'
+__version__ = '0.7.1'
 
 
 def parse(code=None, **kwargs):
@@ -2,6 +2,7 @@
 To ensure compatibility from Python ``2.7`` - ``3.3``, a module has been
 created. Clearly there is huge need to use conforming syntax.
 """
+import os
 import sys
 import platform
 

@@ -44,11 +45,17 @@ def u(string):
 
 
 try:
-    # Python 2.7
+    # Python 3.3+
     FileNotFoundError = FileNotFoundError
 except NameError:
+    # Python 2.7 (both IOError + OSError)
+    FileNotFoundError = EnvironmentError
+
+try:
     # Python 3.3+
-    FileNotFoundError = IOError
+    PermissionError = PermissionError
+except NameError:
+    # Python 2.7 (both IOError + OSError)
+    PermissionError = EnvironmentError
 
 
 def utf8_repr(func):

@@ -67,3 +74,28 @@ def utf8_repr(func):
         return func
     else:
         return wrapper
 
 
+if sys.version_info < (3, 5):
+    """
+    A super-minimal shim around listdir that behave like
+    scandir for the information we need.
+    """
+    class _DirEntry:
+
+        def __init__(self, name, basepath):
+            self.name = name
+            self.basepath = basepath
+
+        @property
+        def path(self):
+            return os.path.join(self.basepath, self.name)
+
+        def stat(self):
+            # won't follow symlinks
+            return os.lstat(os.path.join(self.basepath, self.name))
+
+    def scandir(dir):
+        return [_DirEntry(name, dir) for name in os.listdir(dir)]
+else:
+    from os import scandir
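These aliases let the rest of the codebase catch FileNotFoundError/PermissionError and iterate with scandir uniformly: on Python 2 both exception names fall back to EnvironmentError (the common parent of IOError and OSError there), and below 3.5 scandir is the listdir-based shim. A minimal usage sketch (hypothetical, mirroring how cache.py consumes these names; the path is made up):

    from parso._compatibility import FileNotFoundError, PermissionError, scandir

    try:
        open('/definitely/not/there')
    except FileNotFoundError:   # builtin on 3.3+, EnvironmentError alias on 2.7
        pass

    for entry in scandir('.'):  # os.scandir on 3.5+, the shim below that
        print(entry.name, entry.path, entry.stat().st_mtime)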
@@ -7,13 +7,15 @@ import shutil
 import platform
 import errno
 import logging
+import warnings
 
 try:
     import cPickle as pickle
 except:
     import pickle
 
-from parso._compatibility import FileNotFoundError
+from parso._compatibility import FileNotFoundError, PermissionError, scandir
+from parso.file_io import FileIO
 
 LOG = logging.getLogger(__name__)
 

@@ -21,6 +23,13 @@ _CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10  # 10 minutes
 """
 Cached files should survive at least a few minutes.
 """
+
+_CACHED_FILE_MAXIMUM_SURVIVAL = 60 * 60 * 24 * 30
+"""
+Maximum time for a cached file to survive if it is not
+accessed within.
+"""
+
 _CACHED_SIZE_TRIGGER = 600
 """
 This setting limits the amount of cached files. It's basically a way to start

@@ -63,7 +72,8 @@ http://docs.python.org/3/library/sys.html#sys.implementation
 
 def _get_default_cache_path():
     if platform.system().lower() == 'windows':
-        dir_ = os.path.join(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso')
+        dir_ = os.path.join(os.getenv('LOCALAPPDATA')
+                            or os.path.expanduser('~'), 'Parso', 'Parso')
     elif platform.system().lower() == 'darwin':
         dir_ = os.path.join('~', 'Library', 'Caches', 'Parso')
     else:

@@ -81,6 +91,19 @@ On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
 ``$XDG_CACHE_HOME/parso`` is used instead of the default one.
 """
 
+_CACHE_CLEAR_THRESHOLD = 60 * 60 * 24
+
+
+def _get_cache_clear_lock(cache_path=None):
+    """
+    The path where the cache lock is stored.
+
+    Cache lock will prevent continous cache clearing and only allow garbage
+    collection once a day (can be configured in _CACHE_CLEAR_THRESHOLD).
+    """
+    cache_path = cache_path or _get_default_cache_path()
+    return FileIO(os.path.join(cache_path, "PARSO-CACHE-LOCK"))
+
 parser_cache = {}
 
 

@@ -160,7 +183,7 @@ def _set_cache_item(hashed_grammar, path, module_cache_item):
     parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
 
 
-def save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
+def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
     path = file_io.path
     try:
         p_time = None if path is None else file_io.get_last_modified()

@@ -171,7 +194,18 @@ def save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_pat
     item = _NodeCacheItem(module, lines, p_time)
     _set_cache_item(hashed_grammar, path, item)
     if pickling and path is not None:
-        _save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
+        try:
+            _save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
+        except PermissionError:
+            # It's not really a big issue if the cache cannot be saved to the
+            # file system. It's still in RAM in that case. However we should
+            # still warn the user that this is happening.
+            warnings.warn(
+                'Tried to save a file to %s, but got permission denied.',
+                Warning
+            )
+        else:
+            _remove_cache_and_update_lock(cache_path=cache_path)
 
 
 def _save_to_file_system(hashed_grammar, path, item, cache_path=None):

@@ -186,6 +220,46 @@ def clear_cache(cache_path=None):
     parser_cache.clear()
 
 
+def clear_inactive_cache(
+    cache_path=None,
+    inactivity_threshold=_CACHED_FILE_MAXIMUM_SURVIVAL,
+):
+    if cache_path is None:
+        cache_path = _get_default_cache_path()
+    if not os.path.exists(cache_path):
+        return False
+    for version_path in os.listdir(cache_path):
+        version_path = os.path.join(cache_path, version_path)
+        if not os.path.isdir(version_path):
+            continue
+        for file in scandir(version_path):
+            if (
+                file.stat().st_atime + _CACHED_FILE_MAXIMUM_SURVIVAL
+                <= time.time()
+            ):
+                try:
+                    os.remove(file.path)
+                except OSError:  # silently ignore all failures
+                    continue
+    else:
+        return True
+
+
+def _remove_cache_and_update_lock(cache_path=None):
+    lock = _get_cache_clear_lock(cache_path=cache_path)
+    clear_lock_time = lock.get_last_modified()
+    if (
+        clear_lock_time is None  # first time
+        or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
+    ):
+        if not lock._touch():
+            # First make sure that as few as possible other cleanup jobs also
+            # get started. There is still a race condition but it's probably
+            # not a big problem.
+            return False
+
+        clear_inactive_cache(cache_path=cache_path)
+
+
 def _get_hashed_path(hashed_grammar, path, cache_path=None):
     directory = _get_cache_directory_path(cache_path=cache_path)
 
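Taken together these additions give parso a self-limiting on-disk cache: every successful save may trigger garbage collection, but a lock file's mtime throttles collection to once per _CACHE_CLEAR_THRESHOLD (a day). A condensed sketch of that throttle using only stdlib calls (illustrative; the real code goes through FileIO._touch and _get_cache_clear_lock above, and the lock path here is made up):

    import os
    import time

    LOCK = '/tmp/PARSO-CACHE-LOCK'   # hypothetical location
    THRESHOLD = 60 * 60 * 24         # mirrors _CACHE_CLEAR_THRESHOLD

    def maybe_collect():
        try:
            last = os.path.getmtime(LOCK)
        except OSError:
            last = None              # lock never created: first run
        if last is None or last + THRESHOLD <= time.time():
            open(LOCK, 'a').close()  # claim the job, like FileIO._touch
            os.utime(LOCK, None)
            return True              # caller now runs clear_inactive_cache()
        return False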
@@ -1,4 +1,5 @@
 import os
+from parso._compatibility import FileNotFoundError
 
 
 class FileIO(object):

@@ -22,6 +23,17 @@ class FileIO(object):
             # Might raise FileNotFoundError, OSError for Python 2
             return None
 
+    def _touch(self):
+        try:
+            os.utime(self.path, None)
+        except FileNotFoundError:
+            try:
+                file = open(self.path, 'a')
+                file.close()
+            except (OSError, IOError):  # TODO Maybe log this?
+                return False
+        return True
+
     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self.path)
 
@@ -7,7 +7,7 @@ from parso.utils import split_lines, python_bytes_to_unicode, parse_version_stri
 from parso.python.diff import DiffParser
 from parso.python.tokenize import tokenize_lines, tokenize
 from parso.python.token import PythonTokenTypes
-from parso.cache import parser_cache, load_module, save_module
+from parso.cache import parser_cache, load_module, try_to_save_module
 from parso.parser import BaseParser
 from parso.python.parser import Parser as PythonParser
 from parso.python.errors import ErrorFinderConfig

@@ -132,7 +132,7 @@ class Grammar(object):
                     old_lines=old_lines,
                     new_lines=lines
                 )
-                save_module(self._hashed, file_io, new_node, lines,
+                try_to_save_module(self._hashed, file_io, new_node, lines,
                             # Never pickle in pypy, it's slow as hell.
                             pickling=cache and not is_pypy,
                             cache_path=cache_path)

@@ -148,7 +148,7 @@ class Grammar(object):
         root_node = p.parse(tokens=tokens)
 
         if cache or diff_cache:
-            save_module(self._hashed, file_io, root_node, lines,
+            try_to_save_module(self._hashed, file_io, root_node, lines,
                         # Never pickle in pypy, it's slow as hell.
                         pickling=cache and not is_pypy,
                         cache_path=cache_path)

@@ -252,7 +252,7 @@ def load_grammar(**kwargs):
         grammar = PythonGrammar(version_info, bnf_text)
         return _loaded_grammars.setdefault(path, grammar)
     except FileNotFoundError:
-        message = "Python version %s is currently not supported." % version
+        message = "Python version %s.%s is currently not supported." % (version_info.major, version_info.minor)
         raise NotImplementedError(message)
     else:
         raise NotImplementedError("No support for language %s." % language)
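The rename to try_to_save_module reflects the new behavior above: saving to disk is now best-effort, and the last hunk makes the unsupported-version message use the parsed version_info pair so it always reports a normalized major.minor. Hypothetical repro (assumes no grammar file ships for that version):

    import parso

    try:
        parso.load_grammar(version='5.0')
    except NotImplementedError as e:
        print(e)  # 'Python version 5.0 is currently not supported.'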
@@ -163,7 +163,7 @@ class Rule(object):
     def get_node(self, node):
         return node
 
-    def _get_message(self, message):
+    def _get_message(self, message, node):
         if message is None:
             message = self.message
             if message is None:

@@ -176,7 +176,7 @@ class Rule(object):
             if code is None:
                 raise ValueError("The error code on the class is not set.")
 
-        message = self._get_message(message)
+        message = self._get_message(message, node)
 
         self._normalizer.add_issue(node, code, message)
 
@@ -212,7 +212,8 @@ def _dump_nfa(start, finish):
     todo = [start]
     for i, state in enumerate(todo):
         print("  State", i, state is finish and "(final)" or "")
-        for label, next_ in state.arcs:
+        for arc in state.arcs:
+            label, next_ = arc.nonterminal_or_string, arc.next
             if next_ in todo:
                 j = todo.index(next_)
             else:

@@ -244,7 +245,7 @@ def generate_grammar(bnf_grammar, token_namespace):
     rule_to_dfas = {}
     start_nonterminal = None
     for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse():
-        #_dump_nfa(a, z)
+        #_dump_nfa(nfa_a, nfa_z)
         dfas = _make_dfas(nfa_a, nfa_z)
         #_dump_dfas(dfas)
         # oldlen = len(dfas)
parso/py.typed (new, empty file)
@@ -6,6 +6,7 @@ from contextlib import contextmanager
 
 from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
 from parso.python.tree import search_ancestor
+from parso.python.tokenize import _get_token_collection
 
 _BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
 _STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
@@ -13,11 +14,84 @@ _STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
 _MAX_BLOCK_SIZE = 20
 _MAX_INDENT_COUNT = 100
 ALLOWED_FUTURES = (
-    'all_feature_names', 'nested_scopes', 'generators', 'division',
-    'absolute_import', 'with_statement', 'print_function', 'unicode_literals',
+    'nested_scopes', 'generators', 'division', 'absolute_import',
+    'with_statement', 'print_function', 'unicode_literals',
 )
 _COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
 
 
+def _get_rhs_name(node, version):
+    type_ = node.type
+    if type_ == "lambdef":
+        return "lambda"
+    elif type_ == "atom":
+        comprehension = _get_comprehension_type(node)
+        first, second = node.children[:2]
+        if comprehension is not None:
+            return comprehension
+        elif second.type == "dictorsetmaker":
+            if version < (3, 8):
+                return "literal"
+            else:
+                if second.children[1] == ":" or second.children[0] == "**":
+                    return "dict display"
+                else:
+                    return "set display"
+        elif (
+            first == "("
+            and (second == ")"
+                 or (len(node.children) == 3 and node.children[1].type == "testlist_comp"))
+        ):
+            return "tuple"
+        elif first == "(":
+            return _get_rhs_name(_remove_parens(node), version=version)
+        elif first == "[":
+            return "list"
+        elif first == "{" and second == "}":
+            return "dict display"
+        elif first == "{" and len(node.children) > 2:
+            return "set display"
+    elif type_ == "keyword":
+        if "yield" in node.value:
+            return "yield expression"
+        if version < (3, 8):
+            return "keyword"
+        else:
+            return str(node.value)
+    elif type_ == "operator" and node.value == "...":
+        return "Ellipsis"
+    elif type_ == "comparison":
+        return "comparison"
+    elif type_ in ("string", "number", "strings"):
+        return "literal"
+    elif type_ == "yield_expr":
+        return "yield expression"
+    elif type_ == "test":
+        return "conditional expression"
+    elif type_ in ("atom_expr", "power"):
+        if node.children[0] == "await":
+            return "await expression"
+        elif node.children[-1].type == "trailer":
+            trailer = node.children[-1]
+            if trailer.children[0] == "(":
+                return "function call"
+            elif trailer.children[0] == "[":
+                return "subscript"
+            elif trailer.children[0] == ".":
+                return "attribute"
+    elif (
+        ("expr" in type_
+         and "star_expr" not in type_)  # is a substring
+        or "_test" in type_
+        or type_ in ("term", "factor")
+    ):
+        return "operator"
+    elif type_ == "star_expr":
+        return "starred"
+    elif type_ == "testlist_star_expr":
+        return "tuple"
+    elif type_ == "fstring":
+        return "f-string expression"
+    return type_  # shouldn't reach here
+
+
 def _iter_stmts(scope):
     """
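_get_rhs_name exists so parso can mirror CPython 3.9's more descriptive assignment errors: it names the kind of expression that appears as an assignment target ("dict display", "function call", "yield expression", and so on), and _ExprStmtRule further down formats that name into the new message template. A rough check, assuming a 3.9 grammar file is available:

    import parso

    grammar = parso.load_grammar(version='3.9')
    tree = grammar.parse('f"x" += 1')
    for issue in grammar.iter_errors(tree):
        print(issue.message)
    # Expected along the lines of:
    # SyntaxError: 'f-string expression' is an illegal expression for augmented assignment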
@@ -136,6 +210,21 @@ def _get_for_stmt_definition_exprs(for_stmt):
     return list(_iter_definition_exprs_from_lists(exprlist))
 
 
+def _is_argument_comprehension(argument):
+    return argument.children[1].type in _COMP_FOR_TYPES
+
+
+def _any_fstring_error(version, node):
+    if version < (3, 9) or node is None:
+        return False
+    if node.type == "error_node":
+        return any(child.type == "fstring_start" for child in node.children)
+    elif node.type == "fstring":
+        return True
+    else:
+        return search_ancestor(node, "fstring")
+
+
 class _Context(object):
     def __init__(self, node, add_syntax_error, parent_context=None):
         self.node = node
@@ -333,6 +422,11 @@ class ErrorFinder(Normalizer):
             match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value)
             if match is None:
                 message = 'invalid syntax'
+                if (
+                    self.version >= (3, 9)
+                    and leaf.value in _get_token_collection(self.version).always_break_tokens
+                ):
+                    message = "f-string: " + message
             else:
                 if len(match.group(1)) == 1:
                     message = 'EOL while scanning string literal'
@@ -371,8 +465,8 @@
 class IndentationRule(Rule):
     code = 903
 
-    def _get_message(self, message):
-        message = super(IndentationRule, self)._get_message(message)
+    def _get_message(self, message, node):
+        message = super(IndentationRule, self)._get_message(message, node)
         return "IndentationError: " + message
 
 
@@ -396,21 +490,34 @@ class ErrorFinderConfig(NormalizerConfig):
 class SyntaxRule(Rule):
     code = 901
 
-    def _get_message(self, message):
-        message = super(SyntaxRule, self)._get_message(message)
+    def _get_message(self, message, node):
+        message = super(SyntaxRule, self)._get_message(message, node)
+        if (
+            "f-string" not in message
+            and _any_fstring_error(self._normalizer.version, node)
+        ):
+            message = "f-string: " + message
         return "SyntaxError: " + message
 
 
 @ErrorFinder.register_rule(type='error_node')
 class _InvalidSyntaxRule(SyntaxRule):
     message = "invalid syntax"
+    fstring_message = "f-string: invalid syntax"
 
     def get_node(self, node):
         return node.get_next_leaf()
 
     def is_issue(self, node):
-        # Error leafs will be added later as an error.
-        return node.get_next_leaf().type != 'error_leaf'
+        error = node.get_next_leaf().type != 'error_leaf'
+        if (
+            error
+            and _any_fstring_error(self._normalizer.version, node)
+        ):
+            self.add_issue(node, message=self.fstring_message)
+        else:
+            # Error leafs will be added later as an error.
+            return error
 
 
 @ErrorFinder.register_rule(value='await')
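With _any_fstring_error wired into SyntaxRule._get_message, errors detected inside f-strings on 3.9+ grammars gain CPython's "f-string: " message prefix. A rough check (illustrative; exact issue lists may vary):

    import parso

    grammar = parso.load_grammar(version='3.9')
    tree = grammar.parse('f"{a + }"')  # broken expression inside an f-string
    print([i.message for i in grammar.iter_errors(tree)])
    # Expected to contain something like 'SyntaxError: f-string: invalid syntax'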
@@ -449,7 +556,11 @@ class _ContinueChecks(SyntaxRule):
                 in_loop = True
             if block.type == 'try_stmt':
                 last_block = block.children[-3]
-                if last_block == 'finally' and leaf.start_pos > last_block.start_pos:
+                if (
+                    last_block == "finally"
+                    and leaf.start_pos > last_block.start_pos
+                    and self._normalizer.version < (3, 8)
+                ):
                     self.add_issue(leaf, message=self.message_in_finally)
                     return False  # Error already added
         if not in_loop:
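This gates the "continue is not supported inside finally" diagnostic to grammars older than 3.8, since Python 3.8 started allowing continue inside a finally block; the matching normalizer-issue fixture is deleted further down. Illustrative check:

    import parso

    CODE = 'for x in [1]:\n    try:\n        pass\n    finally:\n        continue\n'
    g37 = parso.load_grammar(version='3.7')
    g38 = parso.load_grammar(version='3.8')
    print(len(list(g37.iter_errors(g37.parse(CODE)))))  # expected: 1 issue on 3.7
    print(len(list(g38.iter_errors(g38.parse(CODE)))))  # expected: 0 issues on 3.8+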
@@ -622,26 +733,24 @@ class _FutureImportRule(SyntaxRule):
         allowed_futures = list(ALLOWED_FUTURES)
         if self._normalizer.version >= (3, 5):
             allowed_futures.append('generator_stop')
+        if self._normalizer.version >= (3, 7):
+            allowed_futures.append('annotations')
         if name == 'braces':
             self.add_issue(node, message="not a chance")
         elif name == 'barry_as_FLUFL':
             m = "Seriously I'm not implementing this :) ~ Dave"
             self.add_issue(node, message=m)
-        elif name not in ALLOWED_FUTURES:
+        elif name not in allowed_futures:
             message = "future feature %s is not defined" % name
             self.add_issue(node, message=message)
 
 
 @ErrorFinder.register_rule(type='star_expr')
 class _StarExprRule(SyntaxRule):
-    message = "starred assignment target must be in a list or tuple"
     message_iterable_unpacking = "iterable unpacking cannot be used in comprehension"
     message_assignment = "can use starred expression only as assignment target"
 
     def is_issue(self, node):
-        if node.parent.type not in _STAR_EXPR_PARENTS:
-            return True
         if node.parent.type == 'testlist_comp':
             # [*[] for a in [1]]
             if node.parent.children[1].type in _COMP_FOR_TYPES:
@@ -665,6 +774,9 @@ class _StarExprRule(SyntaxRule):
 class _StarExprParentRule(SyntaxRule):
     def is_issue(self, node):
         if node.parent.type == 'del_stmt':
-            self.add_issue(node.parent, message="can't use starred expression here")
+            if self._normalizer.version >= (3, 9):
+                self.add_issue(node.parent, message="cannot delete starred")
+            else:
+                self.add_issue(node.parent, message="can't use starred expression here")
         else:
             def is_definition(node, ancestor):
@@ -684,7 +796,10 @@ class _StarExprParentRule(SyntaxRule):
             args = [c for c in node.children if c != ',']
             starred = [c for c in args if c.type == 'star_expr']
             if len(starred) > 1:
-                message = "two starred expressions in assignment"
+                if self._normalizer.version < (3, 9):
+                    message = "two starred expressions in assignment"
+                else:
+                    message = "multiple starred expressions in assignment"
                 self.add_issue(starred[1], message=message)
             elif starred:
                 count = args.index(starred[0])
@@ -734,6 +849,9 @@ class _AnnotatorRule(SyntaxRule):
 class _ArgumentRule(SyntaxRule):
     def is_issue(self, node):
         first = node.children[0]
+        if self._normalizer.version < (3, 8):
+            # a((b)=c) is valid in <3.8
+            first = _remove_parens(first)
         if node.children[1] == '=' and first.type != 'name':
             if first.type == 'lambdef':
                 # f(lambda: 1=1)
@@ -749,6 +867,9 @@ class _ArgumentRule(SyntaxRule):
                 message = 'expression cannot contain assignment, perhaps you meant "=="?'
             self.add_issue(first, message=message)
 
+        if _is_argument_comprehension(node) and node.parent.type == 'classdef':
+            self.add_issue(node, message='invalid syntax')
+
 
 @ErrorFinder.register_rule(type='nonlocal_stmt')
 class _NonlocalModuleLevelRule(SyntaxRule):
@@ -768,12 +889,6 @@ class _ArglistRule(SyntaxRule):
         return "Generator expression must be parenthesized"
 
     def is_issue(self, node):
-        first_arg = node.children[0]
-        if first_arg.type == 'argument' \
-                and first_arg.children[1].type in _COMP_FOR_TYPES:
-            # e.g. foo(x for x in [], b)
-            return len(node.children) >= 2
-        else:
         arg_set = set()
         kw_only = False
         kw_unpacking_only = False
@@ -795,6 +910,10 @@ class _ArglistRule(SyntaxRule):
 
             if argument.type == 'argument':
                 first = argument.children[0]
+                if _is_argument_comprehension(argument) and len(node.children) >= 2:
+                    # a(a, b for b in c)
+                    return True
+
                 if first in ('*', '**'):
                     if first == '*':
                         if kw_unpacking_only:
@@ -809,7 +928,10 @@ class _ArglistRule(SyntaxRule):
                 if first.type == 'name':
                     if first.value in arg_set:
                         # f(x=1, x=2)
-                        self.add_issue(first, message="keyword argument repeated")
+                        message = "keyword argument repeated"
+                        if self._normalizer.version >= (3, 9):
+                            message += ": {}".format(first.value)
+                        self.add_issue(first, message=message)
                     else:
                         arg_set.add(first.value)
             else:
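On 3.9+ grammars the duplicate-keyword diagnostic now names the offending argument, matching CPython 3.9. Illustrative comparison:

    import parso

    code = 'f(x=1, x=2)'
    for version in ('3.8', '3.9'):
        grammar = parso.load_grammar(version=version)
        issues = list(grammar.iter_errors(grammar.parse(code)))
        print(version, issues[0].message)
    # 3.8: SyntaxError: keyword argument repeated
    # 3.9: SyntaxError: keyword argument repeated: x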
@@ -898,7 +1020,7 @@ class _FStringRule(SyntaxRule):
 
 
 class _CheckAssignmentRule(SyntaxRule):
-    def _check_assignment(self, node, is_deletion=False, is_namedexpr=False):
+    def _check_assignment(self, node, is_deletion=False, is_namedexpr=False, is_aug_assign=False):
         error = None
         type_ = node.type
         if type_ == 'lambdef':
@@ -915,6 +1037,16 @@ class _CheckAssignmentRule(SyntaxRule):
                     error = 'dict display'
                 else:
                     error = 'set display'
+            elif first == "{" and second == "}":
+                if self._normalizer.version < (3, 8):
+                    error = 'literal'
+                else:
+                    error = "dict display"
+            elif first == "{" and len(node.children) > 2:
+                if self._normalizer.version < (3, 8):
+                    error = 'literal'
+                else:
+                    error = "set display"
             elif first in ('(', '['):
                 if second.type == 'yield_expr':
                     error = 'yield expression'
@@ -930,11 +1062,13 @@ class _CheckAssignmentRule(SyntaxRule):
                     # This is not a comprehension, they were handled
                     # further above.
                     for child in second.children[::2]:
-                        self._check_assignment(child, is_deletion, is_namedexpr)
+                        self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
                 else:  # Everything handled, must be useless brackets.
-                    self._check_assignment(second, is_deletion, is_namedexpr)
+                    self._check_assignment(second, is_deletion, is_namedexpr, is_aug_assign)
         elif type_ == 'keyword':
-            if self._normalizer.version < (3, 8):
+            if node.value == "yield":
+                error = "yield expression"
+            elif self._normalizer.version < (3, 8):
                 error = 'keyword'
             else:
                 error = str(node.value)
@@ -966,13 +1100,28 @@ class _CheckAssignmentRule(SyntaxRule):
                     error = 'subscript'
                 elif is_namedexpr and trailer.children[0] == '.':
                     error = 'attribute'
+        elif type_ == "fstring":
+            if self._normalizer.version < (3, 8):
+                error = 'literal'
+            else:
+                error = "f-string expression"
         elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
             for child in node.children[::2]:
-                self._check_assignment(child, is_deletion, is_namedexpr)
+                self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
         elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
               or '_test' in type_
               or type_ in ('term', 'factor')):
             error = 'operator'
+        elif type_ == "star_expr":
+            if is_deletion:
+                if self._normalizer.version >= (3, 9):
+                    error = "starred"
+                else:
+                    self.add_issue(node, message="can't use starred expression here")
+            elif not search_ancestor(node, *_STAR_EXPR_PARENTS) and not is_aug_assign:
+                self.add_issue(node, message="starred assignment target must be in a list or tuple")
+
+            self._check_assignment(node.children[1])
 
         if error is not None:
             if is_namedexpr:
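The new star_expr branch replaces the blanket check that _StarExprRule used to do at the top of its is_issue (removed above): whether a starred target is legal now depends on where it occurs and on whether the statement is an augmented assignment. Illustrative (exact issue counts may vary by grammar version):

    import parso

    grammar = parso.load_grammar(version='3.8')
    for code in ('*a = 1', '*a, b = 1', '*a += 1'):
        n = len(list(grammar.iter_errors(grammar.parse(code))))
        print(code, '->', n, 'issue(s)')
    # '*a = 1' is expected to error (bare starred target), '*a, b = 1' is
    # fine, and '*a += 1' is rejected as an augmented-assignment target.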
@@ -999,15 +1148,35 @@ class _CompForRule(_CheckAssignmentRule):
 @ErrorFinder.register_rule(type='expr_stmt')
 class _ExprStmtRule(_CheckAssignmentRule):
     message = "illegal expression for augmented assignment"
+    extended_message = "'{target}' is an " + message
 
     def is_issue(self, node):
-        for before_equal in node.children[:-2:2]:
-            self._check_assignment(before_equal)
-
         augassign = node.children[1]
-        if augassign != '=' and augassign.type != 'annassign':  # Is augassign.
-            return node.children[0].type in ('testlist_star_expr', 'atom', 'testlist')
+        is_aug_assign = augassign != '=' and augassign.type != 'annassign'
+
+        if self._normalizer.version <= (3, 8) or not is_aug_assign:
+            for before_equal in node.children[:-2:2]:
+                self._check_assignment(before_equal, is_aug_assign=is_aug_assign)
+
+        if is_aug_assign:
+            target = _remove_parens(node.children[0])
+            # a, a[b], a.b
+            if target.type == "name" or (
+                target.type in ("atom_expr", "power")
+                and target.children[1].type == "trailer"
+                and target.children[-1].children[0] != "("
+            ):
+                return False
+
+            if self._normalizer.version <= (3, 8):
+                return True
+            else:
+                self.add_issue(
+                    node,
+                    message=self.extended_message.format(
+                        target=_get_rhs_name(node.children[0], self._normalizer.version)
+                    ),
+                )
 
 
 @ErrorFinder.register_rule(type='with_item')
 class _WithItemRule(_CheckAssignmentRule):
parso/python/grammar310.txt (new file, 171 lines)

@@ -0,0 +1,171 @@
+# Grammar for Python
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://devguide.python.org/grammar/
+
+# Start symbols for the grammar:
+#       single_input is a single interactive statement;
+#       file_input is a module or sequence of commands read from an input file;
+#       eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: stmt* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' namedexpr_test NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+
+async_funcdef: 'async' funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
+     ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]])
+  |  '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+  |  '**' tfpdef [',']]] )
+|  (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]]
+  |  '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+  |  '**' tfpdef [','])
+)
+tfpdef: NAME [':' test]
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  |  '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  |  '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  |  '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  |  '**' vfpdef [',']
+)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt | NEWLINE
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist_star_expr))*)
+annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal and annotated assignments, additional restrictions enforced by the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist_star_expr]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+            ['else' ':' suite]
+            ['finally' ':' suite] |
+           'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)* ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+namedexpr_test: test [':=' test]
+test: or_test ['if' or_test 'else' test] | lambdef
+test_nocond: or_test | lambdef_nocond
+lambdef: 'lambda' [varargslist] ':' test
+lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom_expr ['**' factor]
+atom_expr: ['await'] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+       '[' [testlist_comp] ']' |
+       '{' [dictorsetmaker] '}' |
+       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( ((test ':' test | '**' expr)
+                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                  ((test | star_expr)
+                   (comp_for | (',' (test | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)* [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
+comp_if: 'if' test_nocond [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist_star_expr
+
+strings: (STRING | fstring)+
+fstring: FSTRING_START fstring_content* FSTRING_END
+fstring_content: FSTRING_STRING | fstring_expr
+fstring_conversion: '!' NAME
+fstring_expr: '{' testlist ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
+fstring_format_spec: ':' fstring_content*
@@ -52,7 +52,7 @@ small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
 expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                      ('=' (yield_expr|testlist_star_expr))*)
-annassign: ':' test ['=' test]
+annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
 testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
 augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
             '<<=' | '>>=' | '**=' | '//=')
@@ -12,7 +12,7 @@ single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
 file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER
 
-decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorator: '@' namedexpr_test NEWLINE
 decorators: decorator+
 decorated: decorators (classdef | funcdef | async_funcdef)
 

@@ -52,7 +52,7 @@ small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
 expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                      ('=' (yield_expr|testlist_star_expr))*)
-annassign: ':' test ['=' test]
+annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
 testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
 augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
             '<<=' | '>>=' | '**=' | '//=')
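Both grammar edits track CPython: the decorator rule adopts PEP 614's relaxed form ('@' followed by any namedexpr_test), and annassign lets annotated assignments take yield expressions and starred tuples on the right-hand side. Examples the updated grammars should now accept without an error node (illustrative; assumes the matching grammar versions are installed):

    import parso

    g = parso.load_grammar(version='3.9')
    g.parse('@buttons[0].clicked.connect\ndef f(): ...\n')  # PEP 614 decorator
    g.parse('x: tuple = 1, *rest\n')                        # starred RHS in annassign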
@@ -260,7 +260,7 @@ def _create_token_collection(version_info):
                           'finally', 'while', 'with', 'return', 'continue',
                           'break', 'del', 'pass', 'global', 'assert')
     if version_info >= (3, 5):
-        ALWAYS_BREAK_TOKENS += ('async', 'nonlocal')
+        ALWAYS_BREAK_TOKENS += ('nonlocal', )
     pseudo_token_compiled = _compile(PseudoToken)
     return TokenCollection(
         pseudo_token_compiled, single_quoted, triple_quoted, endpats,
@@ -168,7 +168,7 @@ class NodeOrLeaf(object):
     @abstractmethod
     def get_code(self, include_prefix=True):
         """
-        Returns the code that was input the input for the parser for this node.
+        Returns the code that was the input for the parser for this node.
 
         :param include_prefix: Removes the prefix (whitespace and comments) of
             e.g. a statement.
@@ -105,8 +105,17 @@ def python_bytes_to_unicode(source, encoding='utf-8', errors='strict'):
         if not isinstance(encoding, unicode):
             encoding = unicode(encoding, 'utf-8', 'replace')
 
-    # Cast to unicode
-    return unicode(source, encoding, errors)
+    try:
+        # Cast to unicode
+        return unicode(source, encoding, errors)
+    except LookupError:
+        if errors == 'replace':
+            # This is a weird case that can happen if the given encoding is not
+            # a valid encoding. This usually shouldn't happen with provided
+            # encodings, but can happen if somebody uses encoding declarations
+            # like `# coding: foo-8`.
+            return unicode(source, 'utf-8', errors)
+        raise
 
 
 def version_info():

@@ -120,7 +129,7 @@ def version_info():
 
 
 def _parse_version(version):
-    match = re.match(r'(\d+)(?:\.(\d)(?:\.\d+)?)?$', version)
+    match = re.match(r'(\d+)(?:\.(\d{1,2})(?:\.\d+)?)?((a|b|rc)\d)?$', version)
    if match is None:
         raise ValueError('The given version is not in the right format. '
                          'Use something like "3.8" or "3".')
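The widened pattern is what makes version strings like '3.10' (two-digit minor) and pre-releases like '3.9b1' parseable at all, which the grammar310.txt addition above depends on. Quick check via the public wrapper around _parse_version:

    from parso.utils import parse_version_string

    parse_version_string('3.10')   # previously rejected: minor was one digit
    parse_version_string('3.9b1')  # trailing (a|b|rc)N suffix is now tolerated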
3
setup.py
3
setup.py
@@ -25,7 +25,7 @@ setup(name='parso',
|
|||||||
keywords='python parser parsing',
|
keywords='python parser parsing',
|
||||||
long_description=readme,
|
long_description=readme,
|
||||||
packages=find_packages(exclude=['test']),
|
packages=find_packages(exclude=['test']),
|
||||||
package_data={'parso': ['python/grammar*.txt']},
|
package_data={'parso': ['python/grammar*.txt', 'py.typed', '*.pyi', '**/*.pyi']},
|
||||||
platforms=['any'],
|
platforms=['any'],
|
||||||
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
|
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
|
||||||
classifiers=[
|
classifiers=[
|
||||||
@@ -44,6 +44,7 @@ setup(name='parso',
           'Topic :: Software Development :: Libraries :: Python Modules',
           'Topic :: Text Editors :: Integrated Development Environments (IDE)',
           'Topic :: Utilities',
+          'Typing :: Typed',
       ],
       extras_require={
           'testing': [
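Note: shipping ``py.typed`` and the stub files marks the distribution as
typed under PEP 561. A quick sanity check, assuming the marker file is
present in the installed package::

    import os
    import parso

    marker = os.path.join(os.path.dirname(parso.__file__), 'py.typed')
    assert os.path.exists(marker)  # type checkers now pick up parso's types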
@@ -52,9 +52,37 @@ FAILING_EXAMPLES = [
     'f(x=2, y)',
     'f(**x, *y)',
     'f(**x, y=3, z)',
+    # augassign
     'a, b += 3',
     '(a, b) += 3',
     '[a, b] += 3',
+    'f() += 1',
+    'lambda x:None+=1',
+    '{} += 1',
+    '{a:b} += 1',
+    '{1} += 1',
+    '{*x} += 1',
+    '(x,) += 1',
+    '(x, y if a else q) += 1',
+    '[] += 1',
+    '[1,2] += 1',
+    '[] += 1',
+    'None += 1',
+    '... += 1',
+    'a > 1 += 1',
+    '"test" += 1',
+    '1 += 1',
+    '1.0 += 1',
+    '(yield) += 1',
+    '(yield from x) += 1',
+    '(x if x else y) += 1',
+    'a() += 1',
+    'a + b += 1',
+    '+a += 1',
+    'a and b += 1',
+    '*a += 1',
+    'a, b += 1',
+    'f"xxx" += 1',
     # All assignment tests
     'lambda a: 1 = 1',
     '[x for x in y] = 1',
@@ -308,6 +336,12 @@ if sys.version_info[:2] <= (3, 4):
         '(*[1], 2)',
     ]
 
+if sys.version_info[:2] >= (3, 7):
+    # This is somehow ok in previous versions.
+    FAILING_EXAMPLES += [
+        'class X(base for base in bases): pass',
+    ]
+
 if sys.version_info[:2] < (3, 8):
     FAILING_EXAMPLES += [
         # Python/compile.c
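Note: every ``FAILING_EXAMPLES`` entry must produce at least one issue from
``Grammar.iter_errors``. For one of the new augassign cases::

    import parso

    grammar = parso.load_grammar()
    module = grammar.parse('f() += 1')
    assert list(grammar.iter_errors(module))  # augmented assignment to a call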
@@ -12,13 +12,6 @@ from .__future__ import absolute_import
 ''r''u''
 b'' BR''
 
-for x in [1]:
-    try:
-        continue  # Only the other continue and pass is an error.
-    finally:
-        #: E901
-        continue
-
 for x in [1]:
     break
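Note: the static ``#: E901`` example is removed because ``continue`` inside
``finally`` became legal in Python 3.8; the check moves to a
version-parametrized test (``test_continue_in_finally`` below). The
equivalent public-API check::

    import parso

    code = 'for a in [1]:\n    try:\n        pass\n    finally:\n        continue\n'
    for version, is_error in [('3.7', True), ('3.8', False)]:
        grammar = parso.load_grammar(version=version)
        assert bool(list(grammar.iter_errors(grammar.parse(code)))) == is_error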
@@ -2,28 +2,38 @@
 Test all things related to the ``jedi.cache`` module.
 """
 
-from os import unlink
+import os
+import os.path
 
 import pytest
 import time
 
-from parso.cache import _NodeCacheItem, save_module, load_module, \
-    _get_hashed_path, parser_cache, _load_from_file_system, _save_to_file_system
+from parso.cache import (_CACHED_FILE_MAXIMUM_SURVIVAL, _VERSION_TAG,
+                         _get_cache_clear_lock, _get_hashed_path,
+                         _load_from_file_system, _NodeCacheItem,
+                         _remove_cache_and_update_lock, _save_to_file_system,
+                         load_module, parser_cache, try_to_save_module)
+from parso._compatibility import is_pypy, PermissionError
 from parso import load_grammar
 from parso import cache
 from parso import file_io
 from parso import parse
 
+skip_pypy = pytest.mark.skipif(
+    is_pypy,
+    reason="pickling in pypy is slow, since we don't pickle,"
+           "we never go into path of auto-collecting garbage"
+)
+
 
 @pytest.fixture()
-def isolated_jedi_cache(monkeypatch, tmpdir):
-    """
-    Set `jedi.settings.cache_directory` to a temporary directory during test.
-
-    Same as `clean_jedi_cache`, but create the temporary directory for
-    each test case (scope='function').
-    """
-    monkeypatch.setattr(cache, '_default_cache_path', str(tmpdir))
+def isolated_parso_cache(monkeypatch, tmpdir):
+    """Set `parso.cache._default_cache_path` to a temporary directory
+    during the test. """
+    cache_path = str(os.path.join(str(tmpdir), "__parso_cache"))
+    monkeypatch.setattr(cache, '_default_cache_path', cache_path)
+    monkeypatch.setattr(cache, '_get_default_cache_path', lambda *args, **kwargs: cache_path)
+    return cache_path
 
 
 def test_modulepickling_change_cache_dir(tmpdir):
@@ -57,7 +67,7 @@ def load_stored_item(hashed_grammar, path, item, cache_path):
     return item
 
 
-@pytest.mark.usefixtures("isolated_jedi_cache")
+@pytest.mark.usefixtures("isolated_parso_cache")
 def test_modulepickling_simulate_deleted_cache(tmpdir):
     """
     Tests loading from a cache file after it is deleted.
@@ -81,10 +91,10 @@ def test_modulepickling_simulate_deleted_cache(tmpdir):
         pass
     io = file_io.FileIO(path)
 
-    save_module(grammar._hashed, io, module, lines=[])
+    try_to_save_module(grammar._hashed, io, module, lines=[])
     assert load_module(grammar._hashed, io) == module
 
-    unlink(_get_hashed_path(grammar._hashed, path))
+    os.unlink(_get_hashed_path(grammar._hashed, path))
     parser_cache.clear()
 
     cached2 = load_module(grammar._hashed, io)
@@ -139,3 +149,43 @@ def test_cache_last_used_update(diff_cache, use_file_io):
 
     node_cache_item = next(iter(parser_cache.values()))[p]
     assert now < node_cache_item.last_used < time.time()
+
+
+@skip_pypy
+def test_inactive_cache(tmpdir, isolated_parso_cache):
+    parser_cache.clear()
+    test_subjects = "abcdef"
+    for path in test_subjects:
+        parse('somecode', cache=True, path=os.path.join(str(tmpdir), path))
+    raw_cache_path = os.path.join(isolated_parso_cache, _VERSION_TAG)
+    assert os.path.exists(raw_cache_path)
+    paths = os.listdir(raw_cache_path)
+    a_while_ago = time.time() - _CACHED_FILE_MAXIMUM_SURVIVAL
+    old_paths = set()
+    for path in paths[:len(test_subjects) // 2]:  # make certain number of paths old
+        os.utime(os.path.join(raw_cache_path, path), (a_while_ago, a_while_ago))
+        old_paths.add(path)
+    # nothing should be cleared while the lock is on
+    assert os.path.exists(_get_cache_clear_lock().path)
+    _remove_cache_and_update_lock()  # it shouldn't clear anything
+    assert len(os.listdir(raw_cache_path)) == len(test_subjects)
+    assert old_paths.issubset(os.listdir(raw_cache_path))
+
+    os.utime(_get_cache_clear_lock().path, (a_while_ago, a_while_ago))
+    _remove_cache_and_update_lock()
+    assert len(os.listdir(raw_cache_path)) == len(test_subjects) // 2
+    assert not old_paths.intersection(os.listdir(raw_cache_path))
+
+
+@skip_pypy
+def test_permission_error(monkeypatch):
+    def save(*args, **kwargs):
+        was_called[0] = True  # Python 2... Use nonlocal instead
+        raise PermissionError
+
+    was_called = [False]
+
+    monkeypatch.setattr(cache, '_save_to_file_system', save)
+    with pytest.warns(Warning):
+        parse(path=__file__, cache=True, diff_cache=True)
+    assert was_called[0]
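Note: the tests above pin down the cache's garbage collection: pickled
modules live in a per-version directory, entries untouched for longer than
``_CACHED_FILE_MAXIMUM_SURVIVAL`` are removed, and a lock file throttles how
often the cleanup may run. Those helpers are private; typical opt-in usage is
simply::

    import parso

    # cache=True stores the parsed module on disk, keyed by path and grammar,
    # so re-parsing the same file can be served from the cache.
    module = parso.parse('x = 1\n', path='example.py', cache=True)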
@@ -20,7 +20,7 @@ def test_parse_version(string, result):
     assert utils._parse_version(string) == result
 
 
-@pytest.mark.parametrize('string', ['1.', 'a', '#', '1.3.4.5', '1.12'])
+@pytest.mark.parametrize('string', ['1.', 'a', '#', '1.3.4.5'])
 def test_invalid_grammar_version(string):
     with pytest.raises(ValueError):
         load_grammar(version=string)
@@ -194,6 +194,9 @@ def test_no_error_nodes(each_version):
 def test_named_expression(works_ge_py38):
     works_ge_py38.parse("(a := 1, a + 1)")
 
+def test_extended_rhs_annassign(works_ge_py38):
+    works_ge_py38.parse("x: y = z,")
+    works_ge_py38.parse("x: Tuple[int, ...] = z, *q, w")
 
 @pytest.mark.parametrize(
     'param_code', [
@@ -208,3 +211,13 @@ def test_named_expression(works_ge_py38):
     )
 def test_positional_only_arguments(works_ge_py38, param_code):
     works_ge_py38.parse("def x(%s): pass" % param_code)
+
+@pytest.mark.parametrize(
+    'expression', [
+        'a + a',
+        'lambda x: x',
+        'a := lambda x: x'
+    ]
+)
+def test_decorator_expression(works_ge_py39, expression):
+    works_ge_py39.parse("@%s\ndef x(): pass" % expression)
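Note: these cases cover PEP 614 (Python 3.9), which relaxes decorators from
dotted names to arbitrary expressions. Illustratively::

    import parso

    grammar = parso.load_grammar(version='3.9')
    module = grammar.parse('@a + a\ndef x(): pass')
    assert not list(grammar.iter_errors(module))  # the 3.8 grammar flags this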
@@ -87,6 +87,39 @@ def test_async_for(works_ge_py35):
     works_ge_py35.parse("async def foo():\n async for a in b: pass")
 
 
+@pytest.mark.parametrize("body", [
+    """[1 async for a in b
+    ]""",
+    """[1 async
+        for a in b
+    ]""",
+    """[
+        1
+        async for a in b
+    ]""",
+    """[
+        1
+        async for a
+        in b
+    ]""",
+    """[
+        1
+        async
+        for
+        a
+        in
+        b
+    ]""",
+    """ [
+        1 async for a in b
+    ]""",
+])
+def test_async_for_comprehension_newline(works_ge_py36, body):
+    # Issue #139
+    works_ge_py36.parse("""async def foo():
+    {}""".format(body))
+
+
 def test_async_with(works_ge_py35):
     works_ge_py35.parse("async def foo():\n async with a: pass")
 
@@ -7,6 +7,8 @@ import warnings
 import pytest
 
 import parso
 
+from textwrap import dedent
 from parso._compatibility import is_pypy
 from .failing_examples import FAILING_EXAMPLES, indent, build_nested
 
@@ -185,12 +187,13 @@ def test_statically_nested_blocks():
 
 
 def test_future_import_first():
-    def is_issue(code, *args):
+    def is_issue(code, *args, **kwargs):
         code = code % args
-        return bool(_get_error_list(code))
+        return bool(_get_error_list(code, **kwargs))
 
     i1 = 'from __future__ import division'
     i2 = 'from __future__ import absolute_import'
+    i3 = 'from __future__ import annotations'
     assert not is_issue(i1)
     assert not is_issue(i1 + ';' + i2)
    assert not is_issue(i1 + '\n' + i2)
@@ -201,6 +204,8 @@ def test_future_import_first():
     assert not is_issue('""\n%s;%s', i1, i2)
     assert not is_issue('"";%s;%s ', i1, i2)
     assert not is_issue('"";%s\n%s ', i1, i2)
+    assert not is_issue(i3, version="3.7")
+    assert is_issue(i3, version="3.6")
     assert is_issue('1;' + i1)
     assert is_issue('1\n' + i1)
     assert is_issue('"";1\n' + i1)
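Note: ``from __future__ import annotations`` (PEP 563) only exists on 3.7+,
hence the version-dependent assertions. The same check via the public API::

    import parso

    code = 'from __future__ import annotations'
    for version, is_error in [('3.7', False), ('3.6', True)]:
        grammar = parso.load_grammar(version=version)
        assert bool(list(grammar.iter_errors(grammar.parse(code)))) == is_error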
@@ -268,6 +273,9 @@ def test_too_many_levels_of_indentation():
     assert not _get_error_list(build_nested('pass', 49, base=base))
     assert _get_error_list(build_nested('pass', 50, base=base))
 
+def test_paren_kwarg():
+    assert _get_error_list("print((sep)=seperator)", version="3.8")
+    assert not _get_error_list("print((sep)=seperator)", version="3.7")
 
 @pytest.mark.parametrize(
     'code', [
@@ -321,3 +329,88 @@ def test_invalid_fstrings(code, message):
 def test_trailing_comma(code):
     errors = _get_error_list(code)
     assert not errors
+
+
+def test_continue_in_finally():
+    code = dedent('''\
+        for a in [1]:
+            try:
+                pass
+            finally:
+                continue
+        ''')
+    assert not _get_error_list(code, version="3.8")
+    assert _get_error_list(code, version="3.7")
+
+
+@pytest.mark.parametrize(
+    'template', [
+        "a, b, {target}, c = d",
+        "a, b, *{target}, c = d",
+        "(a, *{target}), c = d",
+        "for x, {target} in y: pass",
+        "for x, q, {target} in y: pass",
+        "for x, q, *{target} in y: pass",
+        "for (x, *{target}), q in y: pass",
+    ]
+)
+@pytest.mark.parametrize(
+    'target', [
+        "True",
+        "False",
+        "None",
+        "__debug__"
+    ]
+)
+def test_forbidden_name(template, target):
+    assert _get_error_list(template.format(target=target), version="3")
+
+
+def test_repeated_kwarg():
+    # python 3.9+ shows which argument is repeated
+    assert (
+        _get_error_list("f(q=1, q=2)", version="3.8")[0].message
+        == "SyntaxError: keyword argument repeated"
+    )
+    assert (
+        _get_error_list("f(q=1, q=2)", version="3.9")[0].message
+        == "SyntaxError: keyword argument repeated: q"
+    )
+
+
+@pytest.mark.parametrize(
+    ('source', 'no_errors'), [
+        ('a(a for a in b,)', False),
+        ('a(a for a in b, a)', False),
+        ('a(a, a for a in b)', False),
+        ('a(a, b, a for a in b, c, d)', False),
+        ('a(a for a in b)', True),
+        ('a((a for a in b), c)', True),
+        ('a(c, (a for a in b))', True),
+        ('a(a, b, (a for a in b), c, d)', True),
+    ]
+)
+def test_unparenthesized_genexp(source, no_errors):
+    assert bool(_get_error_list(source)) ^ no_errors
+
+
+@pytest.mark.parametrize(
+    ('source', 'no_errors'), [
+        ('*x = 2', False),
+        ('(*y) = 1', False),
+        ('((*z)) = 1', False),
+        ('a, *b = 1', True),
+        ('a, *b, c = 1', True),
+        ('a, (*b), c = 1', True),
+        ('a, ((*b)), c = 1', True),
+        ('a, (*b, c), d = 1', True),
+        ('[*(1,2,3)]', True),
+        ('{*(1,2,3)}', True),
+        ('[*(1,2,3),]', True),
+        ('[*(1,2,3), *(4,5,6)]', True),
+        ('[0, *(1,2,3)]', True),
+        ('{*(1,2,3),}', True),
+        ('{*(1,2,3), *(4,5,6)}', True),
+        ('{0, *(4,5,6)}', True)
+    ]
+)
+def test_starred_expr(source, no_errors):
+    assert bool(_get_error_list(source, version="3")) ^ no_errors
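Note: the repeated-keyword messages can be reproduced without the test
helper (``_get_error_list`` is assumed to wrap ``Grammar.iter_errors``)::

    import parso

    grammar = parso.load_grammar(version='3.9')
    error = list(grammar.iter_errors(grammar.parse('f(q=1, q=2)')))[0]
    assert error.message == 'SyntaxError: keyword argument repeated: q'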
@@ -1,6 +1,11 @@
 from codecs import BOM_UTF8
 
-from parso.utils import split_lines, python_bytes_to_unicode
+from parso.utils import (
+    split_lines,
+    parse_version_string,
+    python_bytes_to_unicode,
+)
 
 import parso
 
 import pytest
@@ -63,3 +68,35 @@ def test_utf8_bom():
     expr_stmt = module.children[0]
     assert expr_stmt.type == 'expr_stmt'
     assert unicode_bom == expr_stmt.get_first_leaf().prefix
+
+
+@pytest.mark.parametrize(
+    ('code', 'errors'), [
+        (b'# coding: wtf-12\nfoo', 'strict'),
+        (b'# coding: wtf-12\nfoo', 'replace'),
+    ]
+)
+def test_bytes_to_unicode_failing_encoding(code, errors):
+    if errors == 'strict':
+        with pytest.raises(LookupError):
+            python_bytes_to_unicode(code, errors=errors)
+    else:
+        python_bytes_to_unicode(code, errors=errors)
+
+
+@pytest.mark.parametrize(
+    ('version_str', 'version'), [
+        ('3', (3,)),
+        ('3.6', (3, 6)),
+        ('3.6.10', (3, 6)),
+        ('3.10', (3, 10)),
+        ('3.10a9', (3, 10)),
+        ('3.10b9', (3, 10)),
+        ('3.10rc9', (3, 10)),
+    ]
+)
+def test_parse_version_string(version_str, version):
+    parsed_version = parse_version_string(version_str)
+    if len(version) == 1:
+        assert parsed_version[0] == version[0]
+    else:
+        assert parsed_version == version