Mirror of https://github.com/davidhalter/parso.git (synced 2025-12-06 21:04:29 +08:00)
Compare commits
100 Commits
| SHA1 |
|---|
| 4306e8b34b |
| 2ce3898690 |
| 16f257356e |
| c864ca60d1 |
| a47b5433d4 |
| 6982cf8321 |
| 844ca3d35a |
| 9abe5d1e55 |
| 84874aace3 |
| 55531ab65b |
| 31c059fc30 |
| cfef1d74e7 |
| 9ee7409d8a |
| 4090c80401 |
| 95f353a15f |
| 2b0b093276 |
| 29b57d93bd |
| fb010f2b5d |
| 5e12ea5e04 |
| ceb1ee81fa |
| bc94293794 |
| 1122822b7d |
| 09abe42cce |
| 38cdcceba5 |
| 753e1999fe |
| 3c475b1e63 |
| 5f04dad9ab |
| dbba1959f7 |
| 5fda85275b |
| 32584ac731 |
| 89c4d959e9 |
| 776e151370 |
| 53a6d0c17a |
| b90e5cd758 |
| e496b07b63 |
| 76fe4792e7 |
| 8cae7ed526 |
| ee2995c110 |
| 76aaa2ddba |
| 3ecd4dddb4 |
| 8f83e9b3c5 |
| e8653a49ff |
| d3383b6c41 |
| 9da4df20d1 |
| 0341f69691 |
| f6bdba65c0 |
| 3bb46563d4 |
| e723b3e74b |
| 0032bae041 |
| c0ace63a69 |
| 399e8e5043 |
| 0a5b5f3346 |
| 2b8544021f |
| 99dd4a84d4 |
| 9501b0bde0 |
| ad57a51800 |
| 19de3eb5ca |
| 7441e6b1d2 |
| df3c494e02 |
| 59df3fab43 |
| 803cb5f25f |
| 3fa8630ba9 |
| 1ca5ae4008 |
| c3c16169b5 |
| ecbe2b9926 |
| 1929c144dc |
| b5d50392a4 |
| a7aa23a7f0 |
| 5430415d44 |
| 6cdd47fe2b |
| 917b4421f3 |
| 4f5fdd5a70 |
| 93ddf5322a |
| a9b61149eb |
| de416b082e |
| 4b440159b1 |
| 6f2d2362c9 |
| 8a06f0da05 |
| bd95989c2e |
| 57e91262cd |
| 476383cca9 |
| b2ab64d8f9 |
| 18cbeb1a3d |
| a5686d6cda |
| dfe7fba08e |
| 6db7f40942 |
| d5eb96309c |
| 4c65368056 |
| 3e2956264c |
| e77a67cd36 |
| c4d6de2aab |
| 7770e73609 |
| acccb4f28d |
| 3f6fc8a5ad |
| f1ee7614c9 |
| 58850f8bfa |
| d38a60278e |
| 6c65aea47d |
| 0d37ff865c |
| 076e296497 |
.coveragerc

@@ -1,4 +1,5 @@
 [run]
+source = parso
 
 [report]
 # Regexes for lines to exclude from consideration
.travis.yml (16 changed lines)

@@ -1,25 +1,25 @@
+dist: xenial
 language: python
-sudo: false
 python:
-    - 2.6
     - 2.7
     - 3.4
     - 3.5
     - 3.6
-    - pypy
+    - 3.7
+    - 3.8
+    - pypy2.7-6.0
+    - pypy3.5-6.0
 matrix:
     include:
-        - { python: "3.7", dist: xenial, sudo: true }
         - python: 3.5
-          env: TOXENV=cov
+          env: TOXENV=py35-coverage
-    allow_failures:
-        - env: TOXENV=cov
 install:
     - pip install --quiet tox-travis
 script:
     - tox
 after_script:
-    - if [ $TOXENV == "cov" ]; then
+    - |
+      if [ "${TOXENV%-coverage}" == "$TOXENV" ]; then
          pip install --quiet coveralls;
          coveralls;
       fi
AUTHORS.txt

@@ -49,6 +49,7 @@ Mathias Rav (@Mortal) <rav@cs.au.dk>
 Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>
 Simon Ruggier (@sruggier)
 Élie Gouzien (@ElieGouzien)
+Tim Gates (@timgates42) <tim.gates@iress.com>
 
 
 Note: (@user) means a github user name.
CHANGELOG.rst

@@ -3,7 +3,60 @@
 Changelog
 ---------
 
-0.3.2 (2018-01-24)
+0.6.1 (2020-02-03)
+++++++++++++++++++
+
+- Add ``parso.normalizer.Issue.end_pos`` to make it possible to know where an
+  issue ends
+
+0.6.0 (2020-01-26)
+++++++++++++++++++
+
+- Dropped Python 2.6/Python 3.3 support
+- del_stmt names are now considered as a definition
+  (for ``name.is_definition()``)
+- Bugfixes
+
+0.5.2 (2019-12-15)
+++++++++++++++++++
+
+- Add include_setitem to get_definition/is_definition and get_defined_names (#66)
+- Fix named expression error listing (#89, #90)
+- Fix some f-string tokenizer issues (#93)
+
+0.5.1 (2019-07-13)
+++++++++++++++++++
+
+- Fix: Some unicode identifiers were not correctly tokenized
+- Fix: Line continuations in f-strings are now working
+
+0.5.0 (2019-06-20)
+++++++++++++++++++
+
+- **Breaking Change** comp_for is now called sync_comp_for for all Python
+  versions to be compatible with the Python 3.8 Grammar
+- Added .pyi stubs for a lot of the parso API
+- Small FileIO changes
+
+0.4.0 (2019-04-05)
+++++++++++++++++++
+
+- Python 3.8 support
+- FileIO support, it's now possible to use abstract file IO, support is alpha
+
+0.3.4 (2019-02-13)
++++++++++++++++++++
+
+- Fix an f-string tokenizer error
+
+0.3.3 (2019-02-06)
++++++++++++++++++++
+
+- Fix async errors in the diff parser
+- A fix in iter_errors
+- This is a very small bugfix release
+
+0.3.2 (2019-01-24)
 +++++++++++++++++++
 
 - 20+ bugfixes in the diff parser and 3 in the tokenizer
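The headline change in 0.6.1 is the new ``Issue.end_pos`` (the implementation is in the ``parso/normalizer.py`` hunk further down). A minimal sketch of how it surfaces through the public API — ``load_grammar``, ``parse`` and ``iter_errors`` are real parso entry points, while the parsed snippet and the printed fields are illustrative only:

```python
import parso

# Hedged sketch: parse a snippet with a syntax error and inspect the issues.
grammar = parso.load_grammar(version='3.8')
module = grammar.parse('x = 1 +\n', error_recovery=True)
for issue in grammar.iter_errors(module):
    # Since 0.6.1 an Issue carries end_pos next to start_pos, so a tool
    # can underline the whole offending range instead of a single point.
    print(issue.code, issue.start_pos, issue.end_pos, issue.message)
```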
conftest.py (14 changed lines)

@@ -13,8 +13,8 @@ from parso.utils import parse_version_string
 
 collect_ignore = ["setup.py"]
 
-VERSIONS_2 = '2.6', '2.7'
-VERSIONS_3 = '3.3', '3.4', '3.5', '3.6', '3.7'
+VERSIONS_2 = '2.7',
+VERSIONS_3 = '3.4', '3.5', '3.6', '3.7', '3.8'
 
 
 @pytest.fixture(scope='session')
@@ -58,7 +58,9 @@ def pytest_generate_tests(metafunc):
     elif 'each_py3_version' in metafunc.fixturenames:
         metafunc.parametrize('each_py3_version', VERSIONS_3)
     elif 'version_ge_py36' in metafunc.fixturenames:
-        metafunc.parametrize('version_ge_py36', ['3.6', '3.7'])
+        metafunc.parametrize('version_ge_py36', ['3.6', '3.7', '3.8'])
+    elif 'version_ge_py38' in metafunc.fixturenames:
+        metafunc.parametrize('version_ge_py38', ['3.8'])
 
 
 class NormalizerIssueCase(object):
@@ -155,3 +157,9 @@ def works_ge_py3(each_version):
 def works_ge_py35(each_version):
     version_info = parse_version_string(each_version)
     return Checker(each_version, version_info >= (3, 5))
+
+
+@pytest.fixture
+def works_ge_py38(each_version):
+    version_info = parse_version_string(each_version)
+    return Checker(each_version, version_info >= (3, 8))
parso/__init__.py

@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.3.2'
+__version__ = '0.6.1'
 
 
 def parse(code=None, **kwargs):
parso/__init__.pyi (new file, 19 lines)

@@ -0,0 +1,19 @@
+from typing import Any, Optional, Union
+
+from parso.grammar import Grammar as Grammar, load_grammar as load_grammar
+from parso.parser import ParserSyntaxError as ParserSyntaxError
+from parso.utils import python_bytes_to_unicode as python_bytes_to_unicode, split_lines as split_lines
+
+__version__: str = ...
+
+def parse(
+    code: Optional[Union[str, bytes]],
+    *,
+    version: Optional[str] = None,
+    error_recovery: bool = True,
+    path: Optional[str] = None,
+    start_symbol: Optional[str] = None,
+    cache: bool = False,
+    diff_cache: bool = False,
+    cache_path: Optional[str] = None,
+) -> Any: ...
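The stub above mirrors the runtime signature of ``parso.parse``. A small usage sketch under those assumptions (the expected node type comes from parso's tree API, not from this diff):

```python
import parso

# 'version' and the other arguments are keyword-only, as the stub declares.
tree = parso.parse('def f(): pass\n', version='3.8')
print(tree.children[0].type)  # expected: 'funcdef'
```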
parso/_compatibility.py

@@ -1,14 +1,10 @@
 """
-To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been
+To ensure compatibility from Python ``2.7`` - ``3.3``, a module has been
 created. Clearly there is huge need to use conforming syntax.
 """
 import sys
 import platform
 
-# Cannot use sys.version.major and minor names, because in Python 2.6 it's not
-# a namedtuple.
-py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
-
 # unicode function
 try:
     unicode = unicode
@@ -39,7 +35,7 @@ def u(string):
     have to cast back to a unicode (and we know that we always deal with valid
     unicode, because we check that in the beginning).
     """
-    if py_version >= 30:
+    if sys.version_info.major >= 3:
         return str(string)
 
     if not isinstance(string, unicode):
@@ -48,8 +44,10 @@ def u(string):
 
 
 try:
+    # Python 2.7
     FileNotFoundError = FileNotFoundError
 except NameError:
+    # Python 3.3+
     FileNotFoundError = IOError
 
 
@@ -65,39 +63,7 @@ def utf8_repr(func):
         else:
             return result
 
-    if py_version >= 30:
+    if sys.version_info.major >= 3:
         return func
     else:
         return wrapper
-
-
-try:
-    from functools import total_ordering
-except ImportError:
-    # Python 2.6
-    def total_ordering(cls):
-        """Class decorator that fills in missing ordering methods"""
-        convert = {
-            '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
-                       ('__le__', lambda self, other: self < other or self == other),
-                       ('__ge__', lambda self, other: not self < other)],
-            '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
-                       ('__lt__', lambda self, other: self <= other and not self == other),
-                       ('__gt__', lambda self, other: not self <= other)],
-            '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
-                       ('__ge__', lambda self, other: self > other or self == other),
-                       ('__le__', lambda self, other: not self > other)],
-            '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
-                       ('__gt__', lambda self, other: self >= other and not self == other),
-                       ('__lt__', lambda self, other: not self >= other)]
-        }
-        roots = set(dir(cls)) & set(convert)
-        if not roots:
-            raise ValueError('must define at least one ordering operation: < > <= >=')
-        root = max(roots)  # prefer __lt__ to __le__ to __gt__ to __ge__
-        for opname, opfunc in convert[root]:
-            if opname not in roots:
-                opfunc.__name__ = opname
-                opfunc.__doc__ = getattr(int, opname).__doc__
-                setattr(cls, opname, opfunc)
-        return cls
parso/cache.py

@@ -17,8 +17,23 @@ from parso._compatibility import FileNotFoundError
 
 LOG = logging.getLogger(__name__)
 
+_CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10  # 10 minutes
+"""
+Cached files should survive at least a few minutes.
+"""
+_CACHED_SIZE_TRIGGER = 600
+"""
+This setting limits the amount of cached files. It's basically a way to start
+garbage collection.
 
-_PICKLE_VERSION = 30
+The reasoning for this limit being as big as it is, is the following:
+
+Numpy, Pandas, Matplotlib and Tensorflow together use about 500 files. This
+makes Jedi use ~500mb of memory. Since we might want a bit more than those few
+libraries, we just increase it a bit.
+"""
+
+_PICKLE_VERSION = 33
 """
 Version number (integer) for file system cache.
 
@@ -40,11 +55,12 @@ _VERSION_TAG = '%s-%s%s-%s' % (
 """
 Short name for distinguish Python implementations and versions.
 
-It's like `sys.implementation.cache_tag` but for Python < 3.3
+It's like `sys.implementation.cache_tag` but for Python2
 we generate something similar. See:
 http://docs.python.org/3/library/sys.html#sys.implementation
 """
 
+
 def _get_default_cache_path():
     if platform.system().lower() == 'windows':
         dir_ = os.path.join(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso')
@@ -54,6 +70,7 @@ def _get_default_cache_path():
         dir_ = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'parso')
     return os.path.expanduser(dir_)
 
+
 _default_cache_path = _get_default_cache_path()
 """
 The path where the cache is stored.
@@ -74,23 +91,29 @@ class _NodeCacheItem(object):
         if change_time is None:
             change_time = time.time()
         self.change_time = change_time
+        self.last_used = change_time
 
 
-def load_module(hashed_grammar, path, cache_path=None):
+def load_module(hashed_grammar, file_io, cache_path=None):
     """
     Returns a module or None, if it fails.
     """
-    try:
-        p_time = os.path.getmtime(path)
-    except FileNotFoundError:
+    p_time = file_io.get_last_modified()
+    if p_time is None:
         return None
 
     try:
-        module_cache_item = parser_cache[hashed_grammar][path]
+        module_cache_item = parser_cache[hashed_grammar][file_io.path]
        if p_time <= module_cache_item.change_time:
+            module_cache_item.last_used = time.time()
            return module_cache_item.node
     except KeyError:
-        return _load_from_file_system(hashed_grammar, path, p_time, cache_path=cache_path)
+        return _load_from_file_system(
+            hashed_grammar,
+            file_io.path,
+            p_time,
+            cache_path=cache_path
+        )
 
 
 def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
@@ -116,20 +139,37 @@ def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
     except FileNotFoundError:
         return None
     else:
-        parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
+        _set_cache_item(hashed_grammar, path, module_cache_item)
         LOG.debug('pickle loaded: %s', path)
         return module_cache_item.node
 
 
-def save_module(hashed_grammar, path, module, lines, pickling=True, cache_path=None):
+def _set_cache_item(hashed_grammar, path, module_cache_item):
+    if sum(len(v) for v in parser_cache.values()) >= _CACHED_SIZE_TRIGGER:
+        # Garbage collection of old cache files.
+        # We are basically throwing everything away that hasn't been accessed
+        # in 10 minutes.
+        cutoff_time = time.time() - _CACHED_FILE_MINIMUM_SURVIVAL
+        for key, path_to_item_map in parser_cache.items():
+            parser_cache[key] = {
+                path: node_item
+                for path, node_item in path_to_item_map.items()
+                if node_item.last_used > cutoff_time
+            }
+
+    parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
+
+
+def save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
+    path = file_io.path
     try:
-        p_time = None if path is None else os.path.getmtime(path)
+        p_time = None if path is None else file_io.get_last_modified()
     except OSError:
         p_time = None
         pickling = False
 
     item = _NodeCacheItem(module, lines, p_time)
-    parser_cache.setdefault(hashed_grammar, {})[path] = item
+    _set_cache_item(hashed_grammar, path, item)
     if pickling and path is not None:
         _save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
 
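The eviction rule that ``_set_cache_item`` introduces can be read in isolation. A standalone sketch with a toy item type (``SimpleNamespace`` stands in for parso's ``_NodeCacheItem``; the constants are the ones defined in the hunk above):

```python
import time
from types import SimpleNamespace

_CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10  # seconds, from the diff
_CACHED_SIZE_TRIGGER = 600               # cached-module limit, from the diff


def evict_stale(parser_cache):
    # Same shape as the diff: once the cache holds >= 600 modules, drop
    # every entry that has not been used within the survival window.
    if sum(len(v) for v in parser_cache.values()) >= _CACHED_SIZE_TRIGGER:
        cutoff = time.time() - _CACHED_FILE_MINIMUM_SURVIVAL
        for key, items in parser_cache.items():
            parser_cache[key] = {
                path: item for path, item in items.items()
                if item.last_used > cutoff
            }


# Toy demonstration: one stale entry among 600 fresh ones gets collected.
fresh = SimpleNamespace(last_used=time.time())
stale = SimpleNamespace(last_used=time.time() - 3600)
cache = {'grammar-hash': {'f%d.py' % i: fresh for i in range(600)}}
cache['grammar-hash']['old.py'] = stale
evict_stale(cache)
assert 'old.py' not in cache['grammar-hash']
```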
parso/file_io.py (new file, 35 lines)

@@ -0,0 +1,35 @@
+import os
+
+
+class FileIO(object):
+    def __init__(self, path):
+        self.path = path
+
+    def read(self):  # Returns bytes/str
+        # We would like to read unicode here, but we cannot, because we are not
+        # sure if it is a valid unicode file. Therefore just read whatever is
+        # here.
+        with open(self.path, 'rb') as f:
+            return f.read()
+
+    def get_last_modified(self):
+        """
+        Returns float - timestamp or None, if path doesn't exist.
+        """
+        try:
+            return os.path.getmtime(self.path)
+        except OSError:
+            # Might raise FileNotFoundError, OSError for Python 2
+            return None
+
+    def __repr__(self):
+        return '%s(%s)' % (self.__class__.__name__, self.path)
+
+
+class KnownContentFileIO(FileIO):
+    def __init__(self, path, content):
+        super(KnownContentFileIO, self).__init__(path)
+        self._content = content
+
+    def read(self):
+        return self._content
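Both classes are shown in full above, so their behaviour can be sketched directly (the file name is made up):

```python
from parso.file_io import FileIO, KnownContentFileIO

# KnownContentFileIO answers read() from memory; the path only serves as a
# cache key, so no file has to exist on disk.
known = KnownContentFileIO('example.py', 'x = 1\n')
print(known.read())               # 'x = 1\n'
print(known.get_last_modified())  # None unless example.py really exists

disk = FileIO('example.py')       # read() would return the file's bytes
print(disk)                       # FileIO(example.py), via __repr__
```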
parso/grammar.py

@@ -12,6 +12,7 @@ from parso.parser import BaseParser
 from parso.python.parser import Parser as PythonParser
 from parso.python.errors import ErrorFinderConfig
 from parso.python import pep8
+from parso.file_io import FileIO, KnownContentFileIO
 
 _loaded_grammars = {}
 
@@ -56,7 +57,8 @@ class Grammar(object):
         :param str path: The path to the file you want to open. Only needed for caching.
         :param bool cache: Keeps a copy of the parser tree in RAM and on disk
             if a path is given. Returns the cached trees if the corresponding
-            files on disk have not changed.
+            files on disk have not changed. Note that this stores pickle files
+            on your file system (e.g. for Linux in ``~/.cache/parso/``).
         :param bool diff_cache: Diffs the cached python module against the new
             code and tries to parse only the parts that have changed. Returns
             the same (changed) module that is found in cache. Using this option
@@ -77,14 +79,14 @@ class Grammar(object):
 
     def _parse(self, code=None, error_recovery=True, path=None,
                start_symbol=None, cache=False, diff_cache=False,
-               cache_path=None, start_pos=(1, 0)):
+               cache_path=None, file_io=None, start_pos=(1, 0)):
         """
         Wanted python3.5 * operator and keyword only arguments. Therefore just
         wrap it all.
         start_pos here is just a parameter internally used. Might be public
         sometime in the future.
         """
-        if code is None and path is None:
+        if code is None and path is None and file_io is None:
             raise TypeError("Please provide either code or a path.")
 
         if start_symbol is None:
@@ -93,15 +95,19 @@ class Grammar(object):
         if error_recovery and start_symbol != 'file_input':
             raise NotImplementedError("This is currently not implemented.")
 
-        if cache and path is not None:
-            module_node = load_module(self._hashed, path, cache_path=cache_path)
+        if file_io is None:
+            if code is None:
+                file_io = FileIO(path)
+            else:
+                file_io = KnownContentFileIO(path, code)
+
+        if cache and file_io.path is not None:
+            module_node = load_module(self._hashed, file_io, cache_path=cache_path)
             if module_node is not None:
                 return module_node
 
         if code is None:
-            with open(path, 'rb') as f:
-                code = f.read()
-
+            code = file_io.read()
         code = python_bytes_to_unicode(code)
 
         lines = split_lines(code, keepends=True)
@@ -110,7 +116,7 @@ class Grammar(object):
                 raise TypeError("You have to define a diff parser to be able "
                                 "to use this option.")
             try:
-                module_cache_item = parser_cache[self._hashed][path]
+                module_cache_item = parser_cache[self._hashed][file_io.path]
             except KeyError:
                 pass
             else:
@@ -125,7 +131,7 @@ class Grammar(object):
                     old_lines=old_lines,
                     new_lines=lines
                 )
-                save_module(self._hashed, path, new_node, lines,
+                save_module(self._hashed, file_io, new_node, lines,
                             # Never pickle in pypy, it's slow as hell.
                             pickling=cache and not is_pypy,
                             cache_path=cache_path)
@@ -141,7 +147,7 @@ class Grammar(object):
         root_node = p.parse(tokens=tokens)
 
         if cache or diff_cache:
-            save_module(self._hashed, path, root_node, lines,
+            save_module(self._hashed, file_io, root_node, lines,
                         # Never pickle in pypy, it's slow as hell.
                         pickling=cache and not is_pypy,
                         cache_path=cache_path)
@@ -218,7 +224,7 @@ def load_grammar(**kwargs):
     Loads a :py:class:`parso.Grammar`. The default version is the current Python
     version.
 
-    :param str version: A python version string, e.g. ``version='3.3'``.
+    :param str version: A python version string, e.g. ``version='3.8'``.
     :param str path: A path to a grammar file
     """
     def load_grammar(language='python', version=None, path=None):
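The amended docstring documents where the cache pickles land. A hedged sketch of the round trip (the path is made up and must exist for this to run; the identity check follows from ``load_module`` returning the cached node, as shown in the ``parso/cache.py`` hunks above):

```python
import parso

grammar = parso.load_grammar(version='3.8')
# First call parses and stores the tree (in RAM, and pickled e.g. under
# ~/.cache/parso/ on Linux); the second call should be a cache hit as
# long as example.py is unchanged on disk.
module1 = grammar.parse(path='example.py', cache=True)
module2 = grammar.parse(path='example.py', cache=True)
print(module1 is module2)  # expected: True
```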
parso/grammar.pyi (new file, 38 lines)

@@ -0,0 +1,38 @@
+from typing import Any, Callable, Generic, Optional, Sequence, TypeVar, Union
+from typing_extensions import Literal
+
+from parso.utils import PythonVersionInfo
+
+_Token = Any
+_NodeT = TypeVar("_NodeT")
+
+class Grammar(Generic[_NodeT]):
+    _default_normalizer_config: Optional[Any] = ...
+    _error_normalizer_config: Optional[Any] = None
+    _start_nonterminal: str = ...
+    _token_namespace: Optional[str] = None
+    def __init__(
+        self,
+        text: str,
+        tokenizer: Callable[[Sequence[str], int], Sequence[_Token]],
+        parser: Any = ...,
+        diff_parser: Any = ...,
+    ) -> None: ...
+    def parse(
+        self,
+        code: Union[str, bytes] = ...,
+        error_recovery: bool = ...,
+        path: Optional[str] = ...,
+        start_symbol: Optional[str] = ...,
+        cache: bool = ...,
+        diff_cache: bool = ...,
+        cache_path: Optional[str] = ...,
+    ) -> _NodeT: ...
+
+class PythonGrammar(Grammar):
+    version_info: PythonVersionInfo
+    def __init__(self, version_info: PythonVersionInfo, bnf_text: str) -> None: ...
+
+def load_grammar(
+    language: Literal["python"] = "python", version: Optional[str] = ..., path: str = ...
+) -> Grammar: ...
parso/normalizer.py

@@ -119,7 +119,6 @@ class NormalizerConfig(object):
 
 class Issue(object):
     def __init__(self, node, code, message):
-        self._node = node
         self.code = code
         """
         An integer code that stands for the type of error.
@@ -133,6 +132,7 @@ class Issue(object):
         The start position position of the error as a tuple (line, column). As
         always in |parso| the first line is 1 and the first column 0.
         """
+        self.end_pos = node.end_pos
 
     def __eq__(self, other):
         return self.start_pos == other.start_pos and self.code == other.code
@@ -147,7 +147,6 @@ class Issue(object):
         return '<%s: %s>' % (self.__class__.__name__, self.code)
 
 
-
 class Rule(object):
     code = None
     message = None
parso/pgen2/__init__.pyi (new file, 1 line)

@@ -0,0 +1 @@
+from parso.pgen2.generator import generate_grammar as generate_grammar
parso/pgen2/generator.py

@@ -309,13 +309,39 @@ def _calculate_tree_traversal(nonterminal_to_dfas):
             _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal)
 
     # Now that we have calculated the first terminals, we are sure that
-    # there is no left recursion or ambiguities.
+    # there is no left recursion.
 
     for dfas in nonterminal_to_dfas.values():
         for dfa_state in dfas:
+            transitions = dfa_state.transitions
             for nonterminal, next_dfa in dfa_state.nonterminal_arcs.items():
                 for transition, pushes in first_plans[nonterminal].items():
-                    dfa_state.transitions[transition] = DFAPlan(next_dfa, pushes)
+                    if transition in transitions:
+                        prev_plan = transitions[transition]
+                        # Make sure these are sorted so that error messages are
+                        # at least deterministic
+                        choices = sorted([
+                            (
+                                prev_plan.dfa_pushes[0].from_rule
+                                if prev_plan.dfa_pushes
+                                else prev_plan.next_dfa.from_rule
+                            ),
+                            (
+                                pushes[0].from_rule
+                                if pushes else next_dfa.from_rule
+                            ),
+                        ])
+                        raise ValueError(
+                            "Rule %s is ambiguous; given a %s token, we "
+                            "can't determine if we should evaluate %s or %s."
+                            % (
+                                (
+                                    dfa_state.from_rule,
+                                    transition,
+                                ) + tuple(choices)
+                            )
+                        )
+                    transitions[transition] = DFAPlan(next_dfa, pushes)
 
 
 def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal):
@@ -345,13 +371,6 @@ def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal):
                 raise ValueError("left recursion for rule %r" % nonterminal)
 
             for t, pushes in first_plans2.items():
-                check = new_first_plans.get(t)
-                if check is not None:
-                    raise ValueError(
-                        "Rule %s is ambiguous; %s is the"
-                        " start of the rule %s as well as %s."
-                        % (nonterminal, t, nonterminal2, check[-1].from_rule)
-                    )
                 new_first_plans[t] = [next_] + pushes
 
     first_plans[nonterminal] = new_first_plans
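With the check moved into ``_calculate_tree_traversal``, an ambiguous grammar now fails at generation time with a deterministic message. A loosely hedged sketch — ``generate_grammar`` and its signature come from the ``.pyi`` stub below, and ``PythonTokenTypes`` is parso's token namespace, but the BNF text and the exact error wording are assumptions:

```python
from parso.pgen2 import generate_grammar
from parso.python.token import PythonTokenTypes

# Both alternatives of 'start' can begin with the same 'x' token, so the
# DFA plans collide and the generator should raise the new ValueError.
bnf = (
    "start: a | b\n"
    "a: 'x'\n"
    "b: 'x'\n"
)
try:
    generate_grammar(bnf, token_namespace=PythonTokenTypes)
except ValueError as error:
    print(error)  # e.g. "Rule start is ambiguous; given a ... token, ..."
```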
parso/pgen2/generator.pyi (new file, 38 lines)

@@ -0,0 +1,38 @@
+from typing import Any, Generic, Mapping, Sequence, Set, TypeVar, Union
+
+from parso.pgen2.grammar_parser import NFAState
+
+_TokenTypeT = TypeVar("_TokenTypeT")
+
+class Grammar(Generic[_TokenTypeT]):
+    nonterminal_to_dfas: Mapping[str, Sequence[DFAState[_TokenTypeT]]]
+    reserved_syntax_strings: Mapping[str, ReservedString]
+    start_nonterminal: str
+    def __init__(
+        self,
+        start_nonterminal: str,
+        rule_to_dfas: Mapping[str, Sequence[DFAState]],
+        reserved_syntax_strings: Mapping[str, ReservedString],
+    ) -> None: ...
+
+class DFAPlan:
+    next_dfa: DFAState
+    dfa_pushes: Sequence[DFAState]
+
+class DFAState(Generic[_TokenTypeT]):
+    from_rule: str
+    nfa_set: Set[NFAState]
+    is_final: bool
+    arcs: Mapping[str, DFAState]  # map from all terminals/nonterminals to DFAState
+    nonterminal_arcs: Mapping[str, DFAState]
+    transitions: Mapping[Union[_TokenTypeT, ReservedString], DFAPlan]
+    def __init__(
+        self, from_rule: str, nfa_set: Set[NFAState], final: NFAState
+    ) -> None: ...
+
+class ReservedString:
+    value: str
+    def __init__(self, value: str) -> None: ...
+    def __repr__(self) -> str: ...
+
+def generate_grammar(bnf_grammar: str, token_namespace: Any) -> Grammar[Any]: ...
parso/pgen2/grammar_parser.py

@@ -141,6 +141,9 @@ class NFAArc(object):
         self.next = next_
         self.nonterminal_or_string = nonterminal_or_string
 
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self.nonterminal_or_string)
+
 
 class NFAState(object):
     def __init__(self, from_rule):
parso/pgen2/grammar_parser.pyi (new file, 20 lines)

@@ -0,0 +1,20 @@
+from typing import Generator, List, Optional, Tuple
+
+from parso.python.token import TokenType
+
+class GrammarParser:
+    generator: Generator[TokenType, None, None]
+    def __init__(self, bnf_grammar: str) -> None: ...
+    def parse(self) -> Generator[Tuple[NFAState, NFAState], None, None]: ...
+
+class NFAArc:
+    next: NFAState
+    nonterminal_or_string: Optional[str]
+    def __init__(
+        self, next_: NFAState, nonterminal_or_string: Optional[str]
+    ) -> None: ...
+
+class NFAState:
+    from_rule: str
+    arcs: List[NFAArc]
+    def __init__(self, from_rule: str) -> None: ...
parso/python/diff.py

@@ -682,6 +682,8 @@ class _NodesTree(object):
             last = new_nodes[-1]
             if last.type == 'decorated':
                 last = last.children[-1]
+            if last.type in ('async_funcdef', 'async_stmt'):
+                last = last.children[-1]
             last_line_offset_leaf = last.children[-2].get_last_leaf()
             assert last_line_offset_leaf == ':'
         else:
@@ -6,7 +6,6 @@ from contextlib import contextmanager
|
|||||||
|
|
||||||
from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
|
from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
|
||||||
from parso.python.tree import search_ancestor
|
from parso.python.tree import search_ancestor
|
||||||
from parso.parser import ParserSyntaxError
|
|
||||||
|
|
||||||
_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
|
_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
|
||||||
_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
|
_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
|
||||||
@@ -17,6 +16,7 @@ ALLOWED_FUTURES = (
|
|||||||
'all_feature_names', 'nested_scopes', 'generators', 'division',
|
'all_feature_names', 'nested_scopes', 'generators', 'division',
|
||||||
'absolute_import', 'with_statement', 'print_function', 'unicode_literals',
|
'absolute_import', 'with_statement', 'print_function', 'unicode_literals',
|
||||||
)
|
)
|
||||||
|
_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
|
||||||
|
|
||||||
|
|
||||||
def _iter_stmts(scope):
|
def _iter_stmts(scope):
|
||||||
@@ -35,12 +35,12 @@ def _iter_stmts(scope):
|
|||||||
|
|
||||||
def _get_comprehension_type(atom):
|
def _get_comprehension_type(atom):
|
||||||
first, second = atom.children[:2]
|
first, second = atom.children[:2]
|
||||||
if second.type == 'testlist_comp' and second.children[1].type == 'comp_for':
|
if second.type == 'testlist_comp' and second.children[1].type in _COMP_FOR_TYPES:
|
||||||
if first == '[':
|
if first == '[':
|
||||||
return 'list comprehension'
|
return 'list comprehension'
|
||||||
else:
|
else:
|
||||||
return 'generator expression'
|
return 'generator expression'
|
||||||
elif second.type == 'dictorsetmaker' and second.children[-1].type == 'comp_for':
|
elif second.type == 'dictorsetmaker' and second.children[-1].type in _COMP_FOR_TYPES:
|
||||||
if second.children[1] == ':':
|
if second.children[1] == ':':
|
||||||
return 'dict comprehension'
|
return 'dict comprehension'
|
||||||
else:
|
else:
|
||||||
@@ -94,19 +94,33 @@ def _is_future_import_first(import_from):
|
|||||||
|
|
||||||
|
|
||||||
def _iter_definition_exprs_from_lists(exprlist):
|
def _iter_definition_exprs_from_lists(exprlist):
|
||||||
for child in exprlist.children[::2]:
|
def check_expr(child):
|
||||||
if child.type == 'atom' and child.children[0] in ('(', '['):
|
if child.type == 'atom':
|
||||||
testlist_comp = child.children[0]
|
if child.children[0] == '(':
|
||||||
|
testlist_comp = child.children[1]
|
||||||
if testlist_comp.type == 'testlist_comp':
|
if testlist_comp.type == 'testlist_comp':
|
||||||
for expr in _iter_definition_exprs_from_lists(testlist_comp):
|
for expr in _iter_definition_exprs_from_lists(testlist_comp):
|
||||||
yield expr
|
yield expr
|
||||||
continue
|
return
|
||||||
|
else:
|
||||||
|
# It's a paren that doesn't do anything, like 1 + (1)
|
||||||
|
for c in check_expr(testlist_comp):
|
||||||
|
yield c
|
||||||
|
return
|
||||||
elif child.children[0] == '[':
|
elif child.children[0] == '[':
|
||||||
yield testlist_comp
|
yield testlist_comp
|
||||||
continue
|
return
|
||||||
|
|
||||||
yield child
|
yield child
|
||||||
|
|
||||||
|
if exprlist.type in _STAR_EXPR_PARENTS:
|
||||||
|
for child in exprlist.children[::2]:
|
||||||
|
for c in check_expr(child): # Python 2 sucks
|
||||||
|
yield c
|
||||||
|
else:
|
||||||
|
for c in check_expr(exprlist): # Python 2 sucks
|
||||||
|
yield c
|
||||||
|
|
||||||
|
|
||||||
def _get_expr_stmt_definition_exprs(expr_stmt):
|
def _get_expr_stmt_definition_exprs(expr_stmt):
|
||||||
exprs = []
|
exprs = []
|
||||||
for list_ in expr_stmt.children[:-2:2]:
|
for list_ in expr_stmt.children[:-2:2]:
|
||||||
@@ -119,8 +133,6 @@ def _get_expr_stmt_definition_exprs(expr_stmt):
|
|||||||
|
|
||||||
def _get_for_stmt_definition_exprs(for_stmt):
|
def _get_for_stmt_definition_exprs(for_stmt):
|
||||||
exprlist = for_stmt.children[1]
|
exprlist = for_stmt.children[1]
|
||||||
if exprlist.type != 'exprlist':
|
|
||||||
return [exprlist]
|
|
||||||
return list(_iter_definition_exprs_from_lists(exprlist))
|
return list(_iter_definition_exprs_from_lists(exprlist))
|
||||||
|
|
||||||
|
|
||||||
@@ -164,8 +176,7 @@ class _Context(object):
|
|||||||
self._analyze_names(self._global_names, 'global')
|
self._analyze_names(self._global_names, 'global')
|
||||||
self._analyze_names(self._nonlocal_names, 'nonlocal')
|
self._analyze_names(self._nonlocal_names, 'nonlocal')
|
||||||
|
|
||||||
# Python2.6 doesn't have dict comprehensions.
|
global_name_strs = {n.value: n for n in self._global_names}
|
||||||
global_name_strs = dict((n.value, n) for n in self._global_names)
|
|
||||||
for nonlocal_name in self._nonlocal_names:
|
for nonlocal_name in self._nonlocal_names:
|
||||||
try:
|
try:
|
||||||
global_name = global_name_strs[nonlocal_name.value]
|
global_name = global_name_strs[nonlocal_name.value]
|
||||||
@@ -279,7 +290,6 @@ class ErrorFinder(Normalizer):
|
|||||||
return ''
|
return ''
|
||||||
return super(ErrorFinder, self).visit(node)
|
return super(ErrorFinder, self).visit(node)
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
|
@contextmanager
|
||||||
def visit_node(self, node):
|
def visit_node(self, node):
|
||||||
self._check_type_rules(node)
|
self._check_type_rules(node)
|
||||||
@@ -461,17 +471,13 @@ class _YieldFromCheck(SyntaxRule):
|
|||||||
@ErrorFinder.register_rule(type='name')
|
@ErrorFinder.register_rule(type='name')
|
||||||
class _NameChecks(SyntaxRule):
|
class _NameChecks(SyntaxRule):
|
||||||
message = 'cannot assign to __debug__'
|
message = 'cannot assign to __debug__'
|
||||||
message_keyword = 'assignment to keyword'
|
|
||||||
message_none = 'cannot assign to None'
|
message_none = 'cannot assign to None'
|
||||||
|
|
||||||
def is_issue(self, leaf):
|
def is_issue(self, leaf):
|
||||||
self._normalizer.context.add_name(leaf)
|
self._normalizer.context.add_name(leaf)
|
||||||
|
|
||||||
if leaf.value == '__debug__' and leaf.is_definition():
|
if leaf.value == '__debug__' and leaf.is_definition():
|
||||||
if self._normalizer.version < (3, 0):
|
|
||||||
return True
|
return True
|
||||||
else:
|
|
||||||
self.add_issue(leaf, message=self.message_keyword)
|
|
||||||
if leaf.value == 'None' and self._normalizer.version < (3, 0) \
|
if leaf.value == 'None' and self._normalizer.version < (3, 0) \
|
||||||
and leaf.is_definition():
|
and leaf.is_definition():
|
||||||
self.add_issue(leaf, message=self.message_none)
|
self.add_issue(leaf, message=self.message_none)
|
||||||
@@ -539,7 +545,7 @@ class _StarStarCheck(SyntaxRule):
|
|||||||
def is_issue(self, leaf):
|
def is_issue(self, leaf):
|
||||||
if leaf.parent.type == 'dictorsetmaker':
|
if leaf.parent.type == 'dictorsetmaker':
|
||||||
comp_for = leaf.get_next_sibling().get_next_sibling()
|
comp_for = leaf.get_next_sibling().get_next_sibling()
|
||||||
return comp_for is not None and comp_for.type == 'comp_for'
|
return comp_for is not None and comp_for.type in _COMP_FOR_TYPES
|
||||||
|
|
||||||
|
|
||||||
@ErrorFinder.register_rule(value='yield')
|
@ErrorFinder.register_rule(value='yield')
|
||||||
@@ -570,11 +576,14 @@ class _BytesAndStringMix(SyntaxRule):
|
|||||||
message = "cannot mix bytes and nonbytes literals"
|
message = "cannot mix bytes and nonbytes literals"
|
||||||
|
|
||||||
def _is_bytes_literal(self, string):
|
def _is_bytes_literal(self, string):
|
||||||
|
if string.type == 'fstring':
|
||||||
|
return False
|
||||||
return 'b' in string.string_prefix.lower()
|
return 'b' in string.string_prefix.lower()
|
||||||
|
|
||||||
def is_issue(self, node):
|
def is_issue(self, node):
|
||||||
first = node.children[0]
|
first = node.children[0]
|
||||||
if first.type == 'string' and self._normalizer.version >= (3, 0):
|
# In Python 2 it's allowed to mix bytes and unicode.
|
||||||
|
if self._normalizer.version >= (3, 0):
|
||||||
first_is_bytes = self._is_bytes_literal(first)
|
first_is_bytes = self._is_bytes_literal(first)
|
||||||
for string in node.children[1:]:
|
for string in node.children[1:]:
|
||||||
if first_is_bytes != self._is_bytes_literal(string):
|
if first_is_bytes != self._is_bytes_literal(string):
|
||||||
@@ -587,7 +596,7 @@ class _TrailingImportComma(SyntaxRule):
|
|||||||
message = "trailing comma not allowed without surrounding parentheses"
|
message = "trailing comma not allowed without surrounding parentheses"
|
||||||
|
|
||||||
def is_issue(self, node):
|
def is_issue(self, node):
|
||||||
if node.children[-1] == ',':
|
if node.children[-1] == ',' and node.parent.children[-1] != ')':
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
@@ -635,7 +644,7 @@ class _StarExprRule(SyntaxRule):
|
|||||||
return True
|
return True
|
||||||
if node.parent.type == 'testlist_comp':
|
if node.parent.type == 'testlist_comp':
|
||||||
# [*[] for a in [1]]
|
# [*[] for a in [1]]
|
||||||
if node.parent.children[1].type == 'comp_for':
|
if node.parent.children[1].type in _COMP_FOR_TYPES:
|
||||||
self.add_issue(node, message=self.message_iterable_unpacking)
|
self.add_issue(node, message=self.message_iterable_unpacking)
|
||||||
if self._normalizer.version <= (3, 4):
|
if self._normalizer.version <= (3, 4):
|
||||||
n = search_ancestor(node, 'for_stmt', 'expr_stmt')
|
n = search_ancestor(node, 'for_stmt', 'expr_stmt')
|
||||||
@@ -728,10 +737,16 @@ class _ArgumentRule(SyntaxRule):
|
|||||||
if node.children[1] == '=' and first.type != 'name':
|
if node.children[1] == '=' and first.type != 'name':
|
||||||
if first.type == 'lambdef':
|
if first.type == 'lambdef':
|
||||||
# f(lambda: 1=1)
|
# f(lambda: 1=1)
|
||||||
|
if self._normalizer.version < (3, 8):
|
||||||
message = "lambda cannot contain assignment"
|
message = "lambda cannot contain assignment"
|
||||||
|
else:
|
||||||
|
message = 'expression cannot contain assignment, perhaps you meant "=="?'
|
||||||
else:
|
else:
|
||||||
# f(+x=1)
|
# f(+x=1)
|
||||||
|
if self._normalizer.version < (3, 8):
|
||||||
message = "keyword can't be an expression"
|
message = "keyword can't be an expression"
|
||||||
|
else:
|
||||||
|
message = 'expression cannot contain assignment, perhaps you meant "=="?'
|
||||||
self.add_issue(first, message=message)
|
self.add_issue(first, message=message)
|
||||||
|
|
||||||
|
|
||||||
@@ -755,7 +770,7 @@ class _ArglistRule(SyntaxRule):
|
|||||||
def is_issue(self, node):
|
def is_issue(self, node):
|
||||||
first_arg = node.children[0]
|
first_arg = node.children[0]
|
||||||
if first_arg.type == 'argument' \
|
if first_arg.type == 'argument' \
|
||||||
and first_arg.children[1].type == 'comp_for':
|
and first_arg.children[1].type in _COMP_FOR_TYPES:
|
||||||
# e.g. foo(x for x in [], b)
|
# e.g. foo(x for x in [], b)
|
||||||
return len(node.children) >= 2
|
return len(node.children) >= 2
|
||||||
else:
|
else:
|
||||||
@@ -784,7 +799,8 @@ class _ArglistRule(SyntaxRule):
|
|||||||
if first == '*':
|
if first == '*':
|
||||||
if kw_unpacking_only:
|
if kw_unpacking_only:
|
||||||
# foo(**kwargs, *args)
|
# foo(**kwargs, *args)
|
||||||
message = "iterable argument unpacking follows keyword argument unpacking"
|
message = "iterable argument unpacking " \
|
||||||
|
"follows keyword argument unpacking"
|
||||||
self.add_issue(argument, message=message)
|
self.add_issue(argument, message=message)
|
||||||
else:
|
else:
|
||||||
kw_unpacking_only = True
|
kw_unpacking_only = True
|
||||||
@@ -806,6 +822,7 @@ class _ArglistRule(SyntaxRule):
|
|||||||
message = "positional argument follows keyword argument"
|
message = "positional argument follows keyword argument"
|
||||||
self.add_issue(argument, message=message)
|
self.add_issue(argument, message=message)
|
||||||
|
|
||||||
|
|
||||||
@ErrorFinder.register_rule(type='parameters')
|
@ErrorFinder.register_rule(type='parameters')
|
||||||
@ErrorFinder.register_rule(type='lambdef')
|
@ErrorFinder.register_rule(type='lambdef')
|
||||||
class _ParameterRule(SyntaxRule):
|
class _ParameterRule(SyntaxRule):
|
||||||
@@ -846,6 +863,7 @@ class _TryStmtRule(SyntaxRule):
|
|||||||
@ErrorFinder.register_rule(type='fstring')
|
@ErrorFinder.register_rule(type='fstring')
|
||||||
class _FStringRule(SyntaxRule):
|
class _FStringRule(SyntaxRule):
|
||||||
_fstring_grammar = None
|
_fstring_grammar = None
|
||||||
|
message_expr = "f-string expression part cannot include a backslash"
|
||||||
message_nested = "f-string: expressions nested too deeply"
|
message_nested = "f-string: expressions nested too deeply"
|
||||||
message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"
|
message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"
|
||||||
|
|
||||||
@@ -856,6 +874,10 @@ class _FStringRule(SyntaxRule):
|
|||||||
if depth >= 2:
|
if depth >= 2:
|
||||||
self.add_issue(fstring_expr, message=self.message_nested)
|
self.add_issue(fstring_expr, message=self.message_nested)
|
||||||
|
|
||||||
|
expr = fstring_expr.children[1]
|
||||||
|
if '\\' in expr.get_code():
|
||||||
|
self.add_issue(expr, message=self.message_expr)
|
||||||
|
|
||||||
conversion = fstring_expr.children[2]
|
conversion = fstring_expr.children[2]
|
||||||
if conversion.type == 'fstring_conversion':
|
if conversion.type == 'fstring_conversion':
|
||||||
name = conversion.children[1]
|
name = conversion.children[1]
|
||||||
@@ -876,7 +898,7 @@ class _FStringRule(SyntaxRule):
|
|||||||
|
|
||||||
|
|
||||||
class _CheckAssignmentRule(SyntaxRule):
|
class _CheckAssignmentRule(SyntaxRule):
|
||||||
def _check_assignment(self, node, is_deletion=False):
|
def _check_assignment(self, node, is_deletion=False, is_namedexpr=False):
|
||||||
error = None
|
error = None
|
||||||
type_ = node.type
|
type_ = node.type
|
||||||
if type_ == 'lambdef':
|
if type_ == 'lambdef':
|
||||||
@@ -886,19 +908,36 @@ class _CheckAssignmentRule(SyntaxRule):
|
|||||||
error = _get_comprehension_type(node)
|
error = _get_comprehension_type(node)
|
||||||
if error is None:
|
if error is None:
|
||||||
if second.type == 'dictorsetmaker':
|
if second.type == 'dictorsetmaker':
|
||||||
|
if self._normalizer.version < (3, 8):
|
||||||
error = 'literal'
|
error = 'literal'
|
||||||
|
else:
|
||||||
|
if second.children[1] == ':':
|
||||||
|
error = 'dict display'
|
||||||
|
else:
|
||||||
|
error = 'set display'
|
||||||
elif first in ('(', '['):
|
elif first in ('(', '['):
|
||||||
if second.type == 'yield_expr':
|
if second.type == 'yield_expr':
|
||||||
error = 'yield expression'
|
error = 'yield expression'
|
||||||
elif second.type == 'testlist_comp':
|
elif second.type == 'testlist_comp':
|
||||||
|
# ([a, b] := [1, 2])
|
||||||
|
# ((a, b) := [1, 2])
|
||||||
|
if is_namedexpr:
|
||||||
|
if first == '(':
|
||||||
|
error = 'tuple'
|
||||||
|
elif first == '[':
|
||||||
|
error = 'list'
|
||||||
|
|
||||||
# This is not a comprehension, they were handled
|
# This is not a comprehension, they were handled
|
||||||
# further above.
|
# further above.
|
||||||
for child in second.children[::2]:
|
for child in second.children[::2]:
|
||||||
self._check_assignment(child, is_deletion)
|
self._check_assignment(child, is_deletion, is_namedexpr)
|
||||||
else: # Everything handled, must be useless brackets.
|
else: # Everything handled, must be useless brackets.
|
||||||
self._check_assignment(second, is_deletion)
|
self._check_assignment(second, is_deletion, is_namedexpr)
|
||||||
elif type_ == 'keyword':
|
elif type_ == 'keyword':
|
||||||
|
if self._normalizer.version < (3, 8):
|
||||||
error = 'keyword'
|
error = 'keyword'
|
||||||
|
else:
|
||||||
|
error = str(node.value)
|
||||||
elif type_ == 'operator':
|
elif type_ == 'operator':
|
||||||
if node.value == '...':
|
if node.value == '...':
|
||||||
error = 'Ellipsis'
|
error = 'Ellipsis'
|
||||||
@@ -923,30 +962,39 @@ class _CheckAssignmentRule(SyntaxRule):
|
|||||||
assert trailer.type == 'trailer'
|
assert trailer.type == 'trailer'
|
||||||
if trailer.children[0] == '(':
|
if trailer.children[0] == '(':
|
||||||
error = 'function call'
|
error = 'function call'
|
||||||
|
elif is_namedexpr and trailer.children[0] == '[':
|
||||||
|
error = 'subscript'
|
||||||
|
elif is_namedexpr and trailer.children[0] == '.':
|
||||||
|
error = 'attribute'
|
||||||
elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
|
elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
|
||||||
for child in node.children[::2]:
|
for child in node.children[::2]:
|
||||||
self._check_assignment(child, is_deletion)
|
self._check_assignment(child, is_deletion, is_namedexpr)
|
||||||
elif ('expr' in type_ and type_ != 'star_expr' # is a substring
|
elif ('expr' in type_ and type_ != 'star_expr' # is a substring
|
||||||
or '_test' in type_
|
or '_test' in type_
|
||||||
or type_ in ('term', 'factor')):
|
or type_ in ('term', 'factor')):
|
||||||
error = 'operator'
|
error = 'operator'
|
||||||
|
|
||||||
if error is not None:
|
         if error is not None:
-            message = "can't %s %s" % ("delete" if is_deletion else "assign to", error)
+            if is_namedexpr:
+                # c.f. CPython bpo-39176, should be changed in next release
+                # message = 'cannot use assignment expressions with %s' % error
+                message = 'cannot use named assignment with %s' % error
+            else:
+                cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
+                message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
             self.add_issue(node, message=message)
 
 
-@ErrorFinder.register_rule(type='comp_for')
+@ErrorFinder.register_rule(type='sync_comp_for')
 class _CompForRule(_CheckAssignmentRule):
     message = "asynchronous comprehension outside of an asynchronous function"
 
     def is_issue(self, node):
-        # Some of the nodes here are already used, so no else if
-        expr_list = node.children[1 + int(node.children[0] == 'async')]
+        expr_list = node.children[1]
         if expr_list.type != 'expr_list':  # Already handled.
             self._check_assignment(expr_list)
 
-        return node.children[0] == 'async' \
+        return node.parent.children[0] == 'async' \
             and not self._normalizer.context.is_async_funcdef()
 
 
@@ -992,3 +1040,71 @@ class _ForStmtRule(_CheckAssignmentRule):
         expr_list = for_stmt.children[1]
         if expr_list.type != 'expr_list':  # Already handled.
             self._check_assignment(expr_list)
+
+
+@ErrorFinder.register_rule(type='namedexpr_test')
+class _NamedExprRule(_CheckAssignmentRule):
+    # namedexpr_test: test [':=' test]
+
+    def is_issue(self, namedexpr_test):
+        # assigned name
+        first = namedexpr_test.children[0]
+
+        def search_namedexpr_in_comp_for(node):
+            while True:
+                parent = node.parent
+                if parent is None:
+                    return parent
+                if parent.type == 'sync_comp_for' and parent.children[3] == node:
+                    return parent
+                node = parent
+
+        if search_namedexpr_in_comp_for(namedexpr_test):
+            # [i+1 for i in (i := range(5))]
+            # [i+1 for i in (j := range(5))]
+            # [i+1 for i in (lambda: (j := range(5)))()]
+            message = 'assignment expression cannot be used in a comprehension iterable expression'
+            self.add_issue(namedexpr_test, message=message)
+
+        # defined names
+        exprlist = list()
+
+        def process_comp_for(comp_for):
+            if comp_for.type == 'sync_comp_for':
+                comp = comp_for
+            elif comp_for.type == 'comp_for':
+                comp = comp_for.children[1]
+            exprlist.extend(_get_for_stmt_definition_exprs(comp))
+
+        def search_all_comp_ancestors(node):
+            has_ancestors = False
+            while True:
+                node = search_ancestor(node, 'testlist_comp', 'dictorsetmaker')
+                if node is None:
+                    break
+                for child in node.children:
+                    if child.type in _COMP_FOR_TYPES:
+                        process_comp_for(child)
+                        has_ancestors = True
+                        break
+            return has_ancestors
+
+        # check assignment expressions in comprehensions
+        search_all = search_all_comp_ancestors(namedexpr_test)
+        if search_all:
+            if self._normalizer.context.node.type == 'classdef':
+                message = 'assignment expression within a comprehension ' \
+                          'cannot be used in a class body'
+                self.add_issue(namedexpr_test, message=message)
+
+        namelist = [expr.value for expr in exprlist if expr.type == 'name']
+        if first.type == 'name' and first.value in namelist:
+            # [i := 0 for i, j in range(5)]
+            # [[(i := i) for j in range(5)] for i in range(5)]
+            # [i for i, j in range(5) if True or (i := 1)]
+            # [False and (i := 0) for i, j in range(5)]
+            message = 'assignment expression cannot rebind ' \
+                      'comprehension iteration variable %r' % first.value
+            self.add_issue(namedexpr_test, message=message)
+
+        self._check_assignment(first, is_namedexpr=True)
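The new _NamedExprRule above is reachable through parso's public error API. A minimal sketch, assuming a parso build that includes these commits (the 3.8 grammar is needed so ':=' tokenizes at all):

    import parso

    grammar = parso.load_grammar(version='3.8')
    module = grammar.parse('[i := 0 for i, j in pairs]')
    for issue in grammar.iter_errors(module):
        # Expected: a SyntaxError issue saying the assignment expression
        # cannot rebind the comprehension iteration variable 'i'.
        print(issue.start_pos, issue.message)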
@@ -1,159 +0,0 @@
-# Grammar for Python
-
-# Note: Changing the grammar specified in this file will most likely
-#       require corresponding changes in the parser module
-#       (../Modules/parsermodule.c). If you can't make the changes to
-#       that module yourself, please co-ordinate the required changes
-#       with someone who can; ask around on python-dev for help. Fred
-#       Drake <fdrake@acm.org> will probably be listening there.
-
-# NOTE WELL: You should also follow all the steps listed in PEP 306,
-# "How to Change Python's Grammar"
-
-# Commands for Kees Blom's railroad program
-#diagram:token NAME
-#diagram:token NUMBER
-#diagram:token STRING
-#diagram:token NEWLINE
-#diagram:token ENDMARKER
-#diagram:token INDENT
-#diagram:output\input python.bla
-#diagram:token DEDENT
-#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm
-#diagram:rules
-
-# Start symbols for the grammar:
-#       single_input is a single interactive statement;
-#       file_input is a module or sequence of commands read from an input file;
-#       eval_input is the input for the eval() and input() functions.
-# NB: compound_stmt in single_input is followed by extra NEWLINE!
-single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
-eval_input: testlist NEWLINE* ENDMARKER
-
-decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
-decorators: decorator+
-decorated: decorators (classdef | funcdef)
-funcdef: 'def' NAME parameters ':' suite
-parameters: '(' [varargslist] ')'
-varargslist: ((fpdef ['=' test] ',')*
-              ('*' NAME [',' '**' NAME] | '**' NAME) |
-              fpdef ['=' test] (',' fpdef ['=' test])* [','])
-fpdef: NAME | '(' fplist ')'
-fplist: fpdef (',' fpdef)* [',']
-
-stmt: simple_stmt | compound_stmt
-simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
-small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
-             import_stmt | global_stmt | exec_stmt | assert_stmt)
-expr_stmt: testlist (augassign (yield_expr|testlist) |
-                     ('=' (yield_expr|testlist))*)
-augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
-            '<<=' | '>>=' | '**=' | '//=')
-# For normal assignments, additional restrictions enforced by the interpreter
-print_stmt: 'print' ( [ test (',' test)* [','] ] |
-                      '>>' test [ (',' test)+ [','] ] )
-del_stmt: 'del' exprlist
-pass_stmt: 'pass'
-flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
-break_stmt: 'break'
-continue_stmt: 'continue'
-return_stmt: 'return' [testlist]
-yield_stmt: yield_expr
-raise_stmt: 'raise' [test [',' test [',' test]]]
-import_stmt: import_name | import_from
-import_name: 'import' dotted_as_names
-import_from: ('from' ('.'* dotted_name | '.'+)
-              'import' ('*' | '(' import_as_names ')' | import_as_names))
-import_as_name: NAME ['as' NAME]
-dotted_as_name: dotted_name ['as' NAME]
-import_as_names: import_as_name (',' import_as_name)* [',']
-dotted_as_names: dotted_as_name (',' dotted_as_name)*
-dotted_name: NAME ('.' NAME)*
-global_stmt: 'global' NAME (',' NAME)*
-exec_stmt: 'exec' expr ['in' test [',' test]]
-assert_stmt: 'assert' test [',' test]
-
-compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
-if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
-while_stmt: 'while' test ':' suite ['else' ':' suite]
-for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
-try_stmt: ('try' ':' suite
-           ((except_clause ':' suite)+
-            ['else' ':' suite]
-            ['finally' ':' suite] |
-            'finally' ':' suite))
-with_stmt: 'with' with_item ':' suite
-# Dave: Python2.6 actually defines a little bit of a different label called
-#       'with_var'. However in 2.7+ this is the default. Apply it for
-#       consistency reasons.
-with_item: test ['as' expr]
-# NB compile.c makes sure that the default except clause is last
-except_clause: 'except' [test [('as' | ',') test]]
-suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
-
-# Backward compatibility cruft to support:
-# [ x for x in lambda: True, lambda: False if x() ]
-# even while also allowing:
-# lambda x: 5 if x else 2
-# (But not a mix of the two)
-testlist_safe: old_test [(',' old_test)+ [',']]
-old_test: or_test | old_lambdef
-old_lambdef: 'lambda' [varargslist] ':' old_test
-
-test: or_test ['if' or_test 'else' test] | lambdef
-or_test: and_test ('or' and_test)*
-and_test: not_test ('and' not_test)*
-not_test: 'not' not_test | comparison
-comparison: expr (comp_op expr)*
-comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
-expr: xor_expr ('|' xor_expr)*
-xor_expr: and_expr ('^' and_expr)*
-and_expr: shift_expr ('&' shift_expr)*
-shift_expr: arith_expr (('<<'|'>>') arith_expr)*
-arith_expr: term (('+'|'-') term)*
-term: factor (('*'|'/'|'%'|'//') factor)*
-factor: ('+'|'-'|'~') factor | power
-power: atom trailer* ['**' factor]
-atom: ('(' [yield_expr|testlist_comp] ')' |
-       '[' [listmaker] ']' |
-       '{' [dictorsetmaker] '}' |
-       '`' testlist1 '`' |
-       NAME | NUMBER | strings)
-strings: STRING+
-listmaker: test ( list_for | (',' test)* [','] )
-# Dave: Renamed testlist_gexpr to testlist_comp, because in 2.7+ this is the
-#       default. It's more consistent like this.
-testlist_comp: test ( gen_for | (',' test)* [','] )
-lambdef: 'lambda' [varargslist] ':' test
-trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
-subscriptlist: subscript (',' subscript)* [',']
-subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop]
-sliceop: ':' [test]
-exprlist: expr (',' expr)* [',']
-testlist: test (',' test)* [',']
-# Dave: Rename from dictmaker to dictorsetmaker, because this is more
-#       consistent with the following grammars.
-dictorsetmaker: test ':' test (',' test ':' test)* [',']
-
-classdef: 'class' NAME ['(' [testlist] ')'] ':' suite
-
-arglist: (argument ',')* (argument [',']
-                         |'*' test (',' argument)* [',' '**' test]
-                         |'**' test)
-argument: test [gen_for] | test '=' test  # Really [keyword '='] test
-
-list_iter: list_for | list_if
-list_for: 'for' exprlist 'in' testlist_safe [list_iter]
-list_if: 'if' old_test [list_iter]
-
-gen_iter: gen_for | gen_if
-gen_for: 'for' exprlist 'in' or_test [gen_iter]
-gen_if: 'if' old_test [gen_iter]
-
-testlist1: test (',' test)*
-
-# not used in grammar, but may appear in "node" passed from Parser to Compiler
-encoding_decl: NAME
-
-yield_expr: 'yield' [testlist]
@@ -107,7 +107,7 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        NAME | NUMBER | strings)
 strings: STRING+
 listmaker: test ( list_for | (',' test)* [','] )
-testlist_comp: test ( comp_for | (',' test)* [','] )
+testlist_comp: test ( sync_comp_for | (',' test)* [','] )
 lambdef: 'lambda' [varargslist] ':' test
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
@@ -115,8 +115,8 @@ subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: expr (',' expr)* [',']
 testlist: test (',' test)* [',']
-dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
-                  (test (comp_for | (',' test)* [','])) )
+dictorsetmaker: ( (test ':' test (sync_comp_for | (',' test ':' test)* [','])) |
+                  (test (sync_comp_for | (',' test)* [','])) )
 
 classdef: 'class' NAME ['(' [testlist] ')'] ':' suite
 
@@ -125,14 +125,14 @@ arglist: (argument ',')* (argument [',']
                          |'**' test)
 # The reason that keywords are test nodes instead of NAME is that using NAME
 # results in an ambiguity. ast.c makes sure it's a NAME.
-argument: test [comp_for] | test '=' test
+argument: test [sync_comp_for] | test '=' test
 
 list_iter: list_for | list_if
 list_for: 'for' exprlist 'in' testlist_safe [list_iter]
 list_if: 'if' old_test [list_iter]
 
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' old_test [comp_iter]
 
 testlist1: test (',' test)*
@@ -105,15 +105,15 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
 strings: STRING+
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (test|star_expr) ( sync_comp_for | (',' (test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
-dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
-                  (test (comp_for | (',' test)* [','])) )
+dictorsetmaker: ( (test ':' test (sync_comp_for | (',' test ':' test)* [','])) |
+                  (test (sync_comp_for | (',' test)* [','])) )
 
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
 
@@ -122,9 +122,9 @@ arglist: (argument ',')* (argument [',']
                          |'**' test)
 # The reason that keywords are test nodes instead of NAME is that using NAME
 # results in an ambiguity. ast.c makes sure it's a NAME.
-argument: test [comp_for] | test '=' test  # Really [keyword '='] test
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+argument: test [sync_comp_for] | test '=' test  # Really [keyword '='] test
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' test_nocond [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
@@ -105,15 +105,15 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
 strings: STRING+
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (test|star_expr) ( sync_comp_for | (',' (test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
-dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
-                  (test (comp_for | (',' test)* [','])) )
+dictorsetmaker: ( (test ':' test (sync_comp_for | (',' test ':' test)* [','])) |
+                  (test (sync_comp_for | (',' test)* [','])) )
 
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
 
@@ -122,9 +122,9 @@ arglist: (argument ',')* (argument [',']
                          |'**' test)
 # The reason that keywords are test nodes instead of NAME is that using NAME
 # results in an ambiguity. ast.c makes sure it's a NAME.
-argument: test [comp_for] | test '=' test  # Really [keyword '='] test
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+argument: test [sync_comp_for] | test '=' test  # Really [keyword '='] test
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' test_nocond [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
@@ -112,7 +112,7 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
 strings: STRING+
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (test|star_expr) ( sync_comp_for | (',' (test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
@@ -120,9 +120,9 @@ sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
 dictorsetmaker: ( ((test ':' test | '**' expr)
-                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                   (sync_comp_for | (',' (test ':' test | '**' expr))* [','])) |
                   ((test | star_expr)
-                   (comp_for | (',' (test | star_expr))* [','])) )
+                   (sync_comp_for | (',' (test | star_expr))* [','])) )
 
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
 
@@ -137,13 +137,13 @@ arglist: argument (',' argument)* [',']
 # Illegal combinations and orderings are blocked in ast.c:
 # multiple (test comp_for) arguments are blocked; keyword unpackings
 # that precede iterable unpackings are blocked; etc.
-argument: ( test [comp_for] |
+argument: ( test [sync_comp_for] |
             test '=' test |
             '**' test |
             '*' test )
 
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_iter: sync_comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_if: 'if' test_nocond [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
@@ -140,7 +140,8 @@ argument: ( test [comp_for] |
             '*' test )
 
 comp_iter: comp_for | comp_if
-comp_for: ['async'] 'for' exprlist 'in' or_test [comp_iter]
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
 comp_if: 'if' test_nocond [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
@@ -138,7 +138,8 @@ argument: ( test [comp_for] |
             '*' test )
 
 comp_iter: comp_for | comp_if
-comp_for: ['async'] 'for' exprlist 'in' or_test [comp_iter]
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
 comp_if: 'if' test_nocond [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
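The split above factors the old comp_for rule so that 'async' becomes an optional prefix of a synchronous clause (sync_comp_for). A plain-Python illustration of what both alternatives accept (no parso API assumed):

    # comp_for: ['async'] sync_comp_for
    async def collect(aiter):
        return [x async for x in aiter]   # 'async' + sync_comp_for

    def plain(it):
        return [x for x in it]            # bare sync_comp_for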
@@ -20,13 +20,25 @@ async_funcdef: 'async' funcdef
 funcdef: 'def' NAME parameters ['->' test] ':' suite
 
 parameters: '(' [typedargslist] ')'
-typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
+        ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+        | '**' tfpdef [',']]])
+        | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+        | '**' tfpdef [',']]] )
+  | (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
         '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
       | '**' tfpdef [',']]]
   | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
   | '**' tfpdef [','])
+)
 tfpdef: NAME [':' test]
-varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+        | '**' vfpdef [',']]]
+    | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+    | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
         '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
       | '**' vfpdef [',']]]
   | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
@@ -69,8 +81,8 @@ assert_stmt: 'assert' test [',' test]
 
 compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
 async_stmt: 'async' (funcdef | with_stmt | for_stmt)
-if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
-while_stmt: 'while' test ':' suite ['else' ':' suite]
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
 for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
 try_stmt: ('try' ':' suite
            ((except_clause ':' suite)+
@@ -83,6 +95,7 @@ with_item: test ['as' expr]
 except_clause: 'except' [test ['as' NAME]]
 suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
 
+namedexpr_test: test [':=' test]
 test: or_test ['if' or_test 'else' test] | lambdef
 test_nocond: or_test | lambdef_nocond
 lambdef: 'lambda' [varargslist] ':' test
@@ -108,7 +121,7 @@ atom: ('(' [yield_expr|testlist_comp] ')' |
        '[' [testlist_comp] ']' |
        '{' [dictorsetmaker] '}' |
        NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
@@ -134,6 +147,7 @@ arglist: argument (',' argument)* [',']
 # multiple (test comp_for) arguments are blocked; keyword unpackings
 # that precede iterable unpackings are blocked; etc.
 argument: ( test [comp_for] |
+            test ':=' test |
             test '=' test |
             '**' test |
             '*' test )
@@ -153,5 +167,5 @@ strings: (STRING | fstring)+
 fstring: FSTRING_START fstring_content* FSTRING_END
 fstring_content: FSTRING_STRING | fstring_expr
 fstring_conversion: '!' NAME
-fstring_expr: '{' testlist [ fstring_conversion ] [ fstring_format_spec ] '}'
+fstring_expr: '{' testlist ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
 fstring_format_spec: ':' fstring_content*
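The 3.8 additions above (namedexpr_test, the `test ':=' test` argument alternative, and the optional '=' in fstring_expr) correspond to walrus assignments and self-documenting f-strings. A short runnable illustration, which needs a Python 3.8+ interpreter:

    import io

    stream = io.StringIO('spam and eggs')
    # while_stmt now takes a namedexpr_test:
    while (chunk := stream.read(4)):
        print(chunk)

    # fstring_expr now allows a trailing '=':
    value = 42
    print(f'{value=}')   # prints: value=42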
parso/python/grammar39.txt (new file, 171 lines)
@@ -0,0 +1,171 @@
+# Grammar for Python
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://devguide.python.org/grammar/
+
+# Start symbols for the grammar:
+#       single_input is a single interactive statement;
+#       file_input is a module or sequence of commands read from an input file;
+#       eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: (NEWLINE | stmt)* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+
+async_funcdef: 'async' funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
+        ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+        | '**' tfpdef [',']]])
+        | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+        | '**' tfpdef [',']]] )
+  | (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]]
+  | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+  | '**' tfpdef [','])
+)
+tfpdef: NAME [':' test]
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+        | '**' vfpdef [',']]]
+    | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+    | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']
+)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist_star_expr))*)
+annassign: ':' test ['=' test]
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal and annotated assignments, additional restrictions enforced by the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist_star_expr]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+            ['else' ':' suite]
+            ['finally' ':' suite] |
+            'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)* ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+namedexpr_test: test [':=' test]
+test: or_test ['if' or_test 'else' test] | lambdef
+test_nocond: or_test | lambdef_nocond
+lambdef: 'lambda' [varargslist] ':' test
+lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom_expr ['**' factor]
+atom_expr: ['await'] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+       '[' [testlist_comp] ']' |
+       '{' [dictorsetmaker] '}' |
+       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( ((test ':' test | '**' expr)
+                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                  ((test | star_expr)
+                   (comp_for | (',' (test | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)* [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
+comp_if: 'if' test_nocond [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist_star_expr
+
+strings: (STRING | fstring)+
+fstring: FSTRING_START fstring_content* FSTRING_END
+fstring_content: FSTRING_STRING | fstring_expr
+fstring_conversion: '!' NAME
+fstring_expr: '{' testlist ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
+fstring_format_spec: ':' fstring_content*
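The new 3.9 grammar carries over the 3.8 rules, including positional-only parameters via the `',' '/'` alternatives in typedargslist and varargslist. A small illustration of what that alternative admits (requires Python 3.8+):

    # The ',' '/' alternative makes value, low and high positional-only:
    def clamp(value, low, high, /, strict=False):
        if strict and not low <= value <= high:
            raise ValueError(value)
        return min(max(value, low), high)

    clamp(5, 0, 10)   # ok; clamp(value=5, ...) would raise TypeError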
@@ -172,5 +172,5 @@ A list of syntax/indentation errors I've encountered in CPython.
 Version specific:
 Python 3.5:
     'yield' inside async function
-Python 3.3/3.4:
+Python 3.4:
     can use starred expression only as assignment target
@@ -39,13 +39,11 @@ class Parser(BaseParser):
         'for_stmt': tree.ForStmt,
         'while_stmt': tree.WhileStmt,
         'try_stmt': tree.TryStmt,
-        'comp_for': tree.CompFor,
+        'sync_comp_for': tree.SyncCompFor,
         # Not sure if this is the best idea, but IMO it's the easiest way to
         # avoid extreme amounts of work around the subtle difference of 2/3
         # grammar in list comoprehensions.
-        'list_for': tree.CompFor,
-        # Same here. This just exists in Python 2.6.
-        'gen_for': tree.CompFor,
+        'list_for': tree.SyncCompFor,
         'decorator': tree.Decorator,
         'lambdef': tree.Lambda,
         'old_lambdef': tree.Lambda,
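With the node map above, comprehension for-clauses now materialize as SyncCompFor nodes. A sketch that walks a parsed tree to confirm this; the recursive walk helper is ad hoc, not part of parso's API:

    import parso

    module = parso.parse('[x for x in y]', version='3.8')

    def walk(node):  # ad-hoc recursive traversal
        yield node
        for child in getattr(node, 'children', []):
            for sub in walk(child):
                yield sub

    print(sorted(n.type for n in walk(module) if n.type.endswith('comp_for')))
    # Expected with these commits applied: ['sync_comp_for']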
parso/python/token.pyi (new file, 30 lines)
@@ -0,0 +1,30 @@
+from typing import Container, Iterable
+
+
+class TokenType:
+    name: str
+    contains_syntax: bool
+    def __init__(self, name: str, contains_syntax: bool) -> None: ...
+
+
+class TokenTypes:
+    def __init__(
+        self, names: Iterable[str], contains_syntax: Container[str]
+    ) -> None: ...
+
+
+# not an actual class in the source code, but we need this class to type the fields of
+# PythonTokenTypes
+class _FakePythonTokenTypesClass(TokenTypes):
+    STRING: TokenType
+    NUMBER: TokenType
+    NAME: TokenType
+    ERRORTOKEN: TokenType
+    NEWLINE: TokenType
+    INDENT: TokenType
+    DEDENT: TokenType
+    ERROR_DEDENT: TokenType
+    FSTRING_STRING: TokenType
+    FSTRING_START: TokenType
+    FSTRING_END: TokenType
+    OP: TokenType
+    ENDMARKER: TokenType
+
+
+PythonTokenTypes: _FakePythonTokenTypesClass = ...
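The stub mirrors how the token types are consumed at runtime; a tiny usage sketch:

    from parso.python.token import PythonTokenTypes

    # Token types are singletons compared by identity; the stub above
    # gives each field a static type.
    NAME = PythonTokenTypes.NAME
    print(NAME.name)  # 'NAME'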
@@ -19,10 +19,12 @@ import itertools as _itertools
 from codecs import BOM_UTF8
 
 from parso.python.token import PythonTokenTypes
-from parso._compatibility import py_version
 from parso.utils import split_lines
 
+
+# Maximum code point of Unicode 6.0: 0x10ffff (1,114,111)
+MAX_UNICODE = '\U0010ffff'
+
 STRING = PythonTokenTypes.STRING
 NAME = PythonTokenTypes.NAME
 NUMBER = PythonTokenTypes.NUMBER
@@ -47,12 +49,17 @@ BOM_UTF8_STRING = BOM_UTF8.decode('utf-8')
 
 _token_collection_cache = {}
 
-if py_version >= 30:
+if sys.version_info.major >= 3:
     # Python 3 has str.isidentifier() to check if a char is a valid identifier
     is_identifier = str.isidentifier
 else:
-    namechars = string.ascii_letters + '_'
-    is_identifier = lambda s: s in namechars
+    # Python 2 doesn't, but it's not that important anymore and if you tokenize
+    # Python 2 code with this, it's still ok. It's just that parsing Python 3
+    # code with this function is not 100% correct.
+    # This just means that Python 2 code matches a few identifiers too much,
+    # but that doesn't really matter.
+    def is_identifier(s):
+        return True
 
 
 def group(*choices, **kwargs):
@@ -78,7 +85,7 @@ def _all_string_prefixes(version_info, include_fstring=False, only_fstring=False
     # and don't contain any permuations (include 'fr', but not
     # 'rf'). The various permutations will be generated.
     valid_string_prefixes = ['b', 'r', 'u']
-    if version_info >= (3, 0):
+    if version_info.major >= 3:
         valid_string_prefixes.append('br')
 
     result = set([''])
@@ -98,7 +105,7 @@ def _all_string_prefixes(version_info, include_fstring=False, only_fstring=False
             # create a list with upper and lower versions of each
             # character
             result.update(different_case_versions(t))
-    if version_info <= (2, 7):
+    if version_info.major == 2:
         # In Python 2 the order cannot just be random.
         result.update(different_case_versions('ur'))
         result.update(different_case_versions('br'))
@@ -118,8 +125,10 @@ def _get_token_collection(version_info):
     return result
 
 
-fstring_string_single_line = _compile(r'(?:[^{}\r\n]+|\{\{|\}\})+')
+fstring_string_single_line = _compile(r'(?:\{\{|\}\}|\\(?:\r\n?|\n)|[^{}\r\n])+')
 fstring_string_multi_line = _compile(r'(?:[^{}]+|\{\{|\}\})+')
+fstring_format_spec_single_line = _compile(r'(?:\\(?:\r\n?|\n)|[^{}\r\n])+')
+fstring_format_spec_multi_line = _compile(r'[^{}]+')
 
 
 def _create_token_collection(version_info):
@@ -128,7 +137,16 @@ def _create_token_collection(version_info):
     Whitespace = r'[ \f\t]*'
     whitespace = _compile(Whitespace)
     Comment = r'#[^\r\n]*'
-    Name = r'\w+'
+    # Python 2 is pretty much not working properly anymore, we just ignore
+    # parsing unicode properly, which is fine, I guess.
+    if version_info[0] == 2:
+        Name = r'([A-Za-z_0-9]+)'
+    elif sys.version_info[0] == 2:
+        # Unfortunately the regex engine cannot deal with the regex below, so
+        # just use this one.
+        Name = r'(\w+)'
+    else:
+        Name = u'([A-Za-z_0-9\u0080-' + MAX_UNICODE + ']+)'
 
     if version_info >= (3, 6):
         Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
@@ -145,12 +163,14 @@ def _create_token_collection(version_info):
     else:
         Hexnumber = r'0[xX][0-9a-fA-F]+'
         Binnumber = r'0[bB][01]+'
-        if version_info >= (3, 0):
+        if version_info.major >= 3:
            Octnumber = r'0[oO][0-7]+'
        else:
            Octnumber = '0[oO]?[0-7]+'
         Decnumber = r'(?:0+|[1-9][0-9]*)'
         Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+        if version_info[0] < 3:
+            Intnumber += '[lL]?'
     Exponent = r'[eE][-+]?[0-9]+'
     Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
     Expfloat = r'[0-9]+' + Exponent
@@ -186,9 +206,13 @@ def _create_token_collection(version_info):
 
     Bracket = '[][(){}]'
 
-    special_args = [r'\r\n?', r'\n', r'[:;.,@]']
+    special_args = [r'\r\n?', r'\n', r'[;.,@]']
     if version_info >= (3, 0):
         special_args.insert(0, r'\.\.\.')
+    if version_info >= (3, 8):
+        special_args.insert(0, ":=?")
+    else:
+        special_args.insert(0, ":")
     Special = group(*special_args)
 
     Funny = group(Operator, Bracket, Special)
@@ -281,32 +305,45 @@ class FStringNode(object):
         return len(self.quote) == 3
 
     def is_in_expr(self):
-        return (self.parentheses_count - self.format_spec_count) > 0
+        return self.parentheses_count > self.format_spec_count
+
+    def is_in_format_spec(self):
+        return not self.is_in_expr() and self.format_spec_count
 
 
 def _close_fstring_if_necessary(fstring_stack, string, start_pos, additional_prefix):
     for fstring_stack_index, node in enumerate(fstring_stack):
-        if string.startswith(node.quote):
+        lstripped_string = string.lstrip()
+        len_lstrip = len(string) - len(lstripped_string)
+        if lstripped_string.startswith(node.quote):
             token = PythonToken(
                 FSTRING_END,
                 node.quote,
                 start_pos,
-                prefix=additional_prefix,
+                prefix=additional_prefix+string[:len_lstrip],
             )
             additional_prefix = ''
             assert not node.previous_lines
             del fstring_stack[fstring_stack_index:]
-            return token, '', len(node.quote)
+            return token, '', len(node.quote) + len_lstrip
     return None, additional_prefix, 0
 
 
 def _find_fstring_string(endpats, fstring_stack, line, lnum, pos):
     tos = fstring_stack[-1]
     allow_multiline = tos.allow_multiline()
-    if allow_multiline:
-        match = fstring_string_multi_line.match(line, pos)
+    if tos.is_in_format_spec():
+        if allow_multiline:
+            regex = fstring_format_spec_multi_line
+        else:
+            regex = fstring_format_spec_single_line
     else:
-        match = fstring_string_single_line.match(line, pos)
+        if allow_multiline:
+            regex = fstring_string_multi_line
+        else:
+            regex = fstring_string_single_line
+
+    match = regex.match(line, pos)
     if match is None:
         return tos.previous_lines, pos
@@ -321,7 +358,9 @@ def _find_fstring_string(endpats, fstring_stack, line, lnum, pos):
 
     new_pos = pos
     new_pos += len(string)
-    if allow_multiline and (string.endswith('\n') or string.endswith('\r')):
+    # even if allow_multiline is False, we still need to check for trailing
+    # newlines, because a single-line f-string can contain line continuations
+    if string.endswith('\n') or string.endswith('\r'):
         tos.previous_lines += string
         string = ''
     else:
@@ -419,8 +458,6 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             tos = fstring_stack[-1]
             if not tos.is_in_expr():
                 string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos)
-                if pos == max:
-                    break
                 if string:
                     yield PythonToken(
                         FSTRING_STRING, string,
@@ -431,6 +468,8 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                     )
                     tos.previous_lines = ''
                     continue
+                if pos == max:
+                    break
 
             rest = line[pos:]
             fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary(
@@ -444,6 +483,18 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                 yield fstring_end_token
                 continue
 
+        # in an f-string, match until the end of the string
+        if fstring_stack:
+            string_line = line
+            for fstring_stack_node in fstring_stack:
+                quote = fstring_stack_node.quote
+                end_match = endpats[quote].match(line, pos)
+                if end_match is not None:
+                    end_match_string = end_match.group(0)
+                    if len(end_match_string) - len(quote) + pos < len(string_line):
+                        string_line = line[:pos] + end_match_string[:-len(quote)]
+            pseudomatch = pseudo_token.match(string_line, pos)
+        else:
             pseudomatch = pseudo_token.match(line, pos)
         if not pseudomatch:  # scan for tokens
             match = whitespace.match(line, pos)
@@ -491,6 +542,24 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             if (initial in numchars or  # ordinary number
                     (initial == '.' and token != '.' and token != '...')):
                 yield PythonToken(NUMBER, token, spos, prefix)
+            elif pseudomatch.group(3) is not None:  # ordinary name
+                if token in always_break_tokens:
+                    fstring_stack[:] = []
+                    paren_level = 0
+                    # We only want to dedent if the token is on a new line.
+                    if re.match(r'[ \f\t]*$', line[:start]):
+                        while True:
+                            indent = indents.pop()
+                            if indent > start:
+                                yield PythonToken(DEDENT, '', spos, '')
+                            else:
+                                indents.append(indent)
+                                break
+                if is_identifier(token):
+                    yield PythonToken(NAME, token, spos, prefix)
+                else:
+                    for t in _split_illegal_unicode_name(token, spos, prefix):
+                        yield t  # yield from Python 2
             elif initial in '\r\n':
                 if any(not f.allow_multiline() for f in fstring_stack):
                     # Would use fstring_stack.clear, but that's not available
@@ -504,6 +573,11 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                 new_line = True
             elif initial == '#':  # Comments
                 assert not token.endswith("\n")
+                if fstring_stack and fstring_stack[-1].is_in_expr():
+                    # `#` is not allowed in f-string expressions
+                    yield PythonToken(ERRORTOKEN, initial, spos, prefix)
+                    pos = start + 1
+                else:
                     additional_prefix = prefix + token
             elif token in triple_quoted:
                 endprog = endpats[token]
@@ -545,20 +619,6 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             elif token in fstring_pattern_map:  # The start of an fstring.
                 fstring_stack.append(FStringNode(fstring_pattern_map[token]))
                 yield PythonToken(FSTRING_START, token, spos, prefix)
-            elif is_identifier(initial):  # ordinary name
-                if token in always_break_tokens:
-                    fstring_stack[:] = []
-                    paren_level = 0
-                    # We only want to dedent if the token is on a new line.
-                    if re.match(r'[ \f\t]*$', line[:start]):
-                        while True:
-                            indent = indents.pop()
-                            if indent > start:
-                                yield PythonToken(DEDENT, '', spos, '')
-                            else:
-                                indents.append(indent)
-                                break
-                yield PythonToken(NAME, token, spos, prefix)
             elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n', '\\\r'):  # continued stmt
                 additional_prefix += prefix + line[start:]
                 break
@@ -574,9 +634,13 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             else:
                 if paren_level:
                     paren_level -= 1
-                elif token == ':' and fstring_stack \
-                        and fstring_stack[-1].parentheses_count == 1:
+                elif token.startswith(':') and fstring_stack \
+                        and fstring_stack[-1].parentheses_count \
+                            - fstring_stack[-1].format_spec_count == 1:
+                    # `:` and `:=` both count
                     fstring_stack[-1].format_spec_count += 1
+                    token = ':'
+                    pos = start + 1
 
                 yield PythonToken(OP, token, spos, prefix)
@@ -593,6 +657,39 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
     yield PythonToken(ENDMARKER, '', end_pos, additional_prefix)
 
 
+def _split_illegal_unicode_name(token, start_pos, prefix):
+    def create_token():
+        return PythonToken(ERRORTOKEN if is_illegal else NAME, found, pos, prefix)
+
+    found = ''
+    is_illegal = False
+    pos = start_pos
+    for i, char in enumerate(token):
+        if is_illegal:
+            if is_identifier(char):
+                yield create_token()
+                found = char
+                is_illegal = False
+                prefix = ''
+                pos = start_pos[0], start_pos[1] + i
+            else:
+                found += char
+        else:
+            new_found = found + char
+            if is_identifier(new_found):
+                found = new_found
+            else:
+                if found:
+                    yield create_token()
+                    prefix = ''
+                    pos = start_pos[0], start_pos[1] + i
+                found = char
+                is_illegal = True
+
+    if found:
+        yield create_token()
+
+
 if __name__ == "__main__":
     if len(sys.argv) >= 2:
         path = sys.argv[1]
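The new NAME branch above routes characters that cannot appear in an identifier through _split_illegal_unicode_name. This is observable through the tokenizer directly; a sketch, assuming parse_version_string from parso.utils (which returns the PythonVersionInfo these functions expect):

    from parso.python.tokenize import tokenize
    from parso.utils import parse_version_string

    # '€' is not a valid identifier character, so 'a€b' is split into
    # NAME('a'), ERRORTOKEN('€'), NAME('b') instead of one bogus NAME.
    for token in tokenize('a€b', parse_version_string('3.8')):
        print(token.type, repr(token.string))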
parso/python/tokenize.pyi (new file, 24 lines)
@@ -0,0 +1,24 @@
+from typing import Generator, Iterable, NamedTuple, Tuple
+
+from parso.python.token import TokenType
+from parso.utils import PythonVersionInfo
+
+
+class Token(NamedTuple):
+    type: TokenType
+    string: str
+    start_pos: Tuple[int, int]
+    prefix: str
+    @property
+    def end_pos(self) -> Tuple[int, int]: ...
+
+
+class PythonToken(Token):
+    def __repr__(self) -> str: ...
+
+
+def tokenize(
+    code: str, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0)
+) -> Generator[PythonToken, None, None]: ...
+def tokenize_lines(
+    lines: Iterable[str],
+    version_info: PythonVersionInfo,
+    start_pos: Tuple[int, int] = (1, 0),
+) -> Generator[PythonToken, None, None]: ...
@@ -43,6 +43,10 @@ Parser Tree Classes
 """
 
 import re
+try:
+    from collections.abc import Mapping
+except ImportError:
+    from collections import Mapping
 
 from parso._compatibility import utf8_repr, unicode
 from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, \
@@ -53,10 +57,14 @@ from parso.utils import split_lines
 _FLOW_CONTAINERS = set(['if_stmt', 'while_stmt', 'for_stmt', 'try_stmt',
                         'with_stmt', 'async_stmt', 'suite'])
 _RETURN_STMT_CONTAINERS = set(['suite', 'simple_stmt']) | _FLOW_CONTAINERS
-_FUNC_CONTAINERS = set(['suite', 'simple_stmt', 'decorated']) | _FLOW_CONTAINERS
+_FUNC_CONTAINERS = set(
+    ['suite', 'simple_stmt', 'decorated', 'async_funcdef']
+) | _FLOW_CONTAINERS
 
 _GET_DEFINITION_TYPES = set([
-    'expr_stmt', 'comp_for', 'with_stmt', 'for_stmt', 'import_name',
-    'import_from', 'param'
+    'expr_stmt', 'sync_comp_for', 'with_stmt', 'for_stmt', 'import_name',
+    'import_from', 'param', 'del_stmt',
 ])
 _IMPORTS = set(['import_name', 'import_from'])
 
@@ -91,7 +99,7 @@ class DocstringMixin(object):
 
 class PythonMixin(object):
     """
-    Some Python specific utitilies.
+    Some Python specific utilities.
     """
     __slots__ = ()
 
@@ -196,25 +204,22 @@ class Name(_LeafWithoutNewlines):
         return "<%s: %s@%s,%s>" % (type(self).__name__, self.value,
                                    self.line, self.column)
 
-    def is_definition(self):
+    def is_definition(self, include_setitem=False):
         """
         Returns True if the name is being defined.
         """
-        return self.get_definition() is not None
+        return self.get_definition(include_setitem=include_setitem) is not None
 
-    def get_definition(self, import_name_always=False):
+    def get_definition(self, import_name_always=False, include_setitem=False):
         """
-        Returns None if there's on definition for a name.
+        Returns None if there's no definition for a name.
 
-        :param import_name_alway: Specifies if an import name is always a
+        :param import_name_always: Specifies if an import name is always a
             definition. Normally foo in `from foo import bar` is not a
             definition.
         """
         node = self.parent
         type_ = node.type
-        if type_ in ('power', 'atom_expr'):
-            # In `self.x = 3` self is not a definition, but x is.
-            return None
 
         if type_ in ('funcdef', 'classdef'):
             if self == node.name:
@@ -232,8 +237,10 @@ class Name(_LeafWithoutNewlines):
         while node is not None:
             if node.type == 'suite':
                 return None
+            if node.type == 'namedexpr_test':
+                return node.children[0]
             if node.type in _GET_DEFINITION_TYPES:
-                if self in node.get_defined_names():
+                if self in node.get_defined_names(include_setitem):
                     return node
                 if import_name_always and node.type in _IMPORTS:
                     return node
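What the new `include_setitem` flag buys callers, as a hedged sketch (the behaviour matches the `('x[0] = z', ...)` rows in the new test_is_definition further down):

    import parso

    module = parso.parse('x[0] = 3', version='3.8')
    name = module.get_first_leaf()  # the Name leaf for `x`
    print(name.is_definition())                      # False: setitem targets are skipped
    print(name.is_definition(include_setitem=True))  # True with the new flag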
@@ -442,7 +449,7 @@ class Module(Scope):
                 recurse(child)
 
         recurse(self)
-        self._used_names = dct
+        self._used_names = UsedNamesMapping(dct)
         return self._used_names
 
 
@@ -466,6 +473,9 @@ class ClassOrFunc(Scope):
         :rtype: list of :class:`Decorator`
         """
         decorated = self.parent
+        if decorated.type == 'async_funcdef':
+            decorated = decorated.parent
+
         if decorated.type == 'decorated':
             if decorated.children[0].type == 'decorators':
                 return decorated.children[0].children
@@ -545,7 +555,8 @@ def _create_params(parent, argslist_list):
                 if param_children[0] == '*' \
                         and (len(param_children) == 1
                              or param_children[1] == ',') \
-                        or check_python2_nested_param(param_children[0]):
+                        or check_python2_nested_param(param_children[0]) \
+                        or param_children[0] == '/':
                     for p in param_children:
                         p.parent = parent
                     new_children += param_children
@@ -767,8 +778,8 @@ class ForStmt(Flow):
         """
         return self.children[3]
 
-    def get_defined_names(self):
-        return _defined_names(self.children[1])
+    def get_defined_names(self, include_setitem=False):
+        return _defined_names(self.children[1], include_setitem)
 
 
 class TryStmt(Flow):
@@ -791,7 +802,7 @@ class WithStmt(Flow):
     type = 'with_stmt'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the with statement defines. The
         defined names are set after `as`.
@@ -800,7 +811,7 @@ class WithStmt(Flow):
         for with_item in self.children[1:-2:2]:
             # Check with items for 'as' names.
             if with_item.type == 'with_item':
-                names += _defined_names(with_item.children[2])
+                names += _defined_names(with_item.children[2], include_setitem)
         return names
 
     def get_test_node_from_name(self, name):
@@ -841,7 +852,7 @@ class ImportFrom(Import):
     type = 'import_from'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the import defines. The
         defined names are set after `import` or in case an alias - `as` - is
@@ -912,7 +923,7 @@ class ImportName(Import):
     type = 'import_name'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the import defines. The defined names
         is always the first name after `import` or in case an alias - `as` - is
@@ -969,7 +980,7 @@ class ImportName(Import):
 class KeywordStatement(PythonBaseNode):
     """
     For the following statements: `assert`, `del`, `global`, `nonlocal`,
-    `raise`, `return`, `yield`, `return`, `yield`.
+    `raise`, `return`, `yield`.
 
     `pass`, `continue` and `break` are not in there, because they are just
     simple keywords and the parser reduces it to a keyword.
@@ -988,6 +999,14 @@ class KeywordStatement(PythonBaseNode):
     def keyword(self):
         return self.children[0].value
 
+    def get_defined_names(self, include_setitem=False):
+        keyword = self.keyword
+        if keyword == 'del':
+            return _defined_names(self.children[1], include_setitem)
+        if keyword in ('global', 'nonlocal'):
+            return self.children[1::2]
+        return []
+
 
 class AssertStmt(KeywordStatement):
     __slots__ = ()
@@ -1013,7 +1032,7 @@ class YieldExpr(PythonBaseNode):
     __slots__ = ()
 
 
-def _defined_names(current):
+def _defined_names(current, include_setitem):
     """
     A helper function to find the defined names in statements, for loops and
     list comprehensions.
@@ -1021,14 +1040,22 @@ def _defined_names(current):
     names = []
     if current.type in ('testlist_star_expr', 'testlist_comp', 'exprlist', 'testlist'):
         for child in current.children[::2]:
-            names += _defined_names(child)
+            names += _defined_names(child, include_setitem)
     elif current.type in ('atom', 'star_expr'):
-        names += _defined_names(current.children[1])
+        names += _defined_names(current.children[1], include_setitem)
     elif current.type in ('power', 'atom_expr'):
         if current.children[-2] != '**':  # Just if there's no operation
             trailer = current.children[-1]
             if trailer.children[0] == '.':
                 names.append(trailer.children[1])
+            elif trailer.children[0] == '[' and include_setitem:
+                for node in current.children[-2::-1]:
+                    if node.type == 'trailer':
+                        names.append(node.children[1])
+                        break
+                    if node.type == 'name':
+                        names.append(node)
+                        break
     else:
         names.append(current)
     return names
@@ -1038,18 +1065,18 @@ class ExprStmt(PythonBaseNode, DocstringMixin):
     type = 'expr_stmt'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns a list of `Name` defined before the `=` sign.
         """
         names = []
         if self.children[1].type == 'annassign':
-            names = _defined_names(self.children[0])
+            names = _defined_names(self.children[0], include_setitem)
         return [
             name
             for i in range(0, len(self.children) - 2, 2)
             if '=' in self.children[i + 1].value
-            for name in _defined_names(self.children[i])
+            for name in _defined_names(self.children[i], include_setitem)
         ] + names
 
     def get_rhs(self):
@@ -1142,7 +1169,7 @@ class Param(PythonBaseNode):
         else:
             return self._tfpdef()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         return [self.name]
 
     @property
@@ -1158,6 +1185,13 @@ class Param(PythonBaseNode):
                 index -= 2
         except ValueError:
             pass
+        try:
+            keyword_only_index = self.parent.children.index('/')
+            if index > keyword_only_index:
+                # Skip the ` /, `
+                index -= 2
+        except ValueError:
+            pass
         return index - 1
 
     def get_parent_function(self):
@@ -1189,13 +1223,42 @@ class Param(PythonBaseNode):
         return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default)
 
 
-class CompFor(PythonBaseNode):
-    type = 'comp_for'
+class SyncCompFor(PythonBaseNode):
+    type = 'sync_comp_for'
     __slots__ = ()
 
-    def get_defined_names(self):
+    def get_defined_names(self, include_setitem=False):
         """
         Returns the a list of `Name` that the comprehension defines.
         """
         # allow async for
-        return _defined_names(self.children[self.children.index('for') + 1])
+        return _defined_names(self.children[1], include_setitem)
+
+
+# This is simply here so an older Jedi version can work with this new parso
+# version. Can be deleted in the next release.
+CompFor = SyncCompFor
+
+
+class UsedNamesMapping(Mapping):
+    """
+    This class exists for the sole purpose of creating an immutable dict.
+    """
+    def __init__(self, dct):
+        self._dict = dct
+
+    def __getitem__(self, key):
+        return self._dict[key]
+
+    def __len__(self):
+        return len(self._dict)
+
+    def __iter__(self):
+        return iter(self._dict)
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, other):
+        # Comparing these dicts does not make sense.
+        return self is other
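A short usage sketch for the new `UsedNamesMapping` (assuming this branch is installed); `get_used_names()` still behaves like a dict, it just can no longer be mutated by accident:

    import parso

    module = parso.parse('foo = 1\nfoo')
    used = module.get_used_names()   # now an immutable UsedNamesMapping
    print(sorted(used))              # ['foo']
    print(len(used['foo']))          # 2: the definition and the reference
    # used['bar'] = []               # no __setitem__: raises TypeError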
@@ -1,6 +1,7 @@
+import sys
 from abc import abstractmethod, abstractproperty
 
-from parso._compatibility import utf8_repr, encoding, py_version
+from parso._compatibility import utf8_repr, encoding
 from parso.utils import split_lines
 
 
@@ -321,7 +322,7 @@ class BaseNode(NodeOrLeaf):
     @utf8_repr
     def __repr__(self):
         code = self.get_code().replace('\n', ' ').replace('\r', ' ').strip()
-        if not py_version >= 30:
+        if not sys.version_info.major >= 3:
             code = code.encode(encoding, 'replace')
         return "<%s: %s@%s,%s>" % \
             (type(self).__name__, code, self.start_pos[0], self.start_pos[1])
@@ -2,8 +2,9 @@ from collections import namedtuple
 import re
 import sys
 from ast import literal_eval
+from functools import total_ordering
 
-from parso._compatibility import unicode, total_ordering
+from parso._compatibility import unicode
 
 # The following is a list in Python that are line breaks in str.splitlines, but
 # not in Python. In Python only \r (Carriage Return, 0xD) and \n (Line Feed,
@@ -122,7 +123,7 @@ def _parse_version(version):
     match = re.match(r'(\d+)(?:\.(\d)(?:\.\d+)?)?$', version)
     if match is None:
         raise ValueError('The given version is not in the right format. '
-                         'Use something like "3.2" or "3".')
+                         'Use something like "3.8" or "3".')
 
     major = int(match.group(1))
     minor = match.group(2)
@@ -163,13 +164,13 @@ class PythonVersionInfo(namedtuple('Version', 'major, minor')):
 
 def parse_version_string(version=None):
     """
-    Checks for a valid version number (e.g. `3.2` or `2.7.1` or `3`) and
+    Checks for a valid version number (e.g. `3.8` or `2.7.1` or `3`) and
     returns a corresponding version info that is always two characters long in
     decimal.
     """
     if version is None:
         version = '%s.%s' % sys.version_info[:2]
     if not isinstance(version, (unicode, str)):
-        raise TypeError("version must be a string like 3.2.")
+        raise TypeError('version must be a string like "3.8"')
 
     return _parse_version(version)
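The retargeted version numbers above are only cosmetic; the parsing rules are unchanged. A quick sketch of `parse_version_string` for reference:

    from parso.utils import parse_version_string

    v = parse_version_string('3.8')   # '3' or '2.7.1' are accepted too
    print(v.major, v.minor)           # 3 8
    # parse_version_string('3.x') would raise the ValueError quoted above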
parso/utils.pyi | 29 (new file)
@@ -0,0 +1,29 @@
+from typing import NamedTuple, Optional, Sequence, Union
+
+class Version(NamedTuple):
+    major: int
+    minor: int
+    micro: int
+
+def split_lines(string: str, keepends: bool = ...) -> Sequence[str]: ...
+def python_bytes_to_unicode(
+    source: Union[str, bytes], encoding: str = ..., errors: str = ...
+) -> str: ...
+def version_info() -> Version:
+    """
+    Returns a namedtuple of parso's version, similar to Python's
+    ``sys.version_info``.
+    """
+    ...
+
+class PythonVersionInfo(NamedTuple):
+    major: int
+    minor: int
+
+def parse_version_string(version: Optional[str]) -> PythonVersionInfo:
+    """
+    Checks for a valid version number (e.g. `3.2` or `2.7.1` or `3`) and
+    returns a corresponding version info that is always two characters long in
+    decimal.
+    """
+    ...
@@ -1,6 +1,8 @@
 [pytest]
 addopts = --doctest-modules
 
+testpaths = parso test
+
 # Ignore broken files in blackbox test directories
 norecursedirs = .* docs scripts normalizer_issue_files build
 
setup.cfg | 10
@@ -1,2 +1,12 @@
 [bdist_wheel]
 universal=1
+
+[flake8]
+max-line-length = 100
+ignore =
+    # do not use bare 'except'
+    E722,
+    # don't know why this was ever even an option, 1+1 should be possible.
+    E226,
+    # line break before binary operator
+    W503,
setup.py | 3
@@ -27,6 +27,7 @@ setup(name='parso',
       packages=find_packages(exclude=['test']),
       package_data={'parso': ['python/grammar*.txt']},
       platforms=['any'],
+      python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
       classifiers=[
           'Development Status :: 4 - Beta',
           'Environment :: Plugins',
@@ -34,10 +35,8 @@ setup(name='parso',
           'License :: OSI Approved :: MIT License',
           'Operating System :: OS Independent',
           'Programming Language :: Python :: 2',
-          'Programming Language :: Python :: 2.6',
           'Programming Language :: Python :: 2.7',
           'Programming Language :: Python :: 3',
-          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
@@ -19,14 +19,6 @@ def build_nested(code, depth, base='def f():\n'):
 FAILING_EXAMPLES = [
     '1 +',
     '?',
-    # Python/compile.c
-    dedent('''\
-    for a in [1]:
-        try:
-            pass
-        finally:
-            continue
-    '''),  # 'continue' not supported inside 'finally' clause"
     'continue',
     'break',
     'return',
@@ -154,7 +146,7 @@ FAILING_EXAMPLES = [
     # Now nested parsing
     "f'{continue}'",
     "f'{1;1}'",
-    "f'{a=3}'",
+    "f'{a;}'",
     "f'{b\"\" \"\"}'",
 ]
 
@@ -259,10 +251,6 @@ GLOBAL_NONLOCAL_ERROR = [
 
 if sys.version_info >= (3, 6):
     FAILING_EXAMPLES += GLOBAL_NONLOCAL_ERROR
-    FAILING_EXAMPLES += [
-        # Raises multiple errors in previous versions.
-        'async def foo():\n def nofoo():[x async for x in []]',
-    ]
 if sys.version_info >= (3, 5):
     FAILING_EXAMPLES += [
         # Raises different errors so just ignore them for now.
@@ -285,10 +273,18 @@ if sys.version_info >= (3,):
         'b"ä"',
         # combining strings and unicode is allowed in Python 2.
         '"s" b""',
+        '"s" b"" ""',
+        'b"" "" b"" ""',
+    ]
+    if sys.version_info >= (3, 6):
+        FAILING_EXAMPLES += [
+            # Same as above, but for f-strings.
+            'f"s" b""',
+            'b"s" f""',
+
+            # f-string expression part cannot include a backslash
+            r'''f"{'\n'}"''',
     ]
-if sys.version_info >= (2, 7):
-    # This is something that raises a different error in 2.6 than in the other
-    # versions. Just skip it for 2.6.
-    FAILING_EXAMPLES.append('[a, 1] += 3')
+FAILING_EXAMPLES.append('[a, 1] += 3')
 
 if sys.version_info[:2] == (3, 5):
@@ -311,3 +307,57 @@ if sys.version_info[:2] <= (3, 4):
         'a = *[1], 2',
         '(*[1], 2)',
     ]
+
+if sys.version_info[:2] < (3, 8):
+    FAILING_EXAMPLES += [
+        # Python/compile.c
+        dedent('''\
+        for a in [1]:
+            try:
+                pass
+            finally:
+                continue
+        '''),  # 'continue' not supported inside 'finally' clause"
+    ]
+
+if sys.version_info[:2] >= (3, 8):
+    # assignment expressions from issue#89
+    FAILING_EXAMPLES += [
+        # Case 2
+        '(lambda: x := 1)',
+        '((lambda: x) := 1)',
+        # Case 3
+        '(a[i] := x)',
+        '((a[i]) := x)',
+        '(a(i) := x)',
+        # Case 4
+        '(a.b := c)',
+        '[(i.i:= 0) for ((i), j) in range(5)]',
+        # Case 5
+        '[i:= 0 for i, j in range(5)]',
+        '[(i:= 0) for ((i), j) in range(5)]',
+        '[(i:= 0) for ((i), j), in range(5)]',
+        '[(i:= 0) for ((i), j.i), in range(5)]',
+        '[[(i:= i) for j in range(5)] for i in range(5)]',
+        '[i for i, j in range(5) if True or (i:= 1)]',
+        '[False and (i:= 0) for i, j in range(5)]',
+        # Case 6
+        '[i+1 for i in (i:= range(5))]',
+        '[i+1 for i in (j:= range(5))]',
+        '[i+1 for i in (lambda: (j:= range(5)))()]',
+        # Case 7
+        'class Example:\n [(j := i) for i in range(5)]',
+        # Not in that issue
+        '(await a := x)',
+        '((await a) := x)',
+        # new discoveries
+        '((a, b) := (1, 2))',
+        '([a, b] := [1, 2])',
+        '({a, b} := {1, 2})',
+        '({a: b} := {1: 2})',
+        '(a + b := 1)',
+        '(True := 1)',
+        '(False := 1)',
+        '(None := 1)',
+        '(__debug__ := 1)',
+    ]
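Each of the new assignment-expression strings is expected to produce a syntax error. A hedged sketch of how one of them surfaces through the public API:

    import parso

    grammar = parso.load_grammar(version='3.8')
    module = grammar.parse('(True := 1)')
    for issue in grammar.iter_errors(module):
        print(issue.start_pos, issue.message)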
@@ -203,9 +203,6 @@ class FileTests:
         self._test_count = test_count
         self._code_lines = self._code_lines
         self._change_count = change_count
-
-        with open(file_path) as f:
-            code = f.read()
         self._file_modifications = []
 
     def _run(self, grammar, file_modifications, debugger, print_code=False):
@@ -5,11 +5,14 @@ Test all things related to the ``jedi.cache`` module.
 from os import unlink
 
 import pytest
+import time
 
 from parso.cache import _NodeCacheItem, save_module, load_module, \
     _get_hashed_path, parser_cache, _load_from_file_system, _save_to_file_system
 from parso import load_grammar
 from parso import cache
+from parso import file_io
+from parso import parse
 
 
 @pytest.fixture()
@@ -76,12 +79,63 @@ def test_modulepickling_simulate_deleted_cache(tmpdir):
     path = tmpdir.dirname + '/some_path'
     with open(path, 'w'):
         pass
+    io = file_io.FileIO(path)
 
-    save_module(grammar._hashed, path, module, [])
-    assert load_module(grammar._hashed, path) == module
+    save_module(grammar._hashed, io, module, lines=[])
+    assert load_module(grammar._hashed, io) == module
 
     unlink(_get_hashed_path(grammar._hashed, path))
     parser_cache.clear()
 
-    cached2 = load_module(grammar._hashed, path)
+    cached2 = load_module(grammar._hashed, io)
     assert cached2 is None
+
+
+def test_cache_limit():
+    def cache_size():
+        return sum(len(v) for v in parser_cache.values())
+
+    try:
+        parser_cache.clear()
+        future_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() + 10e6)
+        old_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() - 10e4)
+        parser_cache['some_hash_old'] = {
+            '/path/%s' % i: old_node_cache_item for i in range(300)
+        }
+        parser_cache['some_hash_new'] = {
+            '/path/%s' % i: future_node_cache_item for i in range(300)
+        }
+        assert cache_size() == 600
+        parse('somecode', cache=True, path='/path/somepath')
+        assert cache_size() == 301
+    finally:
+        parser_cache.clear()
+
+
+class _FixedTimeFileIO(file_io.KnownContentFileIO):
+    def __init__(self, path, content, last_modified):
+        super(_FixedTimeFileIO, self).__init__(path, content)
+        self._last_modified = last_modified
+
+    def get_last_modified(self):
+        return self._last_modified
+
+
+@pytest.mark.parametrize('diff_cache', [False, True])
+@pytest.mark.parametrize('use_file_io', [False, True])
+def test_cache_last_used_update(diff_cache, use_file_io):
+    p = '/path/last-used'
+    parser_cache.clear()  # Clear, because then it's easier to find stuff.
+    parse('somecode', cache=True, path=p)
+    node_cache_item = next(iter(parser_cache.values()))[p]
+    now = time.time()
+    assert node_cache_item.last_used < now
+
+    if use_file_io:
+        f = _FixedTimeFileIO(p, 'code', node_cache_item.last_used - 10)
+        parse(file_io=f, cache=True, diff_cache=diff_cache)
+    else:
+        parse('somecode2', cache=True, path=p, diff_cache=diff_cache)
+
+    node_cache_item = next(iter(parser_cache.values()))[p]
+    assert now < node_cache_item.last_used < time.time()
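For context, a minimal sketch of the cache path these tests exercise (the '/tmp/example.py' path is made up):

    import parso
    from parso.cache import parser_cache

    parser_cache.clear()
    parso.parse('somecode', cache=True, path='/tmp/example.py')
    # One inner dict per hashed grammar; test_cache_limit caps total entries.
    print(sum(len(v) for v in parser_cache.values()))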
@@ -974,10 +974,12 @@ def test_random_unicode_characters(differ):
     Those issues were all found with the fuzzer.
     """
     differ.initialize('')
-    differ.parse(u'\x1dĔBϞɛˁşʑ˳˻ȣſéÎ\x90̕ȟòwʘ\x1dĔBϞɛˁşʑ˳˻ȣſéÎ', parsers=1, expect_error_leaves=True)
+    differ.parse(u'\x1dĔBϞɛˁşʑ˳˻ȣſéÎ\x90̕ȟòwʘ\x1dĔBϞɛˁşʑ˳˻ȣſéÎ', parsers=1,
+                 expect_error_leaves=True)
     differ.parse(u'\r\r', parsers=1)
     differ.parse(u"˟Ę\x05À\r rúƣ@\x8a\x15r()\n", parsers=1, expect_error_leaves=True)
-    differ.parse(u'a\ntaǁ\rGĒōns__\n\nb', parsers=1)
+    differ.parse(u'a\ntaǁ\rGĒōns__\n\nb', parsers=1,
+                 expect_error_leaves=sys.version_info[0] == 2)
     s = ' if not (self, "_fi\x02\x0e\x08\n\nle"):'
     differ.parse(s, parsers=1, expect_error_leaves=True)
     differ.parse('')
@@ -987,7 +989,6 @@ def test_random_unicode_characters(differ):
     differ.parse(' a( # xx\ndef', parsers=2, expect_error_leaves=True)
 
 
-@pytest.mark.skipif(sys.version_info < (2, 7), reason="No set literals in Python 2.6")
 def test_dedent_end_positions(differ):
     code1 = dedent('''\
         if 1:
@@ -1243,7 +1244,7 @@ def test_open_bracket_case2(differ):
     differ.parse(code1, copies=2, parsers=0, expect_error_leaves=True)
 
 
-def test_x(differ):
+def test_some_weird_removals(differ):
     code1 = dedent('''\
         class C:
             1
@@ -1264,6 +1265,23 @@ def test_x(differ):
         omega
         ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=ANY, parsers=ANY, expect_error_leaves=True)
-    differ.parse(code3, copies=ANY, parsers=ANY, expect_error_leaves=True)
+    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
+    differ.parse(code3, copies=1, parsers=2, expect_error_leaves=True)
     differ.parse(code1, copies=1)
+
+
+@pytest.mark.skipif(sys.version_info < (3, 5), reason="Async starts working in 3.5")
+def test_async_copy(differ):
+    code1 = dedent('''\
+        async def main():
+            x = 3
+            print(
+        ''')
+    code2 = dedent('''\
+        async def main():
+            x = 3
+            print()
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, copies=1, parsers=1)
+    differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
@@ -7,31 +7,62 @@ from parso.python.tokenize import tokenize
 
 @pytest.fixture
 def grammar():
-    return load_grammar(version='3.6')
+    return load_grammar(version='3.8')
 
 
 @pytest.mark.parametrize(
     'code', [
-        '{1}',
-        '{1:}',
-        '',
-        '{1!a}',
-        '{1!a:1}',
-        '{1:1}',
-        '{1:1.{32}}',
-        '{1::>4}',
-        '{foo} {bar}',
+        # simple cases
+        'f"{1}"',
+        'f"""{1}"""',
+        'f"{foo} {bar}"',
+
+        # empty string
+        'f""',
+        'f""""""',
+
+        # empty format specifier is okay
+        'f"{1:}"',
+
+        # use of conversion options
+        'f"{1!a}"',
+        'f"{1!a:1}"',
+
+        # format specifiers
+        'f"{1:1}"',
+        'f"{1:1.{32}}"',
+        'f"{1::>4}"',
+        'f"{x:{y}}"',
+        'f"{x:{y:}}"',
+        'f"{x:{y:1}}"',
 
         # Escapes
-        '{{}}',
-        '{{{1}}}',
-        '{{{1}',
-        '1{{2{{3',
-        '}}',
+        'f"{{}}"',
+        'f"{{{1}}}"',
+        'f"{{{1}"',
+        'f"1{{2{{3"',
+        'f"}}"',
+
+        # New Python 3.8 syntax f'{a=}'
+        'f"{a=}"',
+        'f"{a()=}"',
+
+        # multiline f-string
+        'f"""abc\ndef"""',
+        'f"""abc{\n123}def"""',
+
+        # a line continuation inside of an fstring_string
+        'f"abc\\\ndef"',
+        'f"\\\n{123}\\\n"',
+
+        # a line continuation inside of an fstring_expr
+        'f"{\\\n123}"',
+
+        # a line continuation inside of an format spec
+        'f"{123:.2\\\nf}"',
     ]
 )
 def test_valid(code, grammar):
-    code = 'f"""%s"""' % code
     module = grammar.parse(code, error_recovery=False)
     fstring = module.children[0]
     assert fstring.type == 'fstring'
@@ -40,23 +71,34 @@ def test_valid(code, grammar):
 
 @pytest.mark.parametrize(
     'code', [
-        '}',
-        '{',
-        '{1!{a}}',
-        '{!{a}}',
-        '{}',
-        '{:}',
-        '{:}}}',
-        '{:1}',
-        '{!:}',
-        '{!}',
-        '{!a}',
-        '{1:{}}',
-        '{1:{:}}',
+        # an f-string can't contain unmatched curly braces
+        'f"}"',
+        'f"{"',
+        'f"""}"""',
+        'f"""{"""',
+
+        # invalid conversion characters
+        'f"{1!{a}}"',
+        'f"{!{a}}"',
+
+        # The curly braces must contain an expression
+        'f"{}"',
+        'f"{:}"',
+        'f"{:}}}"',
+        'f"{:1}"',
+        'f"{!:}"',
+        'f"{!}"',
+        'f"{!a}"',
+
+        # invalid (empty) format specifiers
+        'f"{1:{}}"',
+        'f"{1:{:}}"',
+
+        # a newline without a line continuation inside a single-line string
+        'f"abc\ndef"',
     ]
 )
 def test_invalid(code, grammar):
-    code = 'f"""%s"""' % code
     with pytest.raises(ParserSyntaxError):
         grammar.parse(code, error_recovery=False)
 
@@ -79,11 +121,18 @@ def test_tokenize_start_pos(code, positions):
     assert positions == [p.start_pos for p in tokens]
 
 
-def test_roundtrip(grammar):
-    code = dedent("""\
+@pytest.mark.parametrize(
+    'code', [
+        dedent("""\
             f'''s{
             str.uppe
             '''
-        """)
+            """),
+        'f"foo',
+        'f"""foo',
+        'f"abc\ndef"',
+    ]
+)
+def test_roundtrip(grammar, code):
     tree = grammar.parse(code)
     assert tree.get_code() == code
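The round-trip property also holds with error recovery on, which is what the parametrized test asserts; a minimal sketch:

    import parso

    grammar = parso.load_grammar(version='3.8')
    for code in ('f"foo', 'f"""foo'):
        tree = grammar.parse(code)      # error recovery is on by default
        assert tree.get_code() == code  # the exact source is always recoverable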
@@ -28,4 +28,4 @@ def test_invalid_grammar_version(string):
 
 def test_grammar_int_version():
     with pytest.raises(TypeError):
-        load_grammar(version=3.2)
+        load_grammar(version=3.8)
@@ -5,9 +5,9 @@ tests of pydocstyle.
 
 import difflib
 import re
+from functools import total_ordering
 
 import parso
-from parso._compatibility import total_ordering
 from parso.utils import python_bytes_to_unicode
 
 
@@ -189,3 +189,22 @@ def test_no_error_nodes(each_version):
             check(child)
 
     check(parse("if foo:\n bar", version=each_version))
+
+
+def test_named_expression(works_ge_py38):
+    works_ge_py38.parse("(a := 1, a + 1)")
+
+
+@pytest.mark.parametrize(
    'param_code', [
        'a=1, /',
        'a, /',
        'a=1, /, b=3',
        'a, /, b',
        'a, /, b',
        'a, /, *, b',
        'a, /, **kwargs',
    ]
)
+def test_positional_only_arguments(works_ge_py38, param_code):
+    works_ge_py38.parse("def x(%s): pass" % param_code)
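A sketch of what the positional-only cases accept, using the plain parse API (parameter names as in PEP 570):

    import parso

    module = parso.parse('def x(a, /, b): pass', version='3.8')
    func = module.children[0]
    # The '/' separator is not a Param node, so only the real parameters show up.
    print([p.name.value for p in func.get_params()])  # ['a', 'b']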
@@ -142,7 +142,7 @@ def test_yields(each_version):
 
 
 def test_yield_from():
-    y, = get_yield_exprs('def x(): (yield from 1)', '3.3')
+    y, = get_yield_exprs('def x(): (yield from 1)', '3.8')
     assert y.type == 'yield_expr'
 
 
@@ -180,3 +180,61 @@ def top_function_three():
 
     r = get_raise_stmts(code, 2)  # Lists inside try-catch
     assert len(list(r)) == 2
+
+
+@pytest.mark.parametrize(
+    'code, name_index, is_definition, include_setitem', [
+        ('x = 3', 0, True, False),
+        ('x.y = 3', 0, False, False),
+        ('x.y = 3', 1, True, False),
+        ('x.y = u.v = z', 0, False, False),
+        ('x.y = u.v = z', 1, True, False),
+        ('x.y = u.v = z', 2, False, False),
+        ('x.y = u.v, w = z', 3, True, False),
+        ('x.y = u.v, w = z', 4, True, False),
+        ('x.y = u.v, w = z', 5, False, False),
+
+        ('x, y = z', 0, True, False),
+        ('x, y = z', 1, True, False),
+        ('x, y = z', 2, False, False),
+        ('x, y = z', 2, False, False),
+        ('x[0], y = z', 2, False, False),
+        ('x[0] = z', 0, False, False),
+        ('x[0], y = z', 0, False, False),
+        ('x[0], y = z', 2, False, True),
+        ('x[0] = z', 0, True, True),
+        ('x[0], y = z', 0, True, True),
+        ('x: int = z', 0, True, False),
+        ('x: int = z', 1, False, False),
+        ('x: int = z', 2, False, False),
+        ('x: int', 0, True, False),
+        ('x: int', 1, False, False),
+    ]
+)
+def test_is_definition(code, name_index, is_definition, include_setitem):
+    module = parse(code, version='3.8')
+    name = module.get_first_leaf()
+    while True:
+        if name.type == 'name':
+            if name_index == 0:
+                break
+            name_index -= 1
+        name = name.get_next_leaf()
+
+    assert name.is_definition(include_setitem=include_setitem) == is_definition
+
+
+def test_iter_funcdefs():
+    code = dedent('''
+        def normal(): ...
+        async def asyn(): ...
+        @dec
+        def dec_normal(): ...
+        @dec1
+        @dec2
+        async def dec_async(): ...
+        def broken
+        ''')
+    module = parse(code, version='3.8')
+    func_names = [f.name.value for f in module.iter_funcdefs()]
+    assert func_names == ['normal', 'asyn', 'dec_normal', 'dec_async']
@@ -190,6 +190,19 @@ def test_old_octal_notation(works_in_py2):
     works_in_py2.parse("07")
 
 
+def test_long_notation(works_in_py2):
+    works_in_py2.parse("0xFl")
+    works_in_py2.parse("0xFL")
+    works_in_py2.parse("0b1l")
+    works_in_py2.parse("0B1L")
+    works_in_py2.parse("0o7l")
+    works_in_py2.parse("0O7L")
+    works_in_py2.parse("0l")
+    works_in_py2.parse("0L")
+    works_in_py2.parse("10l")
+    works_in_py2.parse("10L")
+
+
 def test_new_binary_notation(each_version):
     _parse("""0b101010""", each_version)
     _invalid_syntax("""0b0101021""", each_version)
@@ -279,12 +292,22 @@ def test_left_recursion():
         generate_grammar('foo: foo NAME\n', tokenize.PythonTokenTypes)
 
 
-def test_ambiguities():
-    with pytest.raises(ValueError, match='ambiguous'):
-        generate_grammar('foo: bar | baz\nbar: NAME\nbaz: NAME\n', tokenize.PythonTokenTypes)
-
-    with pytest.raises(ValueError, match='ambiguous'):
-        generate_grammar('''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''', tokenize.PythonTokenTypes)
-
-    with pytest.raises(ValueError, match='ambiguous'):
-        generate_grammar('''foo: bar | 'x'\nbar: 'x'\n''', tokenize.PythonTokenTypes)
+@pytest.mark.parametrize(
+    'grammar, error_match', [
+        ['foo: bar | baz\nbar: NAME\nbaz: NAME\n',
+         r"foo is ambiguous.*given a TokenType\(NAME\).*bar or baz"],
+        ['''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''',
+         r"foo is ambiguous.*given a ReservedString\(x\).*bar or baz"],
+        ['''foo: bar | 'x'\nbar: 'x'\n''',
+         r"foo is ambiguous.*given a ReservedString\(x\).*bar or foo"],
+        # An ambiguity with the second (not the first) child of a production
+        ['outer: "a" [inner] "b" "c"\ninner: "b" "c" [inner]\n',
+         r"outer is ambiguous.*given a ReservedString\(b\).*inner or outer"],
+        # An ambiguity hidden by a level of indirection (middle)
+        ['outer: "a" [middle] "b" "c"\nmiddle: inner\ninner: "b" "c" [inner]\n',
+         r"outer is ambiguous.*given a ReservedString\(b\).*middle or outer"],
+    ]
+)
+def test_ambiguities(grammar, error_match):
+    with pytest.raises(ValueError, match=error_match):
+        generate_grammar(grammar, tokenize.PythonTokenTypes)
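The new, more precise messages can be reproduced directly; a sketch using the internal `generate_grammar` helper the tests import:

    from parso.pgen2 import generate_grammar
    from parso.python import tokenize

    try:
        generate_grammar('foo: bar | baz\nbar: NAME\nbaz: NAME\n',
                         tokenize.PythonTokenTypes)
    except ValueError as e:
        print(e)  # foo is ambiguous ... given a TokenType(NAME) ... bar or baz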
@@ -37,10 +37,33 @@ def test_python_exception_matches(code):
     error, = errors
     actual = error.message
     assert actual in wanted
-    # Somehow in Python3.3 the SyntaxError().lineno is sometimes None
+    # Somehow in Python2.7 the SyntaxError().lineno is sometimes None
     assert line_nr is None or line_nr == error.start_pos[0]
 
 
+def test_non_async_in_async():
+    """
+    This example doesn't work with FAILING_EXAMPLES, because the line numbers
+    are not always the same / incorrect in Python 3.8.
+    """
+    if sys.version_info[:2] < (3, 5):
+        pytest.skip()
+
+    # Raises multiple errors in previous versions.
+    code = 'async def foo():\n def nofoo():[x async for x in []]'
+    wanted, line_nr = _get_actual_exception(code)
+
+    errors = _get_error_list(code)
+    if errors:
+        error, = errors
+        actual = error.message
+        assert actual in wanted
+        if sys.version_info[:2] < (3, 8):
+            assert line_nr == error.start_pos[0]
+        else:
+            assert line_nr == 0  # For whatever reason this is zero in Python 3.8+
+
+
 @pytest.mark.parametrize(
     ('code', 'positions'), [
         ('1 +', [(1, 3)]),
@@ -95,21 +118,12 @@ def _get_actual_exception(code):
         assert False, "The piece of code should raise an exception."
 
     # SyntaxError
-    # Python 2.6 has a bit different error messages here, so skip it.
-    if sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError: unexpected EOF while parsing':
-        wanted = 'SyntaxError: invalid syntax'
-
     if wanted == 'SyntaxError: non-keyword arg after keyword arg':
         # The python 3.5+ way, a bit nicer.
         wanted = 'SyntaxError: positional argument follows keyword argument'
     elif wanted == 'SyntaxError: assignment to keyword':
-        return [wanted, "SyntaxError: can't assign to keyword"], line_nr
-    elif wanted == 'SyntaxError: assignment to None':
-        # Python 2.6 does has a slightly different error.
-        wanted = 'SyntaxError: cannot assign to None'
-    elif wanted == 'SyntaxError: can not assign to __debug__':
-        # Python 2.6 does has a slightly different error.
-        wanted = 'SyntaxError: cannot assign to __debug__'
+        return [wanted, "SyntaxError: can't assign to keyword",
+                'SyntaxError: cannot assign to __debug__'], line_nr
     elif wanted == 'SyntaxError: can use starred expression only as assignment target':
         # Python 3.4/3.4 have a bit of a different warning than 3.5/3.6 in
         # certain places. But in others this error makes sense.
@@ -269,6 +283,19 @@ def test_valid_fstrings(code):
     assert not _get_error_list(code, version='3.6')
 
 
+@pytest.mark.parametrize(
+    'code', [
+        'a = (b := 1)',
+        '[x4 := x ** 5 for x in range(7)]',
+        '[total := total + v for v in range(10)]',
+        'while chunk := file.read(2):\n pass',
+        'numbers = [y := math.factorial(x), y**2, y**3]',
+    ]
+)
+def test_valid_namedexpr(code):
+    assert not _get_error_list(code, version='3.8')
+
+
 @pytest.mark.parametrize(
     ('code', 'message'), [
         ("f'{1+}'", ('invalid syntax')),
@@ -283,3 +310,14 @@ def test_invalid_fstrings(code, message):
     """
     error, = _get_error_list(code, version='3.6')
     assert message in error.message
+
+
+@pytest.mark.parametrize(
+    'code', [
+        "from foo import (\nbar,\n rab,\n)",
+        "from foo import (bar, rab, )",
+    ]
+)
+def test_trailing_comma(code):
+    errors = _get_error_list(code)
+    assert not errors
@@ -1,10 +1,11 @@
 # -*- coding: utf-8 # This file contains Unicode characters.
 
+import sys
 from textwrap import dedent
 
 import pytest
+import sys
 
-from parso._compatibility import py_version
 from parso.utils import split_lines, parse_version_string
 from parso.python.token import PythonTokenTypes
 from parso.python import tokenize
@@ -16,6 +17,7 @@ from parso.python.tokenize import PythonToken
 NAME = PythonTokenTypes.NAME
 NEWLINE = PythonTokenTypes.NEWLINE
 STRING = PythonTokenTypes.STRING
+NUMBER = PythonTokenTypes.NUMBER
 INDENT = PythonTokenTypes.INDENT
 DEDENT = PythonTokenTypes.DEDENT
 ERRORTOKEN = PythonTokenTypes.ERRORTOKEN
@@ -135,12 +137,12 @@ def test_identifier_contains_unicode():
     ''')
     token_list = _get_token_list(fundef)
     unicode_token = token_list[1]
-    if py_version >= 30:
+    if sys.version_info.major >= 3:
         assert unicode_token[0] == NAME
     else:
         # Unicode tokens in Python 2 seem to be identified as operators.
         # They will be ignored in the parser, that's ok.
-        assert unicode_token[0] == OP
+        assert unicode_token[0] == ERRORTOKEN
 
 
 def test_quoted_strings():
@@ -183,19 +185,19 @@ def test_ur_literals():
         assert typ == NAME
 
     check('u""')
-    check('ur""', is_literal=not py_version >= 30)
-    check('Ur""', is_literal=not py_version >= 30)
-    check('UR""', is_literal=not py_version >= 30)
+    check('ur""', is_literal=not sys.version_info.major >= 3)
+    check('Ur""', is_literal=not sys.version_info.major >= 3)
+    check('UR""', is_literal=not sys.version_info.major >= 3)
     check('bR""')
     # Starting with Python 3.3 this ordering is also possible.
-    if py_version >= 33:
+    if sys.version_info.major >= 3:
         check('Rb""')
 
     # Starting with Python 3.6 format strings where introduced.
-    check('fr""', is_literal=py_version >= 36)
-    check('rF""', is_literal=py_version >= 36)
-    check('f""', is_literal=py_version >= 36)
-    check('F""', is_literal=py_version >= 36)
+    check('fr""', is_literal=sys.version_info >= (3, 6))
+    check('rF""', is_literal=sys.version_info >= (3, 6))
+    check('f""', is_literal=sys.version_info >= (3, 6))
+    check('F""', is_literal=sys.version_info >= (3, 6))
 
 
 def test_error_literal():
@@ -228,16 +230,29 @@ def test_endmarker_end_pos():
     check('a\\')
 
 
+xfail_py2 = dict(marks=[pytest.mark.xfail(sys.version_info[0] == 2, reason='Python 2')])
+
+
 @pytest.mark.parametrize(
     ('code', 'types'), [
+        # Indentation
         (' foo', [INDENT, NAME, DEDENT]),
         (' foo\n bar', [INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME, DEDENT]),
        (' foo\n bar \n baz', [INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME,
                               NEWLINE, ERROR_DEDENT, NAME, DEDENT]),
        (' foo\nbar', [INDENT, NAME, NEWLINE, DEDENT, NAME]),
+
+        # Name stuff
+        ('1foo1', [NUMBER, NAME]),
+        pytest.param(
+            u'மெல்லினம்', [NAME],
+            **xfail_py2),
+        pytest.param(u'²', [ERRORTOKEN], **xfail_py2),
+        pytest.param(u'ä²ö', [NAME, ERRORTOKEN, NAME], **xfail_py2),
+        pytest.param(u'ää²¹öö', [NAME, ERRORTOKEN, NAME], **xfail_py2),
     ]
 )
-def test_indentation(code, types):
+def test_token_types(code, types):
     actual_types = [t.type for t in _get_token_list(code)]
     assert actual_types == types + [ENDMARKER]
 
@@ -330,15 +345,72 @@ def test_backslash():
         ('f" "{}', [FSTRING_START, FSTRING_STRING, FSTRING_END, OP, OP]),
         (r'f"\""', [FSTRING_START, FSTRING_STRING, FSTRING_END]),
         (r'f"\""', [FSTRING_START, FSTRING_STRING, FSTRING_END]),
+
+        # format spec
         (r'f"Some {x:.2f}{y}"', [FSTRING_START, FSTRING_STRING, OP, NAME, OP,
                                  FSTRING_STRING, OP, OP, NAME, OP, FSTRING_END]),
+
+        # multiline f-string
+        ('f"""abc\ndef"""', [FSTRING_START, FSTRING_STRING, FSTRING_END]),
+        ('f"""abc{\n123}def"""', [
+            FSTRING_START, FSTRING_STRING, OP, NUMBER, OP, FSTRING_STRING,
+            FSTRING_END
+        ]),
+
+        # a line continuation inside of an fstring_string
+        ('f"abc\\\ndef"', [
+            FSTRING_START, FSTRING_STRING, FSTRING_END
+        ]),
+        ('f"\\\n{123}\\\n"', [
+            FSTRING_START, FSTRING_STRING, OP, NUMBER, OP, FSTRING_STRING,
+            FSTRING_END
+        ]),
+
+        # a line continuation inside of an fstring_expr
+        ('f"{\\\n123}"', [FSTRING_START, OP, NUMBER, OP, FSTRING_END]),
+
+        # a line continuation inside of an format spec
+        ('f"{123:.2\\\nf}"', [
+            FSTRING_START, OP, NUMBER, OP, FSTRING_STRING, OP, FSTRING_END
+        ]),
+
+        # a newline without a line continuation inside a single-line string is
+        # wrong, and will generate an ERRORTOKEN
+        ('f"abc\ndef"', [
+            FSTRING_START, FSTRING_STRING, NEWLINE, NAME, ERRORTOKEN
+        ]),
+
+        # a more complex example
         (r'print(f"Some {x:.2f}a{y}")', [
             NAME, OP, FSTRING_START, FSTRING_STRING, OP, NAME, OP,
             FSTRING_STRING, OP, FSTRING_STRING, OP, NAME, OP, FSTRING_END, OP
         ]),
+        # issue #86, a string-like in an f-string expression
+        ('f"{ ""}"', [
+            FSTRING_START, OP, FSTRING_END, STRING
+        ]),
+        ('f"{ f""}"', [
+            FSTRING_START, OP, NAME, FSTRING_END, STRING
+        ]),
     ]
 )
 def test_fstring(code, types, version_ge_py36):
     actual_types = [t.type for t in _get_token_list(code, version_ge_py36)]
     assert types + [ENDMARKER] == actual_types
+
+
+@pytest.mark.parametrize(
+    ('code', 'types'), [
+        # issue #87, `:=` in the outest paratheses should be tokenized
+        # as a format spec marker and part of the format
+        ('f"{x:=10}"', [
+            FSTRING_START, OP, NAME, OP, FSTRING_STRING, OP, FSTRING_END
+        ]),
+        ('f"{(x:=10)}"', [
+            FSTRING_START, OP, OP, NAME, OP, NUMBER, OP, OP, FSTRING_END
+        ]),
+    ]
+)
+def test_fstring_assignment_expression(code, types, version_ge_py38):
+    actual_types = [t.type for t in _get_token_list(code, version_ge_py38)]
+    assert types + [ENDMARKER] == actual_types
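A sketch of the issue #87 behaviour pinned down above: in the outermost f-string braces, `:=` is treated as a format-spec colon plus spec text, while parenthesizing restores the walrus tokens:

    from parso.utils import parse_version_string
    from parso.python.tokenize import tokenize

    for code in ('f"{x:=10}"', 'f"{(x:=10)}"'):
        tokens = tokenize(code, version_info=parse_version_string('3.8'))
        print([t.type for t in tokens])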
tox.ini | 16
@@ -1,19 +1,15 @@
 [tox]
-envlist = py27, py33, py34, py35, py36, py37, pypy
+envlist = {py27,py34,py35,py36,py37,py38}
 [testenv]
 extras = testing
 deps =
-    py26,py33: pytest>=3.0.7,<3.3
-    py26,py33: setuptools<37
+    py27,py34: pytest<3.3
+    coverage: coverage
 setenv =
     # https://github.com/tomchristie/django-rest-framework/issues/1957
     # tox corrupts __pycache__, solution from here:
     PYTHONDONTWRITEBYTECODE=1
+    coverage: TOX_TESTENV_COMMAND=coverage run -m pytest
 commands =
-    pytest {posargs:parso test}
-[testenv:cov]
-deps =
-    coverage
-commands =
-    coverage run --source parso -m pytest
-    coverage report
+    {env:TOX_TESTENV_COMMAND:pytest} {posargs}
+    coverage: coverage report