59 Commits

Dave Halter
3b263f0a0d Fix a failing test 2020-07-24 01:01:23 +02:00
Dave Halter
f52103f236 Prepare 0.7.1 release 2020-07-24 00:54:07 +02:00
Dave Halter
c53321a440 Comprehensions are not valid as class params, fixes #122 2020-07-24 00:32:24 +02:00
Dave Halter
d8a70abf19 Merge pull request #145 from PeterJCLaw/expose-type-stubs
Let consumers know that we have type annotations
2020-07-21 23:42:01 +02:00
Peter Law
c19d7c4e6d Let consumers know that we have type annotations
As well as the type stubs, this includes both the py.typed flag
file (for tools) and the classifier (for people).
2020-07-21 22:33:39 +01:00
Batuhan Taskaya
d42c0f1b3b Merge pull request #143 from Carreau/parse-alpha
Parse alpha, beta and rc version strings.
2020-07-01 11:14:40 +03:00
Matthias Bussonnier
40e78ff7e0 Parse alpha, beta and rc version strings.
fixes #142
2020-06-30 13:28:09 -07:00
Batuhan Taskaya
c88a2675b0 Merge pull request #140 from Kazy/fix-139-async-for-newline
Fix #139: newlines in async for comprehension
2020-06-29 20:01:53 +03:00
Jocelyn Boullier
88874a5a9f Fix #139: newlines in async for comprehension 2020-06-29 18:40:55 +02:00
Dave Halter
1e4076f9d9 Merge pull request #141 from isidentical/f-string-errors
Handle 3.9+ f-string errors
2020-06-29 00:03:57 +02:00
Batuhan Taskaya
73796f309d Just raise the f-string error, pass the others 2020-06-28 19:53:57 +03:00
Batuhan Taskaya
1cacdf366e Raise custom errors after break tokens 2020-06-28 19:48:11 +03:00
Batuhan Taskaya
d352bede13 Cover errors that are raised by ErrorFinder 2020-06-28 19:37:22 +03:00
Batuhan Taskaya
572be783f3 Cover invalid syntaxes 2020-06-28 18:41:18 +03:00
Batuhan Taskaya
31171d7ae6 Handle 3.9+ f-string errors 2020-06-28 18:04:42 +03:00
Dave Halter
7e0586b0b9 Add a PyPI downloads badge 2020-06-27 15:18:27 +02:00
Dave Halter
cc347b1d3b Merge pull request #137 from isidentical/cannot-delete-starred
Update starred deletion messages for 3.9+
2020-06-22 00:15:01 +02:00
Batuhan Taskaya
841a5d96b3 Update starred deletion messages for 3.9+ 2020-06-21 19:47:18 +03:00
Dave Halter
d68b4e0cab Use Python 3 in deployment script 2020-06-20 01:21:35 +02:00
Dave Halter
d55b4f08dc Merge pull request #136 from davidhalter/permission_errors
Ignore permission errors when saving to cache
2020-06-19 20:27:59 +02:00
Dave Halter
58790c119e Fix issues of #136 2020-06-19 20:20:00 +02:00
Dave Halter
3923ecf12f Ignore permission errors when saving to cache
This might happen when a user doesn't have full access to their home directory.
Fixes davidhalter/jedi#1615
2020-06-19 12:06:46 +02:00
Dave Halter
bd33e4ef7e Merge pull request #135 from isidentical/starred-expr
Improve handling of starred expressions in different contexts
2020-06-05 12:58:14 +02:00
Batuhan Taskaya
891bfdaa04 Test only Python 3+ 2020-06-04 22:09:04 +03:00
Batuhan Taskaya
5e1828b3f0 Check full error message 2020-06-04 22:02:12 +03:00
Batuhan Taskaya
6daf91880b Add a special case against augassign 2020-06-04 21:47:28 +03:00
Batuhan Taskaya
44cf64a5f7 Improve handling of starred expressions in different contexts (load/store) 2020-06-04 21:35:48 +03:00
Batuhan Taskaya
fe24f0dc1b Implement garbage collections for inactive cache files (#121)
Cache files that weren't accessed in the last 30 days will be automatically
garbage collected. The collection happens when `save_module` is called,
gated by a lock system so that it runs at most once per day.
2020-06-02 12:36:05 +03:00
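The once-per-day gating described here works roughly as follows (a minimal sketch of the idea, not the actual implementation; the real code is in the parso/cache.py diff further down, and the names here are stand-ins):

import os
import time

_DAY = 60 * 60 * 24  # mirrors _CACHE_CLEAR_THRESHOLD in the diff below

def maybe_collect_garbage(lock_path, collect):
    # The lock file's mtime records when the last collection ran.
    try:
        last_cleared = os.path.getmtime(lock_path)
    except OSError:
        last_cleared = None  # no lock file yet: first run
    if last_cleared is None or last_cleared + _DAY <= time.time():
        # Touch the lock first so concurrent processes skip the work.
        with open(lock_path, 'a'):
            pass
        os.utime(lock_path, None)
        collect()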
Dave Halter
450e9d0a19 Merge pull request #130 from yuan-xy/patch-1
fix dump_nfa
2020-05-30 12:11:08 +02:00
yuan
93b5e6dffc Fix a one-word typo 2020-05-29 10:30:08 +03:00
yuan
4403b5cac5 Update generator.py 2020-05-29 08:56:38 +08:00
Batuhan Taskaya
6f29c551fd Adjust invalid aug assign target for 3.9+ 2020-05-27 00:55:31 +02:00
Dave Halter
d6b1d19d87 Merge pull request #129 from isidentical/extended-rhs-for-annassign
Extend annotated assignment rule's RHS
2020-05-26 00:13:46 +02:00
Batuhan Taskaya
e0dc415bbc Extend annotated assignment rule's RHS 2020-05-26 01:10:04 +03:00
Batuhan Taskaya
4c2c0ad077 Add python3.10 grammar (#125) 2020-05-26 00:58:09 +03:00
Batuhan Taskaya
5daa8b1db6 Merge pull request #124 from isidentical/nightly-builds 2020-05-25 00:18:29 +03:00
Batuhan Taskaya
c05e14c24e Test parso on nightly builds 2020-05-25 00:11:46 +03:00
Dave Halter
846513584e Merge pull request #119 from isidentical/check-all-args
Check all arguments for unparenthesized generator expressions
2020-05-23 23:18:00 +02:00
Batuhan Taskaya
6b0e01c220 Revert trailing comma for <3.6 2020-05-23 21:17:08 +03:00
Batuhan Taskaya
92396a9a16 allow trailing comma <3.6, test both positive/negative cases 2020-05-23 17:45:20 +03:00
Batuhan Taskaya
fe54800cdd Check all arguments for unparenthesized generator expressions
Previously only the first argument in the argument list was checked
against generator expressions; now all arguments are checked.
2020-05-23 16:57:34 +03:00
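The effect is easy to check through parso's public API (a sketch, assuming the 0.7.x interface):

import parso

grammar = parso.load_grammar()
ok = grammar.parse("f(x, (a for a in b))")   # parenthesized: allowed
bad = grammar.parse("f(x, a for a in b)")    # unparenthesized and not the sole argument
assert not list(grammar.iter_errors(ok))
assert list(grammar.iter_errors(bad))        # "Generator expression must be parenthesized"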
Dave Halter
6ecd975516 Merge pull request #117 from isidentical/repeated-kwarg-39
Show which keyword argument is repeated on 3.9+
2020-05-23 15:15:14 +02:00
Batuhan Taskaya
27a7c16803 assert full message 2020-05-23 15:51:00 +03:00
Batuhan Taskaya
a06521d912 Don't give syntax errors for parenthesised kwargs <3.8 2020-05-23 14:43:43 +02:00
Batuhan Taskaya
216a77dce5 Show which keyword argument is repeated on 3.9+ 2020-05-23 14:06:24 +03:00
Dave Halter
8bb211fafb Merge pull request #116 from isidentical/forbidden-name
Raise violation on starred expressions where the child is a boolean/none
2020-05-23 11:51:08 +02:00
Batuhan Taskaya
342e308f57 Move checking to the _CheckAssignmentRule 2020-05-23 01:18:23 +03:00
Batuhan Taskaya
8f46481aaf Raise violation on starred expressions where the child is a boolean/none 2020-05-23 01:09:38 +03:00
Dave Halter
00621977b7 Merge pull request #115 from isidentical/finally-in-continue
Support continue in finally on 3.8+
2020-05-22 23:44:26 +02:00
Batuhan Taskaya
077e34be84 Support continue in finally on 3.8+
Thanks to [bpo-32489](https://bugs.python.org/issue32489), and sadly
to the rejection of my [PEP 601](https://www.python.org/dev/peps/pep-0601/),
continue in finally is supported in 3.8+. I checked the blame and it looks
like there was already a commit for the same subject, but it only changed
the test and not the checker itself (dfe7fba08e)
2020-05-22 18:47:46 +03:00
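The version dependence can be verified with parso directly (a sketch; expected output is noted in the comments):

import parso

code = "for x in [1]:\n    try:\n        pass\n    finally:\n        continue\n"
for version in ("3.7", "3.8"):
    grammar = parso.load_grammar(version=version)
    errors = list(grammar.iter_errors(grammar.parse(code)))
    print(version, [e.message for e in errors])
# expected: one error on 3.7 (continue inside finally), none on 3.8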
Dave Halter
a3f851d8f6 Merge pull request #114 from isidentical/future-annotations
Add support for 'from __future__ import annotations'
2020-05-22 16:18:53 +02:00
Batuhan Taskaya
261132e74c Add support for 'from __future__ import annotations'
PEP 563 introduced a new `__future__` import in 3.7 for postponing the
evaluation of annotations. This patch adds support for that future
feature, and removes 'all_feature_names' from the allowed list since
`from __future__ import all_feature_names` is not valid syntax. It also
fixes a previously unnoticed bug: `ALLOWED_FUTURES` (the global,
version-independent flags) was used instead of `allowed_futures` (the
extended variant that includes version-specific flags)
2020-05-22 17:14:33 +03:00
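A quick check of the new behavior through parso's public API (a sketch):

import parso

code = "from __future__ import annotations\n"
for version in ("3.6", "3.7"):
    grammar = parso.load_grammar(version=version)
    errors = list(grammar.iter_errors(grammar.parse(code)))
    print(version, [e.message for e in errors])
# expected: "future feature annotations is not defined" on 3.6, no errors on 3.7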
Batuhan Taskaya
345374d040 Allow 'any' expression on decorators, PEP 614 2020-05-22 10:17:17 +02:00
Batuhan Taskaya
f8709852e3 Adapt Python 3.9 errors for multiple star targets
In Python 3.9, the message "two starred expressions in ..." changed
to "multiple starred expressions in ...", with python/cpython#19168
2020-05-21 20:46:41 +02:00
Batuhan Taskaya
2dcc0d3770 Quick fix for the invalid version test 2020-05-21 20:45:10 +02:00
Batuhan Taskaya
34b8b7dd79 Correctly parse 2-digit minor versions (py3.10) 2020-05-21 16:21:22 +02:00
WinChua
caadf3bf4c Improve hint message when the Python version is unsupported
Currently, when the Python version used is not supported, it raises "Python version None is currently not supported."
2020-05-17 16:52:40 +02:00
Dave Halter
1b4c75608a Fix a python_bytes_to_unicode issue, fixes #107 2020-05-14 23:34:14 +02:00
Dave Halter
15403fd998 Use a Windows cache folder change from Jedi
See also 1115cbd94dcae6fb7b215c51f0407333c92c956e in Jedi and the PR in davidhalter/jedi#1575
2020-05-10 11:50:00 +02:00
30 changed files with 876 additions and 128 deletions


@@ -7,9 +7,12 @@ python:
- 3.6
- 3.7
- 3.8.2
- nightly
- pypy2.7-6.0
- pypy3.5-6.0
matrix:
allow_failures:
- python: nightly
include:
- python: 3.5
env: TOXENV=py35-coverage


@@ -50,6 +50,8 @@ Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>
Simon Ruggier (@sruggier)
Élie Gouzien (@ElieGouzien)
Tim Gates (@timgates42) <tim.gates@iress.com>
Batuhan Taskaya (@isidentical) <isidentical@gmail.com>
Jocelyn Boullier (@Kazy) <jocelyn@boullier.bzh>
Note: (@user) means a github user name.


@@ -3,6 +3,14 @@
Changelog
---------
0.7.1 (2020-07-24)
++++++++++++++++++
- Fixed a couple of smaller bugs (mostly syntax error detection in
``Grammar.iter_errors``)
This is going to be the last release that supports Python 2.7, 3.4 and 3.5.
0.7.0 (2020-04-13)
++++++++++++++++++


@@ -11,6 +11,10 @@ parso - A Python Parser
:target: https://coveralls.io/github/davidhalter/parso?branch=master
:alt: Coverage Status
.. image:: https://pepy.tech/badge/parso
:target: https://pepy.tech/project/parso
:alt: PyPI Downloads
.. image:: https://raw.githubusercontent.com/davidhalter/parso/master/docs/_static/logo_characters.png
Parso is a Python parser that supports error recovery and round-trip parsing


@@ -158,8 +158,17 @@ def works_ge_py35(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 5))
@pytest.fixture
def works_ge_py36(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 6))
@pytest.fixture
def works_ge_py38(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 8))
@pytest.fixture
def works_ge_py39(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 9))
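These four fixtures differ only in the version bound; a factory could generate them, as in this sketch (not part of the diff; `_works_ge` is an invented helper that relies on pytest's `name=` parameter, while `each_version`, `parse_version_string` and `Checker` come from the surrounding conftest):

import pytest

def _works_ge(minimum, name):
    # Build a works_ge_pyXY-style fixture bound to a minimum grammar version.
    @pytest.fixture(name=name)
    def fixture(each_version):
        version_info = parse_version_string(each_version)
        return Checker(each_version, version_info >= minimum)
    return fixture

works_ge_py38 = _works_ge((3, 8), "works_ge_py38")
works_ge_py39 = _works_ge((3, 9), "works_ge_py39")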


@@ -26,7 +26,7 @@ git checkout $BRANCH
tox
# Create tag
tag=v$(python -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)")
tag=v$(python3 -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)")
master_ref=$(git show-ref -s heads/$BRANCH)
tag_ref=$(git show-ref -s $tag || true)
@@ -43,7 +43,7 @@ fi
# Package and upload to PyPI
#rm -rf dist/ - Not needed anymore, because the folder is never reused.
echo `pwd`
python setup.py sdist bdist_wheel
python3 setup.py sdist bdist_wheel
# Maybe do a pip install twine before.
twine upload dist/*


@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
from parso.utils import split_lines, python_bytes_to_unicode
__version__ = '0.7.0'
__version__ = '0.7.1'
def parse(code=None, **kwargs):


@@ -2,6 +2,7 @@
To ensure compatibility from Python ``2.7`` - ``3.3``, a module has been
created. Clearly there is huge need to use conforming syntax.
"""
import os
import sys
import platform
@@ -44,11 +45,17 @@ def u(string):
try:
    # Python 3.3+
    FileNotFoundError = FileNotFoundError
except NameError:
    # Python 2.7
    FileNotFoundError = IOError
    # Python 2.7 (both IOError + OSError)
    FileNotFoundError = EnvironmentError

try:
    # Python 3.3+
    PermissionError = PermissionError
except NameError:
    # Python 2.7 (both IOError + OSError)
    PermissionError = EnvironmentError
def utf8_repr(func):
@@ -67,3 +74,28 @@ def utf8_repr(func):
return func
else:
return wrapper
if sys.version_info < (3, 5):
"""
A super-minimal shim around listdir that behaves like
scandir for the information we need.
"""
class _DirEntry:
def __init__(self, name, basepath):
self.name = name
self.basepath = basepath
@property
def path(self):
return os.path.join(self.basepath, self.name)
def stat(self):
# won't follow symlinks
return os.lstat(os.path.join(self.basepath, self.name))
def scandir(dir):
return [_DirEntry(name, dir) for name in os.listdir(dir)]
else:
from os import scandir
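Usage mirrors the small subset of os.scandir that the cache code relies on (a sketch; the directory argument is a placeholder):

import os
from parso._compatibility import scandir

for entry in scandir(os.getcwd()):
    # The shim guarantees only .name, .path and .stat(); stat() uses
    # lstat, so symlinks are not followed.
    print(entry.name, entry.path, entry.stat().st_atime)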


@@ -7,13 +7,15 @@ import shutil
import platform
import errno
import logging
import warnings
try:
import cPickle as pickle
except:
import pickle
from parso._compatibility import FileNotFoundError
from parso._compatibility import FileNotFoundError, PermissionError, scandir
from parso.file_io import FileIO
LOG = logging.getLogger(__name__)
@@ -21,6 +23,13 @@ _CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10 # 10 minutes
"""
Cached files should survive at least a few minutes.
"""
_CACHED_FILE_MAXIMUM_SURVIVAL = 60 * 60 * 24 * 30
"""
Maximum time for a cached file to survive if it is not
accessed within this period.
"""
_CACHED_SIZE_TRIGGER = 600
"""
This setting limits the amount of cached files. It's basically a way to start
@@ -63,7 +72,8 @@ http://docs.python.org/3/library/sys.html#sys.implementation
def _get_default_cache_path():
if platform.system().lower() == 'windows':
dir_ = os.path.join(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso')
dir_ = os.path.join(os.getenv('LOCALAPPDATA')
or os.path.expanduser('~'), 'Parso', 'Parso')
elif platform.system().lower() == 'darwin':
dir_ = os.path.join('~', 'Library', 'Caches', 'Parso')
else:
@@ -81,6 +91,19 @@ On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
``$XDG_CACHE_HOME/parso`` is used instead of the default one.
"""
_CACHE_CLEAR_THRESHOLD = 60 * 60 * 24
def _get_cache_clear_lock(cache_path = None):
"""
The path where the cache lock is stored.
The cache lock will prevent continuous cache clearing and only allow garbage
collection once a day (can be configured in _CACHE_CLEAR_THRESHOLD).
"""
cache_path = cache_path or _get_default_cache_path()
return FileIO(os.path.join(cache_path, "PARSO-CACHE-LOCK"))
parser_cache = {}
@@ -160,7 +183,7 @@ def _set_cache_item(hashed_grammar, path, module_cache_item):
parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
def save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
path = file_io.path
try:
p_time = None if path is None else file_io.get_last_modified()
@@ -171,7 +194,18 @@ def save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_pat
item = _NodeCacheItem(module, lines, p_time)
_set_cache_item(hashed_grammar, path, item)
if pickling and path is not None:
_save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
try:
_save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
except PermissionError:
# It's not really a big issue if the cache cannot be saved to the
# file system. It's still in RAM in that case. However we should
# still warn the user that this is happening.
warnings.warn(
'Tried to save a file to %s, but got permission denied.',
Warning
)
else:
_remove_cache_and_update_lock(cache_path=cache_path)
def _save_to_file_system(hashed_grammar, path, item, cache_path=None):
@@ -186,6 +220,46 @@ def clear_cache(cache_path=None):
parser_cache.clear()
def clear_inactive_cache(
cache_path=None,
inactivity_threshold=_CACHED_FILE_MAXIMUM_SURVIVAL,
):
if cache_path is None:
cache_path = _get_default_cache_path()
if not os.path.exists(cache_path):
return False
for version_path in os.listdir(cache_path):
version_path = os.path.join(cache_path, version_path)
if not os.path.isdir(version_path):
continue
for file in scandir(version_path):
if (
file.stat().st_atime + _CACHED_FILE_MAXIMUM_SURVIVAL
<= time.time()
):
try:
os.remove(file.path)
except OSError: # silently ignore all failures
continue
else:
return True
def _remove_cache_and_update_lock(cache_path = None):
lock = _get_cache_clear_lock(cache_path=cache_path)
clear_lock_time = lock.get_last_modified()
if (
clear_lock_time is None # first time
or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
):
if not lock._touch():
# First make sure that as few as possible other cleanup jobs also
# get started. There is still a race condition but it's probably
# not a big problem.
return False
clear_inactive_cache(cache_path = cache_path)
def _get_hashed_path(hashed_grammar, path, cache_path=None):
directory = _get_cache_directory_path(cache_path=cache_path)


@@ -1,4 +1,5 @@
import os
from parso._compatibility import FileNotFoundError
class FileIO(object):
@@ -22,6 +23,17 @@ class FileIO(object):
# Might raise FileNotFoundError, OSError for Python 2
return None
def _touch(self):
try:
os.utime(self.path, None)
except FileNotFoundError:
try:
file = open(self.path, 'a')
file.close()
except (OSError, IOError): # TODO Maybe log this?
return False
return True
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.path)


@@ -7,7 +7,7 @@ from parso.utils import split_lines, python_bytes_to_unicode, parse_version_stri
from parso.python.diff import DiffParser
from parso.python.tokenize import tokenize_lines, tokenize
from parso.python.token import PythonTokenTypes
from parso.cache import parser_cache, load_module, save_module
from parso.cache import parser_cache, load_module, try_to_save_module
from parso.parser import BaseParser
from parso.python.parser import Parser as PythonParser
from parso.python.errors import ErrorFinderConfig
@@ -132,7 +132,7 @@ class Grammar(object):
old_lines=old_lines,
new_lines=lines
)
save_module(self._hashed, file_io, new_node, lines,
try_to_save_module(self._hashed, file_io, new_node, lines,
# Never pickle in pypy, it's slow as hell.
pickling=cache and not is_pypy,
cache_path=cache_path)
@@ -148,7 +148,7 @@ class Grammar(object):
root_node = p.parse(tokens=tokens)
if cache or diff_cache:
save_module(self._hashed, file_io, root_node, lines,
try_to_save_module(self._hashed, file_io, root_node, lines,
# Never pickle in pypy, it's slow as hell.
pickling=cache and not is_pypy,
cache_path=cache_path)
@@ -252,7 +252,7 @@ def load_grammar(**kwargs):
grammar = PythonGrammar(version_info, bnf_text)
return _loaded_grammars.setdefault(path, grammar)
except FileNotFoundError:
message = "Python version %s is currently not supported." % version
message = "Python version %s.%s is currently not supported." % (version_info.major, version_info.minor)
raise NotImplementedError(message)
else:
raise NotImplementedError("No support for language %s." % language)


@@ -163,7 +163,7 @@ class Rule(object):
def get_node(self, node):
return node
def _get_message(self, message):
def _get_message(self, message, node):
if message is None:
message = self.message
if message is None:
@@ -176,7 +176,7 @@ class Rule(object):
if code is None:
raise ValueError("The error code on the class is not set.")
message = self._get_message(message)
message = self._get_message(message, node)
self._normalizer.add_issue(node, code, message)


@@ -212,7 +212,8 @@ def _dump_nfa(start, finish):
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for label, next_ in state.arcs:
for arc in state.arcs:
label, next_ = arc.nonterminal_or_string, arc.next
if next_ in todo:
j = todo.index(next_)
else:
@@ -244,7 +245,7 @@ def generate_grammar(bnf_grammar, token_namespace):
rule_to_dfas = {}
start_nonterminal = None
for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse():
#_dump_nfa(a, z)
#_dump_nfa(nfa_a, nfa_z)
dfas = _make_dfas(nfa_a, nfa_z)
#_dump_dfas(dfas)
# oldlen = len(dfas)

parso/py.typed (new, empty file)


@@ -6,6 +6,7 @@ from contextlib import contextmanager
from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
from parso.python.tree import search_ancestor
from parso.python.tokenize import _get_token_collection
_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
@@ -13,11 +14,84 @@ _STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
_MAX_BLOCK_SIZE = 20
_MAX_INDENT_COUNT = 100
ALLOWED_FUTURES = (
'all_feature_names', 'nested_scopes', 'generators', 'division',
'absolute_import', 'with_statement', 'print_function', 'unicode_literals',
'nested_scopes', 'generators', 'division', 'absolute_import',
'with_statement', 'print_function', 'unicode_literals',
)
_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
def _get_rhs_name(node, version):
type_ = node.type
if type_ == "lambdef":
return "lambda"
elif type_ == "atom":
comprehension = _get_comprehension_type(node)
first, second = node.children[:2]
if comprehension is not None:
return comprehension
elif second.type == "dictorsetmaker":
if version < (3, 8):
return "literal"
else:
if second.children[1] == ":" or second.children[0] == "**":
return "dict display"
else:
return "set display"
elif (
first == "("
and (second == ")"
or (len(node.children) == 3 and node.children[1].type == "testlist_comp"))
):
return "tuple"
elif first == "(":
return _get_rhs_name(_remove_parens(node), version=version)
elif first == "[":
return "list"
elif first == "{" and second == "}":
return "dict display"
elif first == "{" and len(node.children) > 2:
return "set display"
elif type_ == "keyword":
if "yield" in node.value:
return "yield expression"
if version < (3, 8):
return "keyword"
else:
return str(node.value)
elif type_ == "operator" and node.value == "...":
return "Ellipsis"
elif type_ == "comparison":
return "comparison"
elif type_ in ("string", "number", "strings"):
return "literal"
elif type_ == "yield_expr":
return "yield expression"
elif type_ == "test":
return "conditional expression"
elif type_ in ("atom_expr", "power"):
if node.children[0] == "await":
return "await expression"
elif node.children[-1].type == "trailer":
trailer = node.children[-1]
if trailer.children[0] == "(":
return "function call"
elif trailer.children[0] == "[":
return "subscript"
elif trailer.children[0] == ".":
return "attribute"
elif (
("expr" in type_
and "star_expr" not in type_) # is a substring
or "_test" in type_
or type_ in ("term", "factor")
):
return "operator"
elif type_ == "star_expr":
return "starred"
elif type_ == "testlist_star_expr":
return "tuple"
elif type_ == "fstring":
return "f-string expression"
return type_ # shouldn't reach here
def _iter_stmts(scope):
"""
@@ -136,6 +210,21 @@ def _get_for_stmt_definition_exprs(for_stmt):
return list(_iter_definition_exprs_from_lists(exprlist))
def _is_argument_comprehension(argument):
return argument.children[1].type in _COMP_FOR_TYPES
def _any_fstring_error(version, node):
if version < (3, 9) or node is None:
return False
if node.type == "error_node":
return any(child.type == "fstring_start" for child in node.children)
elif node.type == "fstring":
return True
else:
return search_ancestor(node, "fstring")
class _Context(object):
def __init__(self, node, add_syntax_error, parent_context=None):
self.node = node
@@ -333,6 +422,11 @@ class ErrorFinder(Normalizer):
match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value)
if match is None:
message = 'invalid syntax'
if (
self.version >= (3, 9)
and leaf.value in _get_token_collection(self.version).always_break_tokens
):
message = "f-string: " + message
else:
if len(match.group(1)) == 1:
message = 'EOL while scanning string literal'
@@ -371,8 +465,8 @@ class ErrorFinder(Normalizer):
class IndentationRule(Rule):
code = 903
def _get_message(self, message):
message = super(IndentationRule, self)._get_message(message)
def _get_message(self, message, node):
message = super(IndentationRule, self)._get_message(message, node)
return "IndentationError: " + message
@@ -396,21 +490,34 @@ class ErrorFinderConfig(NormalizerConfig):
class SyntaxRule(Rule):
code = 901
def _get_message(self, message):
message = super(SyntaxRule, self)._get_message(message)
def _get_message(self, message, node):
message = super(SyntaxRule, self)._get_message(message, node)
if (
"f-string" not in message
and _any_fstring_error(self._normalizer.version, node)
):
message = "f-string: " + message
return "SyntaxError: " + message
@ErrorFinder.register_rule(type='error_node')
class _InvalidSyntaxRule(SyntaxRule):
message = "invalid syntax"
fstring_message = "f-string: invalid syntax"
def get_node(self, node):
return node.get_next_leaf()
def is_issue(self, node):
# Error leafs will be added later as an error.
return node.get_next_leaf().type != 'error_leaf'
error = node.get_next_leaf().type != 'error_leaf'
if (
error
and _any_fstring_error(self._normalizer.version, node)
):
self.add_issue(node, message=self.fstring_message)
else:
# Error leafs will be added later as an error.
return error
@ErrorFinder.register_rule(value='await')
@@ -449,7 +556,11 @@ class _ContinueChecks(SyntaxRule):
in_loop = True
if block.type == 'try_stmt':
last_block = block.children[-3]
if last_block == 'finally' and leaf.start_pos > last_block.start_pos:
if (
last_block == "finally"
and leaf.start_pos > last_block.start_pos
and self._normalizer.version < (3, 8)
):
self.add_issue(leaf, message=self.message_in_finally)
return False # Error already added
if not in_loop:
@@ -622,26 +733,24 @@ class _FutureImportRule(SyntaxRule):
allowed_futures = list(ALLOWED_FUTURES)
if self._normalizer.version >= (3, 5):
allowed_futures.append('generator_stop')
if self._normalizer.version >= (3, 7):
allowed_futures.append('annotations')
if name == 'braces':
self.add_issue(node, message="not a chance")
elif name == 'barry_as_FLUFL':
m = "Seriously I'm not implementing this :) ~ Dave"
self.add_issue(node, message=m)
elif name not in ALLOWED_FUTURES:
elif name not in allowed_futures:
message = "future feature %s is not defined" % name
self.add_issue(node, message=message)
@ErrorFinder.register_rule(type='star_expr')
class _StarExprRule(SyntaxRule):
message = "starred assignment target must be in a list or tuple"
message_iterable_unpacking = "iterable unpacking cannot be used in comprehension"
message_assignment = "can use starred expression only as assignment target"
def is_issue(self, node):
if node.parent.type not in _STAR_EXPR_PARENTS:
return True
if node.parent.type == 'testlist_comp':
# [*[] for a in [1]]
if node.parent.children[1].type in _COMP_FOR_TYPES:
@@ -665,7 +774,10 @@ class _StarExprRule(SyntaxRule):
class _StarExprParentRule(SyntaxRule):
def is_issue(self, node):
if node.parent.type == 'del_stmt':
self.add_issue(node.parent, message="can't use starred expression here")
if self._normalizer.version >= (3, 9):
self.add_issue(node.parent, message="cannot delete starred")
else:
self.add_issue(node.parent, message="can't use starred expression here")
else:
def is_definition(node, ancestor):
if ancestor is None:
@@ -684,7 +796,10 @@ class _StarExprParentRule(SyntaxRule):
args = [c for c in node.children if c != ',']
starred = [c for c in args if c.type == 'star_expr']
if len(starred) > 1:
message = "two starred expressions in assignment"
if self._normalizer.version < (3, 9):
message = "two starred expressions in assignment"
else:
message = "multiple starred expressions in assignment"
self.add_issue(starred[1], message=message)
elif starred:
count = args.index(starred[0])
@@ -734,6 +849,9 @@ class _AnnotatorRule(SyntaxRule):
class _ArgumentRule(SyntaxRule):
def is_issue(self, node):
first = node.children[0]
if self._normalizer.version < (3, 8):
# a((b)=c) is valid in <3.8
first = _remove_parens(first)
if node.children[1] == '=' and first.type != 'name':
if first.type == 'lambdef':
# f(lambda: 1=1)
@@ -749,6 +867,9 @@ class _ArgumentRule(SyntaxRule):
message = 'expression cannot contain assignment, perhaps you meant "=="?'
self.add_issue(first, message=message)
if _is_argument_comprehension(node) and node.parent.type == 'classdef':
self.add_issue(node, message='invalid syntax')
@ErrorFinder.register_rule(type='nonlocal_stmt')
class _NonlocalModuleLevelRule(SyntaxRule):
@@ -768,59 +889,60 @@ class _ArglistRule(SyntaxRule):
return "Generator expression must be parenthesized"
def is_issue(self, node):
first_arg = node.children[0]
if first_arg.type == 'argument' \
and first_arg.children[1].type in _COMP_FOR_TYPES:
# e.g. foo(x for x in [], b)
return len(node.children) >= 2
else:
arg_set = set()
kw_only = False
kw_unpacking_only = False
is_old_starred = False
# In python 3 this would be a bit easier (stars are part of
# argument), but we have to understand both.
for argument in node.children:
if argument == ',':
continue
arg_set = set()
kw_only = False
kw_unpacking_only = False
is_old_starred = False
# In python 3 this would be a bit easier (stars are part of
# argument), but we have to understand both.
for argument in node.children:
if argument == ',':
continue
if argument in ('*', '**'):
# Python < 3.5 has the order engraved in the grammar
# file. No need to do anything here.
is_old_starred = True
continue
if is_old_starred:
is_old_starred = False
continue
if argument in ('*', '**'):
# Python < 3.5 has the order engraved in the grammar
# file. No need to do anything here.
is_old_starred = True
continue
if is_old_starred:
is_old_starred = False
continue
if argument.type == 'argument':
first = argument.children[0]
if first in ('*', '**'):
if first == '*':
if kw_unpacking_only:
# foo(**kwargs, *args)
message = "iterable argument unpacking " \
"follows keyword argument unpacking"
self.add_issue(argument, message=message)
if argument.type == 'argument':
first = argument.children[0]
if _is_argument_comprehension(argument) and len(node.children) >= 2:
# a(a, b for b in c)
return True
if first in ('*', '**'):
if first == '*':
if kw_unpacking_only:
# foo(**kwargs, *args)
message = "iterable argument unpacking " \
"follows keyword argument unpacking"
self.add_issue(argument, message=message)
else:
kw_unpacking_only = True
else: # Is a keyword argument.
kw_only = True
if first.type == 'name':
if first.value in arg_set:
# f(x=1, x=2)
message = "keyword argument repeated"
if self._normalizer.version >= (3, 9):
message += ": {}".format(first.value)
self.add_issue(first, message=message)
else:
kw_unpacking_only = True
else: # Is a keyword argument.
kw_only = True
if first.type == 'name':
if first.value in arg_set:
# f(x=1, x=2)
self.add_issue(first, message="keyword argument repeated")
else:
arg_set.add(first.value)
else:
if kw_unpacking_only:
# f(**x, y)
message = "positional argument follows keyword argument unpacking"
self.add_issue(argument, message=message)
elif kw_only:
# f(x=2, y)
message = "positional argument follows keyword argument"
self.add_issue(argument, message=message)
arg_set.add(first.value)
else:
if kw_unpacking_only:
# f(**x, y)
message = "positional argument follows keyword argument unpacking"
self.add_issue(argument, message=message)
elif kw_only:
# f(x=2, y)
message = "positional argument follows keyword argument"
self.add_issue(argument, message=message)
@ErrorFinder.register_rule(type='parameters')
@@ -898,7 +1020,7 @@ class _FStringRule(SyntaxRule):
class _CheckAssignmentRule(SyntaxRule):
def _check_assignment(self, node, is_deletion=False, is_namedexpr=False):
def _check_assignment(self, node, is_deletion=False, is_namedexpr=False, is_aug_assign=False):
error = None
type_ = node.type
if type_ == 'lambdef':
@@ -915,6 +1037,16 @@ class _CheckAssignmentRule(SyntaxRule):
error = 'dict display'
else:
error = 'set display'
elif first == "{" and second == "}":
if self._normalizer.version < (3, 8):
error = 'literal'
else:
error = "dict display"
elif first == "{" and len(node.children) > 2:
if self._normalizer.version < (3, 8):
error = 'literal'
else:
error = "set display"
elif first in ('(', '['):
if second.type == 'yield_expr':
error = 'yield expression'
@@ -930,11 +1062,13 @@ class _CheckAssignmentRule(SyntaxRule):
# This is not a comprehension, they were handled
# further above.
for child in second.children[::2]:
self._check_assignment(child, is_deletion, is_namedexpr)
self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
else: # Everything handled, must be useless brackets.
self._check_assignment(second, is_deletion, is_namedexpr)
self._check_assignment(second, is_deletion, is_namedexpr, is_aug_assign)
elif type_ == 'keyword':
if self._normalizer.version < (3, 8):
if node.value == "yield":
error = "yield expression"
elif self._normalizer.version < (3, 8):
error = 'keyword'
else:
error = str(node.value)
@@ -966,13 +1100,28 @@ class _CheckAssignmentRule(SyntaxRule):
error = 'subscript'
elif is_namedexpr and trailer.children[0] == '.':
error = 'attribute'
elif type_ == "fstring":
if self._normalizer.version < (3, 8):
error = 'literal'
else:
error = "f-string expression"
elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
for child in node.children[::2]:
self._check_assignment(child, is_deletion, is_namedexpr)
self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
elif ('expr' in type_ and type_ != 'star_expr' # is a substring
or '_test' in type_
or type_ in ('term', 'factor')):
error = 'operator'
elif type_ == "star_expr":
if is_deletion:
if self._normalizer.version >= (3, 9):
error = "starred"
else:
self.add_issue(node, message="can't use starred expression here")
elif not search_ancestor(node, *_STAR_EXPR_PARENTS) and not is_aug_assign:
self.add_issue(node, message="starred assignment target must be in a list or tuple")
self._check_assignment(node.children[1])
if error is not None:
if is_namedexpr:
@@ -999,15 +1148,35 @@ class _CompForRule(_CheckAssignmentRule):
@ErrorFinder.register_rule(type='expr_stmt')
class _ExprStmtRule(_CheckAssignmentRule):
message = "illegal expression for augmented assignment"
extended_message = "'{target}' is an " + message
def is_issue(self, node):
for before_equal in node.children[:-2:2]:
self._check_assignment(before_equal)
augassign = node.children[1]
if augassign != '=' and augassign.type != 'annassign': # Is augassign.
return node.children[0].type in ('testlist_star_expr', 'atom', 'testlist')
is_aug_assign = augassign != '=' and augassign.type != 'annassign'
if self._normalizer.version <= (3, 8) or not is_aug_assign:
for before_equal in node.children[:-2:2]:
self._check_assignment(before_equal, is_aug_assign=is_aug_assign)
if is_aug_assign:
target = _remove_parens(node.children[0])
# a, a[b], a.b
if target.type == "name" or (
target.type in ("atom_expr", "power")
and target.children[1].type == "trailer"
and target.children[-1].children[0] != "("
):
return False
if self._normalizer.version <= (3, 8):
return True
else:
self.add_issue(
node,
message=self.extended_message.format(
target=_get_rhs_name(node.children[0], self._normalizer.version)
),
)
@ErrorFinder.register_rule(type='with_item')
class _WithItemRule(_CheckAssignmentRule):

parso/python/grammar310.txt (new file, 171 lines)

@@ -0,0 +1,171 @@
# Grammar for Python
# NOTE WELL: You should also follow all the steps listed at
# https://devguide.python.org/grammar/
# Start symbols for the grammar:
# single_input is a single interactive statement;
# file_input is a module or sequence of commands read from an input file;
# eval_input is the input for the eval() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
file_input: stmt* ENDMARKER
eval_input: testlist NEWLINE* ENDMARKER
decorator: '@' namedexpr_test NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | async_funcdef)
async_funcdef: 'async' funcdef
funcdef: 'def' NAME parameters ['->' test] ':' suite
parameters: '(' [typedargslist] ')'
typedargslist: (
(tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
',' tfpdef ['=' test])* ([',' [
'*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
| '**' tfpdef [',']]])
| '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
| '**' tfpdef [',']]] )
| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
'*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
| '**' tfpdef [',']]]
| '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
| '**' tfpdef [','])
)
tfpdef: NAME [':' test]
varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
'*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
| '**' vfpdef [',']]]
| '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
| '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
'*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
| '**' vfpdef [',']]]
| '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
| '**' vfpdef [',']
)
vfpdef: NAME
stmt: simple_stmt | compound_stmt | NEWLINE
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
# For normal and annotated assignments, additional restrictions enforced by the interpreter
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist_star_expr]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test]]
import_stmt: import_name | import_from
import_name: 'import' dotted_as_names
# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
'import' ('*' | '(' import_as_names ')' | import_as_names))
import_as_name: NAME ['as' NAME]
dotted_as_name: dotted_name ['as' NAME]
import_as_names: import_as_name (',' import_as_name)* [',']
dotted_as_names: dotted_as_name (',' dotted_as_name)*
dotted_name: NAME ('.' NAME)*
global_stmt: 'global' NAME (',' NAME)*
nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
assert_stmt: 'assert' test [',' test]
compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
async_stmt: 'async' (funcdef | with_stmt | for_stmt)
if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite
((except_clause ':' suite)+
['else' ':' suite]
['finally' ':' suite] |
'finally' ':' suite))
with_stmt: 'with' with_item (',' with_item)* ':' suite
with_item: test ['as' expr]
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test ['as' NAME]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
namedexpr_test: test [':=' test]
test: or_test ['if' or_test 'else' test] | lambdef
test_nocond: or_test | lambdef_nocond
lambdef: 'lambda' [varargslist] ':' test
lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
# <> isn't actually a valid comparison operator in Python. It's here for the
# sake of a __future__ import described in PEP 401 (which really works :-)
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom_expr ['**' factor]
atom_expr: ['await'] atom trailer*
atom: ('(' [yield_expr|testlist_comp] ')' |
'[' [testlist_comp] ']' |
'{' [dictorsetmaker] '}' |
NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: subscript (',' subscript)* [',']
subscript: test | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictorsetmaker: ( ((test ':' test | '**' expr)
(comp_for | (',' (test ':' test | '**' expr))* [','])) |
((test | star_expr)
(comp_for | (',' (test | star_expr))* [','])) )
classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
arglist: argument (',' argument)* [',']
# The reason that keywords are test nodes instead of NAME is that using NAME
# results in an ambiguity. ast.c makes sure it's a NAME.
# "test '=' test" is really "keyword '=' test", but we have no such token.
# These need to be in a single rule to avoid grammar that is ambiguous
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
test ':=' test |
test '=' test |
'**' test |
'*' test )
comp_iter: comp_for | comp_if
sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
comp_for: ['async'] sync_comp_for
comp_if: 'if' test_nocond [comp_iter]
# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME
yield_expr: 'yield' [yield_arg]
yield_arg: 'from' test | testlist_star_expr
strings: (STRING | fstring)+
fstring: FSTRING_START fstring_content* FSTRING_END
fstring_content: FSTRING_STRING | fstring_expr
fstring_conversion: '!' NAME
fstring_expr: '{' testlist ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
fstring_format_spec: ':' fstring_content*


@@ -52,7 +52,7 @@ small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' test]
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')


@@ -12,7 +12,7 @@ single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
file_input: stmt* ENDMARKER
eval_input: testlist NEWLINE* ENDMARKER
decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
decorator: '@' namedexpr_test NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | async_funcdef)
@@ -52,7 +52,7 @@ small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' test]
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
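Both grammar relaxations can be exercised through parso (a sketch; the decorator line uses PEP 614 syntax, the assignment uses the extended annassign RHS):

import parso

g39 = parso.load_grammar(version="3.9")
tree = g39.parse("@buttons[0].clicked.connect\ndef f(): pass\n")
assert not list(g39.iter_errors(tree))  # arbitrary decorator expression on 3.9

g38 = parso.load_grammar(version="3.8")
tree = g38.parse("x: Tuple[int, ...] = z, *q, w\n")
assert not list(g38.iter_errors(tree))  # bare tuple as the annotated RHS on 3.8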


@@ -260,7 +260,7 @@ def _create_token_collection(version_info):
'finally', 'while', 'with', 'return', 'continue',
'break', 'del', 'pass', 'global', 'assert')
if version_info >= (3, 5):
ALWAYS_BREAK_TOKENS += ('async', 'nonlocal')
ALWAYS_BREAK_TOKENS += ('nonlocal', )
pseudo_token_compiled = _compile(PseudoToken)
return TokenCollection(
pseudo_token_compiled, single_quoted, triple_quoted, endpats,


@@ -168,7 +168,7 @@ class NodeOrLeaf(object):
@abstractmethod
def get_code(self, include_prefix=True):
"""
Returns the code that was input the input for the parser for this node.
Returns the code that was the input for the parser for this node.
:param include_prefix: Removes the prefix (whitespace and comments) of
e.g. a statement.


@@ -105,8 +105,17 @@ def python_bytes_to_unicode(source, encoding='utf-8', errors='strict'):
if not isinstance(encoding, unicode):
encoding = unicode(encoding, 'utf-8', 'replace')
# Cast to unicode
return unicode(source, encoding, errors)
try:
# Cast to unicode
return unicode(source, encoding, errors)
except LookupError:
if errors == 'replace':
# This is a weird case that can happen if the given encoding is not
# a valid encoding. This usually shouldn't happen with provided
# encodings, but can happen if somebody uses encoding declarations
# like `# coding: foo-8`.
return unicode(source, 'utf-8', errors)
raise
def version_info():
@@ -120,7 +129,7 @@ def version_info():
def _parse_version(version):
match = re.match(r'(\d+)(?:\.(\d)(?:\.\d+)?)?$', version)
match = re.match(r'(\d+)(?:\.(\d{1,2})(?:\.\d+)?)?((a|b|rc)\d)?$', version)
if match is None:
raise ValueError('The given version is not in the right format. '
'Use something like "3.8" or "3".')
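The widened pattern accepts two-digit minor versions and pre-release suffixes; a quick check against the new regex:

import re

pattern = re.compile(r'(\d+)(?:\.(\d{1,2})(?:\.\d+)?)?((a|b|rc)\d)?$')
for version in ('3', '3.6.10', '3.10', '3.10a9', '3.10rc9', '1.12'):
    match = pattern.match(version)
    print(version, match.groups() if match else 'no match')
# '1.12' now matches as well, which is why it is dropped from the
# invalid-version test below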


@@ -25,7 +25,7 @@ setup(name='parso',
keywords='python parser parsing',
long_description=readme,
packages=find_packages(exclude=['test']),
package_data={'parso': ['python/grammar*.txt']},
package_data={'parso': ['python/grammar*.txt', 'py.typed', '*.pyi', '**/*.pyi']},
platforms=['any'],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
classifiers=[
@@ -44,6 +44,7 @@ setup(name='parso',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Editors :: Integrated Development Environments (IDE)',
'Topic :: Utilities',
'Typing :: Typed',
],
extras_require={
'testing': [


@@ -52,9 +52,37 @@ FAILING_EXAMPLES = [
'f(x=2, y)',
'f(**x, *y)',
'f(**x, y=3, z)',
# augassign
'a, b += 3',
'(a, b) += 3',
'[a, b] += 3',
'f() += 1',
'lambda x:None+=1',
'{} += 1',
'{a:b} += 1',
'{1} += 1',
'{*x} += 1',
'(x,) += 1',
'(x, y if a else q) += 1',
'[] += 1',
'[1,2] += 1',
'[] += 1',
'None += 1',
'... += 1',
'a > 1 += 1',
'"test" += 1',
'1 += 1',
'1.0 += 1',
'(yield) += 1',
'(yield from x) += 1',
'(x if x else y) += 1',
'a() += 1',
'a + b += 1',
'+a += 1',
'a and b += 1',
'*a += 1',
'a, b += 1',
'f"xxx" += 1',
# All assignment tests
'lambda a: 1 = 1',
'[x for x in y] = 1',
@@ -308,6 +336,12 @@ if sys.version_info[:2] <= (3, 4):
'(*[1], 2)',
]
if sys.version_info[:2] >= (3, 7):
# This is somehow ok in previous versions.
FAILING_EXAMPLES += [
'class X(base for base in bases): pass',
]
if sys.version_info[:2] < (3, 8):
FAILING_EXAMPLES += [
# Python/compile.c


@@ -12,13 +12,6 @@ from .__future__ import absolute_import
''r''u''
b'' BR''
for x in [1]:
try:
continue # Only the other continue and pass is an error.
finally:
#: E901
continue
for x in [1]:
break


@@ -2,28 +2,38 @@
Test all things related to the ``jedi.cache`` module.
"""
from os import unlink
import os
import os.path
import pytest
import time
from parso.cache import _NodeCacheItem, save_module, load_module, \
_get_hashed_path, parser_cache, _load_from_file_system, _save_to_file_system
from parso.cache import (_CACHED_FILE_MAXIMUM_SURVIVAL, _VERSION_TAG,
_get_cache_clear_lock, _get_hashed_path,
_load_from_file_system, _NodeCacheItem,
_remove_cache_and_update_lock, _save_to_file_system,
load_module, parser_cache, try_to_save_module)
from parso._compatibility import is_pypy, PermissionError
from parso import load_grammar
from parso import cache
from parso import file_io
from parso import parse
skip_pypy = pytest.mark.skipif(
is_pypy,
reason="pickling in pypy is slow, since we don't pickle,"
"we never go into path of auto-collecting garbage"
)
@pytest.fixture()
def isolated_jedi_cache(monkeypatch, tmpdir):
"""
Set `jedi.settings.cache_directory` to a temporary directory during test.
Same as `clean_jedi_cache`, but create the temporary directory for
each test case (scope='function').
"""
monkeypatch.setattr(cache, '_default_cache_path', str(tmpdir))
def isolated_parso_cache(monkeypatch, tmpdir):
"""Set `parso.cache._default_cache_path` to a temporary directory
during the test. """
cache_path = str(os.path.join(str(tmpdir), "__parso_cache"))
monkeypatch.setattr(cache, '_default_cache_path', cache_path)
monkeypatch.setattr(cache, '_get_default_cache_path', lambda *args, **kwargs: cache_path)
return cache_path
def test_modulepickling_change_cache_dir(tmpdir):
@@ -57,7 +67,7 @@ def load_stored_item(hashed_grammar, path, item, cache_path):
return item
@pytest.mark.usefixtures("isolated_jedi_cache")
@pytest.mark.usefixtures("isolated_parso_cache")
def test_modulepickling_simulate_deleted_cache(tmpdir):
"""
Tests loading from a cache file after it is deleted.
@@ -81,10 +91,10 @@ def test_modulepickling_simulate_deleted_cache(tmpdir):
pass
io = file_io.FileIO(path)
save_module(grammar._hashed, io, module, lines=[])
try_to_save_module(grammar._hashed, io, module, lines=[])
assert load_module(grammar._hashed, io) == module
unlink(_get_hashed_path(grammar._hashed, path))
os.unlink(_get_hashed_path(grammar._hashed, path))
parser_cache.clear()
cached2 = load_module(grammar._hashed, io)
@@ -139,3 +149,43 @@ def test_cache_last_used_update(diff_cache, use_file_io):
node_cache_item = next(iter(parser_cache.values()))[p]
assert now < node_cache_item.last_used < time.time()
@skip_pypy
def test_inactive_cache(tmpdir, isolated_parso_cache):
parser_cache.clear()
test_subjects = "abcdef"
for path in test_subjects:
parse('somecode', cache=True, path=os.path.join(str(tmpdir), path))
raw_cache_path = os.path.join(isolated_parso_cache, _VERSION_TAG)
assert os.path.exists(raw_cache_path)
paths = os.listdir(raw_cache_path)
a_while_ago = time.time() - _CACHED_FILE_MAXIMUM_SURVIVAL
old_paths = set()
for path in paths[:len(test_subjects) // 2]: # make certain number of paths old
os.utime(os.path.join(raw_cache_path, path), (a_while_ago, a_while_ago))
old_paths.add(path)
# nothing should be cleared while the lock is on
assert os.path.exists(_get_cache_clear_lock().path)
_remove_cache_and_update_lock() # it shouldn't clear anything
assert len(os.listdir(raw_cache_path)) == len(test_subjects)
assert old_paths.issubset(os.listdir(raw_cache_path))
os.utime(_get_cache_clear_lock().path, (a_while_ago, a_while_ago))
_remove_cache_and_update_lock()
assert len(os.listdir(raw_cache_path)) == len(test_subjects) // 2
assert not old_paths.intersection(os.listdir(raw_cache_path))
@skip_pypy
def test_permission_error(monkeypatch):
def save(*args, **kwargs):
was_called[0] = True # Python 2... Use nonlocal instead
raise PermissionError
was_called = [False]
monkeypatch.setattr(cache, '_save_to_file_system', save)
with pytest.warns(Warning):
parse(path=__file__, cache=True, diff_cache=True)
assert was_called[0]


@@ -20,7 +20,7 @@ def test_parse_version(string, result):
assert utils._parse_version(string) == result
@pytest.mark.parametrize('string', ['1.', 'a', '#', '1.3.4.5', '1.12'])
@pytest.mark.parametrize('string', ['1.', 'a', '#', '1.3.4.5'])
def test_invalid_grammar_version(string):
with pytest.raises(ValueError):
load_grammar(version=string)


@@ -194,6 +194,9 @@ def test_no_error_nodes(each_version):
def test_named_expression(works_ge_py38):
works_ge_py38.parse("(a := 1, a + 1)")
def test_extended_rhs_annassign(works_ge_py38):
works_ge_py38.parse("x: y = z,")
works_ge_py38.parse("x: Tuple[int, ...] = z, *q, w")
@pytest.mark.parametrize(
'param_code', [
@@ -208,3 +211,13 @@ def test_named_expression(works_ge_py38):
)
def test_positional_only_arguments(works_ge_py38, param_code):
works_ge_py38.parse("def x(%s): pass" % param_code)
@pytest.mark.parametrize(
'expression', [
'a + a',
'lambda x: x',
'a := lambda x: x'
]
)
def test_decorator_expression(works_ge_py39, expression):
works_ge_py39.parse("@%s\ndef x(): pass" % expression)


@@ -87,6 +87,39 @@ def test_async_for(works_ge_py35):
works_ge_py35.parse("async def foo():\n async for a in b: pass")
@pytest.mark.parametrize("body", [
"""[1 async for a in b
]""",
"""[1 async
for a in b
]""",
"""[
1
async for a in b
]""",
"""[
1
async for a
in b
]""",
"""[
1
async
for
a
in
b
]""",
""" [
1 async for a in b
]""",
])
def test_async_for_comprehension_newline(works_ge_py36, body):
# Issue #139
works_ge_py36.parse("""async def foo():
{}""".format(body))
def test_async_with(works_ge_py35):
works_ge_py35.parse("async def foo():\n async with a: pass")


@@ -7,6 +7,8 @@ import warnings
import pytest
import parso
from textwrap import dedent
from parso._compatibility import is_pypy
from .failing_examples import FAILING_EXAMPLES, indent, build_nested
@@ -185,12 +187,13 @@ def test_statically_nested_blocks():
def test_future_import_first():
def is_issue(code, *args):
def is_issue(code, *args, **kwargs):
code = code % args
return bool(_get_error_list(code))
return bool(_get_error_list(code, **kwargs))
i1 = 'from __future__ import division'
i2 = 'from __future__ import absolute_import'
i3 = 'from __future__ import annotations'
assert not is_issue(i1)
assert not is_issue(i1 + ';' + i2)
assert not is_issue(i1 + '\n' + i2)
@@ -201,6 +204,8 @@ def test_future_import_first():
assert not is_issue('""\n%s;%s', i1, i2)
assert not is_issue('"";%s;%s ', i1, i2)
assert not is_issue('"";%s\n%s ', i1, i2)
assert not is_issue(i3, version="3.7")
assert is_issue(i3, version="3.6")
assert is_issue('1;' + i1)
assert is_issue('1\n' + i1)
assert is_issue('"";1\n' + i1)
@@ -268,6 +273,9 @@ def test_too_many_levels_of_indentation():
assert not _get_error_list(build_nested('pass', 49, base=base))
assert _get_error_list(build_nested('pass', 50, base=base))
def test_paren_kwarg():
assert _get_error_list("print((sep)=seperator)", version="3.8")
assert not _get_error_list("print((sep)=seperator)", version="3.7")
@pytest.mark.parametrize(
'code', [
@@ -321,3 +329,88 @@ def test_invalid_fstrings(code, message):
def test_trailing_comma(code):
errors = _get_error_list(code)
assert not errors
def test_continue_in_finally():
code = dedent('''\
for a in [1]:
try:
pass
finally:
continue
''')
assert not _get_error_list(code, version="3.8")
assert _get_error_list(code, version="3.7")
@pytest.mark.parametrize(
'template', [
"a, b, {target}, c = d",
"a, b, *{target}, c = d",
"(a, *{target}), c = d",
"for x, {target} in y: pass",
"for x, q, {target} in y: pass",
"for x, q, *{target} in y: pass",
"for (x, *{target}), q in y: pass",
]
)
@pytest.mark.parametrize(
'target', [
"True",
"False",
"None",
"__debug__"
]
)
def test_forbidden_name(template, target):
assert _get_error_list(template.format(target=target), version="3")
def test_repeated_kwarg():
# python 3.9+ shows which argument is repeated
assert (
_get_error_list("f(q=1, q=2)", version="3.8")[0].message
== "SyntaxError: keyword argument repeated"
)
assert (
_get_error_list("f(q=1, q=2)", version="3.9")[0].message
== "SyntaxError: keyword argument repeated: q"
)
@pytest.mark.parametrize(
('source', 'no_errors'), [
('a(a for a in b,)', False),
('a(a for a in b, a)', False),
('a(a, a for a in b)', False),
('a(a, b, a for a in b, c, d)', False),
('a(a for a in b)', True),
('a((a for a in b), c)', True),
('a(c, (a for a in b))', True),
('a(a, b, (a for a in b), c, d)', True),
]
)
def test_unparenthesized_genexp(source, no_errors):
assert bool(_get_error_list(source)) ^ no_errors
@pytest.mark.parametrize(
('source', 'no_errors'), [
('*x = 2', False),
('(*y) = 1', False),
('((*z)) = 1', False),
('a, *b = 1', True),
('a, *b, c = 1', True),
('a, (*b), c = 1', True),
('a, ((*b)), c = 1', True),
('a, (*b, c), d = 1', True),
('[*(1,2,3)]', True),
('{*(1,2,3)}', True),
('[*(1,2,3),]', True),
('[*(1,2,3), *(4,5,6)]', True),
('[0, *(1,2,3)]', True),
('{*(1,2,3),}', True),
('{*(1,2,3), *(4,5,6)}', True),
('{0, *(4,5,6)}', True)
]
)
def test_starred_expr(source, no_errors):
assert bool(_get_error_list(source, version="3")) ^ no_errors


@@ -1,6 +1,11 @@
from codecs import BOM_UTF8
from parso.utils import split_lines, python_bytes_to_unicode
from parso.utils import (
split_lines,
parse_version_string,
python_bytes_to_unicode,
)
import parso
import pytest
@@ -63,3 +68,35 @@ def test_utf8_bom():
expr_stmt = module.children[0]
assert expr_stmt.type == 'expr_stmt'
assert unicode_bom == expr_stmt.get_first_leaf().prefix
@pytest.mark.parametrize(
('code', 'errors'), [
(b'# coding: wtf-12\nfoo', 'strict'),
(b'# coding: wtf-12\nfoo', 'replace'),
]
)
def test_bytes_to_unicode_failing_encoding(code, errors):
if errors == 'strict':
with pytest.raises(LookupError):
python_bytes_to_unicode(code, errors=errors)
else:
python_bytes_to_unicode(code, errors=errors)
@pytest.mark.parametrize(
('version_str', 'version'), [
('3', (3,)),
('3.6', (3, 6)),
('3.6.10', (3, 6)),
('3.10', (3, 10)),
('3.10a9', (3, 10)),
('3.10b9', (3, 10)),
('3.10rc9', (3, 10)),
]
)
def test_parse_version_string(version_str, version):
parsed_version = parse_version_string(version_str)
if len(version) == 1:
assert parsed_version[0] == version[0]
else:
assert parsed_version == version