17 Commits

Author SHA1 Message Date
Dave Halter
8a06f0da05 0.4.0 release notes 2019-04-05 18:57:21 +02:00
Dave Halter
bd95989c2e Change the default tox environments to test
These versions will be tested before deploying
2019-04-05 18:55:23 +02:00
Miro Hrončok
57e91262cd Add Python 3.8 to tox.ini
Otherwise we get:

    Matching undeclared envs is deprecated.
    Be sure all the envs that Tox should run are declared in the tox config.
2019-04-05 18:43:43 +02:00
Miro Hrončok
476383cca9 Test on Python 3.8 2019-04-05 18:43:43 +02:00
Dave Halter
b2ab64d8f9 Fix Python 3.8 error issues 2019-04-05 18:30:48 +02:00
Dave Halter
18cbeb1a3d Fix an issue, because sync_comp_for exists now 2019-04-05 16:27:17 +02:00
Dave Halter
a5686d6cda PEP 8 2019-04-05 16:25:45 +02:00
Dave Halter
dfe7fba08e continue in finally is no longer an error 2019-04-05 16:17:30 +02:00
Dave Halter
6db7f40942 Python 2 compatibility 2019-04-03 01:24:06 +02:00
Dave Halter
d5eb96309c Increase the pickle version. With all the changes lately, it's better this way 2019-04-03 01:07:25 +02:00
Dave Halter
4c65368056 Some minor changes to file_io 2019-03-27 01:02:27 +01:00
Dave Halter
3e2956264c Add FileIO to make it possible to cache e.g. files from zip files 2019-03-25 00:48:59 +01:00
Dave Halter
e77a67cd36 PEP 8 2019-03-22 20:17:59 +01:00
Daniel Hahler
c4d6de2aab tests: add coverage tox factor, use it on Travis 2019-03-22 11:01:22 +01:00
Daniel Hahler
7770e73609 ci: Travis: use dist=xenial 2019-03-22 11:01:22 +01:00
Dave Halter
acccb4f28d 0.3.4 release 2019-02-13 00:19:07 +01:00
Dave Halter
3f6fc8a5ad Fix an f-string tokenizer issue 2019-02-13 00:17:37 +01:00
16 changed files with 205 additions and 90 deletions

.coveragerc

@@ -1,4 +1,5 @@
 [run]
+source = parso
 
 [report]
 # Regexes for lines to exclude from consideration

.travis.yml

@@ -1,25 +1,25 @@
+dist: xenial
 language: python
-sudo: false
 python:
-  - 2.6
   - 2.7
   - 3.4
   - 3.5
   - 3.6
-  - pypy
+  - 3.7
+  - 3.8-dev
+  - pypy2.7-6.0
+  - pypy3.5-6.0
 matrix:
   include:
-    - { python: "3.7", dist: xenial, sudo: true }
     - python: 3.5
-      env: TOXENV=cov
-  allow_failures:
-    - env: TOXENV=cov
+      env: TOXENV=py35-coverage
 install:
   - pip install --quiet tox-travis
 script:
   - tox
 after_script:
-  - if [ $TOXENV == "cov" ]; then
-      pip install --quiet coveralls;
-      coveralls;
-    fi
+  - |
+    if [ "${TOXENV%-coverage}" == "$TOXENV" ]; then
+      pip install --quiet coveralls;
+      coveralls;
+    fi

CHANGELOG.rst

@@ -3,6 +3,17 @@
 Changelog
 ---------
 
+0.4.0 (2019-04-05)
+++++++++++++++++++
+
+- Python 3.8 support
+- FileIO support, it's now possible to use abstract file IO, support is alpha
+
+0.3.4 (2019-02-13)
++++++++++++++++++++
+
+- Fix an f-string tokenizer error
+
 0.3.3 (2019-02-06)
 +++++++++++++++++++

parso/__init__.py

@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode
 
-__version__ = '0.3.3'
+__version__ = '0.4.0'
 
 
 def parse(code=None, **kwargs):

parso/cache.py

@@ -18,7 +18,7 @@ from parso._compatibility import FileNotFoundError
 
 LOG = logging.getLogger(__name__)
 
-_PICKLE_VERSION = 30
+_PICKLE_VERSION = 31
 """
 Version number (integer) for file system cache.
@@ -45,6 +45,7 @@ we generate something similar. See:
 http://docs.python.org/3/library/sys.html#sys.implementation
 """
 
+
 def _get_default_cache_path():
     if platform.system().lower() == 'windows':
         dir_ = os.path.join(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso')
@@ -54,6 +55,7 @@ def _get_default_cache_path():
     dir_ = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'parso')
     return os.path.expanduser(dir_)
 
+
 _default_cache_path = _get_default_cache_path()
 """
 The path where the cache is stored.
@@ -76,21 +78,26 @@ class _NodeCacheItem(object):
         self.change_time = change_time
 
 
-def load_module(hashed_grammar, path, cache_path=None):
+def load_module(hashed_grammar, file_io, cache_path=None):
     """
     Returns a module or None, if it fails.
     """
     try:
-        p_time = os.path.getmtime(path)
+        p_time = file_io.get_last_modified()
     except FileNotFoundError:
         return None
 
     try:
-        module_cache_item = parser_cache[hashed_grammar][path]
+        module_cache_item = parser_cache[hashed_grammar][file_io.path]
         if p_time <= module_cache_item.change_time:
             return module_cache_item.node
     except KeyError:
-        return _load_from_file_system(hashed_grammar, path, p_time, cache_path=cache_path)
+        return _load_from_file_system(
+            hashed_grammar,
+            file_io.path,
+            p_time,
+            cache_path=cache_path
+        )
 
 
 def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
@@ -121,9 +128,10 @@ def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
     return module_cache_item.node
 
 
-def save_module(hashed_grammar, path, module, lines, pickling=True, cache_path=None):
+def save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
+    path = file_io.path
     try:
-        p_time = None if path is None else os.path.getmtime(path)
+        p_time = None if path is None else file_io.get_last_modified()
     except OSError:
         p_time = None
         pickling = False
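
The cache is now keyed and freshness-checked through a file IO object rather than a raw path string. A minimal usage sketch, not part of the diff; grammar._hashed is parso's internal grammar hash, and example.py is a placeholder that must exist on disk:

import parso
from parso.file_io import FileIO
from parso.cache import load_module

grammar = parso.load_grammar()
io = FileIO('example.py')  # hypothetical file, assumed to exist

# Cold cache: returns None; the mtime check goes through io.get_last_modified().
module = load_module(grammar._hashed, io)
if module is None:
    # parse(cache=True) wraps the path in a FileIO itself and calls save_module().
    module = grammar.parse(path='example.py', cache=True)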

parso/file_io.py Normal file

@@ -0,0 +1,32 @@
+import os
+
+
+class FileIO(object):
+    def __init__(self, path):
+        self.path = path
+
+    def read(self):  # Returns bytes/str
+        # We would like to read unicode here, but we cannot, because we are not
+        # sure if it is a valid unicode file. Therefore just read whatever is
+        # here.
+        with open(self.path, 'rb') as f:
+            return f.read()
+
+    def get_last_modified(self):
+        """
+        Returns float - timestamp
+        Might raise FileNotFoundError
+        """
+        return os.path.getmtime(self.path)
+
+    def __repr__(self):
+        return '%s(%s)' % (self.__class__.__name__, self.path)
+
+
+class KnownContentFileIO(FileIO):
+    def __init__(self, path, content):
+        super(KnownContentFileIO, self).__init__(path)
+        self._content = content
+
+    def read(self):
+        return self._content
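
FileIO reads bytes from disk, while KnownContentFileIO serves content that is already in memory, e.g. a file extracted from a zip archive. A short usage sketch, assuming a setup.py exists in the working directory:

from parso.file_io import FileIO, KnownContentFileIO

disk_io = FileIO('setup.py')
print(disk_io.read())               # raw bytes from disk
print(disk_io.get_last_modified())  # float mtime; may raise FileNotFoundError

mem_io = KnownContentFileIO('archive/setup.py', b'print("hi")\n')
print(mem_io.read())                # returns the given content, no disk access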

parso/grammar.py

@@ -12,6 +12,7 @@ from parso.parser import BaseParser
 from parso.python.parser import Parser as PythonParser
 from parso.python.errors import ErrorFinderConfig
 from parso.python import pep8
+from parso.file_io import FileIO, KnownContentFileIO
 
 _loaded_grammars = {}
@@ -77,14 +78,14 @@
     def _parse(self, code=None, error_recovery=True, path=None,
                start_symbol=None, cache=False, diff_cache=False,
-               cache_path=None, start_pos=(1, 0)):
+               cache_path=None, file_io=None, start_pos=(1, 0)):
         """
         Wanted python3.5 * operator and keyword only arguments. Therefore just
         wrap it all.
         start_pos here is just a parameter internally used. Might be public
         sometime in the future.
         """
-        if code is None and path is None:
+        if code is None and path is None and file_io is None:
             raise TypeError("Please provide either code or a path.")
 
         if start_symbol is None:
@@ -93,15 +94,19 @@
         if error_recovery and start_symbol != 'file_input':
             raise NotImplementedError("This is currently not implemented.")
 
-        if cache and path is not None:
-            module_node = load_module(self._hashed, path, cache_path=cache_path)
+        if file_io is None:
+            if code is None:
+                file_io = FileIO(path)
+            else:
+                file_io = KnownContentFileIO(path, code)
+
+        if cache and file_io.path is not None:
+            module_node = load_module(self._hashed, file_io, cache_path=cache_path)
             if module_node is not None:
                 return module_node
 
         if code is None:
-            with open(path, 'rb') as f:
-                code = f.read()
+            code = file_io.read()
 
         code = python_bytes_to_unicode(code)
         lines = split_lines(code, keepends=True)
@@ -110,7 +115,7 @@
             raise TypeError("You have to define a diff parser to be able "
                             "to use this option.")
         try:
-            module_cache_item = parser_cache[self._hashed][path]
+            module_cache_item = parser_cache[self._hashed][file_io.path]
         except KeyError:
             pass
         else:
@@ -125,7 +130,7 @@
                 old_lines=old_lines,
                 new_lines=lines
             )
-            save_module(self._hashed, path, new_node, lines,
+            save_module(self._hashed, file_io, new_node, lines,
                         # Never pickle in pypy, it's slow as hell.
                         pickling=cache and not is_pypy,
                         cache_path=cache_path)
@@ -141,7 +146,7 @@
         root_node = p.parse(tokens=tokens)
 
         if cache or diff_cache:
-            save_module(self._hashed, path, root_node, lines,
+            save_module(self._hashed, file_io, root_node, lines,
                         # Never pickle in pypy, it's slow as hell.
                         pickling=cache and not is_pypy,
                         cache_path=cache_path)
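
With this change there are three equivalent ways into Grammar._parse(): plain code, a path (wrapped in a FileIO), or an explicit file_io object. A hedged sketch; example.py is a placeholder, and the path variant requires the file to exist:

import parso
from parso.file_io import KnownContentFileIO

grammar = parso.load_grammar()

tree1 = grammar.parse(code='x = 1\n')      # becomes KnownContentFileIO(None, code)
tree2 = grammar.parse(path='example.py')   # becomes FileIO('example.py')
tree3 = grammar.parse(file_io=KnownContentFileIO('example.py', 'x = 1\n'))
assert tree1.get_code() == tree3.get_code()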

parso/python/errors.py

@@ -6,7 +6,6 @@ from contextlib import contextmanager
 
 from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
 from parso.python.tree import search_ancestor
-from parso.parser import ParserSyntaxError
 
 _BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
 _STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
@@ -17,6 +16,7 @@ ALLOWED_FUTURES = (
     'all_feature_names', 'nested_scopes', 'generators', 'division',
     'absolute_import', 'with_statement', 'print_function', 'unicode_literals',
 )
+_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
 
 
 def _iter_stmts(scope):
@@ -35,12 +35,12 @@ def _iter_stmts(scope):
 
 def _get_comprehension_type(atom):
     first, second = atom.children[:2]
-    if second.type == 'testlist_comp' and second.children[1].type == 'comp_for':
+    if second.type == 'testlist_comp' and second.children[1].type in _COMP_FOR_TYPES:
         if first == '[':
             return 'list comprehension'
         else:
             return 'generator expression'
-    elif second.type == 'dictorsetmaker' and second.children[-1].type == 'comp_for':
+    elif second.type == 'dictorsetmaker' and second.children[-1].type in _COMP_FOR_TYPES:
         if second.children[1] == ':':
             return 'dict comprehension'
         else:
@@ -107,6 +107,7 @@ def _iter_definition_exprs_from_lists(exprlist):
             yield child
 
+
 def _get_expr_stmt_definition_exprs(expr_stmt):
     exprs = []
     for list_ in expr_stmt.children[:-2:2]:
@@ -273,13 +274,12 @@ class ErrorFinder(Normalizer):
     def visit(self, node):
         if node.type == 'error_node':
             with self.visit_node(node):
                 # Don't need to investigate the inners of an error node. We
                 # might find errors in there that should be ignored, because
                 # the error node itself already shows that there's an issue.
                 return ''
         return super(ErrorFinder, self).visit(node)
 
-
     @contextmanager
     def visit_node(self, node):
         self._check_type_rules(node)
@@ -455,23 +455,19 @@ class _YieldFromCheck(SyntaxRule):
     def is_issue(self, leaf):
         return leaf.parent.type == 'yield_arg' \
             and self._normalizer.context.is_async_funcdef()
 
 
 @ErrorFinder.register_rule(type='name')
 class _NameChecks(SyntaxRule):
     message = 'cannot assign to __debug__'
-    message_keyword = 'assignment to keyword'
     message_none = 'cannot assign to None'
 
     def is_issue(self, leaf):
         self._normalizer.context.add_name(leaf)
 
         if leaf.value == '__debug__' and leaf.is_definition():
-            if self._normalizer.version < (3, 0):
-                return True
-            else:
-                self.add_issue(leaf, message=self.message_keyword)
+            return True
         if leaf.value == 'None' and self._normalizer.version < (3, 0) \
                 and leaf.is_definition():
             self.add_issue(leaf, message=self.message_none)
@@ -539,7 +535,7 @@ class _StarStarCheck(SyntaxRule):
     def is_issue(self, leaf):
         if leaf.parent.type == 'dictorsetmaker':
             comp_for = leaf.get_next_sibling().get_next_sibling()
-            return comp_for is not None and comp_for.type == 'comp_for'
+            return comp_for is not None and comp_for.type in _COMP_FOR_TYPES
 
 
 @ErrorFinder.register_rule(value='yield')
@@ -618,7 +614,7 @@ class _FutureImportRule(SyntaxRule):
                 allowed_futures.append('generator_stop')
 
             if name == 'braces':
-                self.add_issue(node, message = "not a chance")
+                self.add_issue(node, message="not a chance")
             elif name == 'barry_as_FLUFL':
                 m = "Seriously I'm not implementing this :) ~ Dave"
                 self.add_issue(node, message=m)
@@ -638,7 +634,7 @@ class _StarExprRule(SyntaxRule):
             return True
         if node.parent.type == 'testlist_comp':
             # [*[] for a in [1]]
-            if node.parent.children[1].type == 'comp_for':
+            if node.parent.children[1].type in _COMP_FOR_TYPES:
                 self.add_issue(node, message=self.message_iterable_unpacking)
         if self._normalizer.version <= (3, 4):
             n = search_ancestor(node, 'for_stmt', 'expr_stmt')
@@ -715,8 +711,8 @@ class _AnnotatorRule(SyntaxRule):
                 if not (lhs.type == 'name'
                         # subscript/attributes are allowed
                         or lhs.type in ('atom_expr', 'power')
-                       and trailer.type == 'trailer'
-                       and trailer.children[0] != '('):
+                        and trailer.type == 'trailer'
+                        and trailer.children[0] != '('):
                     return True
             else:
                 # x, y: str
@@ -731,10 +727,16 @@ class _ArgumentRule(SyntaxRule):
         if node.children[1] == '=' and first.type != 'name':
             if first.type == 'lambdef':
                 # f(lambda: 1=1)
-                message = "lambda cannot contain assignment"
+                if self._normalizer.version < (3, 8):
+                    message = "lambda cannot contain assignment"
+                else:
+                    message = 'expression cannot contain assignment, perhaps you meant "=="?'
             else:
                 # f(+x=1)
-                message = "keyword can't be an expression"
+                if self._normalizer.version < (3, 8):
+                    message = "keyword can't be an expression"
+                else:
+                    message = 'expression cannot contain assignment, perhaps you meant "=="?'
             self.add_issue(first, message=message)
@@ -758,7 +760,7 @@ class _ArglistRule(SyntaxRule):
     def is_issue(self, node):
         first_arg = node.children[0]
         if first_arg.type == 'argument' \
-                and first_arg.children[1].type == 'comp_for':
+                and first_arg.children[1].type in _COMP_FOR_TYPES:
             # e.g. foo(x for x in [], b)
             return len(node.children) >= 2
         else:
@@ -787,7 +789,8 @@ class _ArglistRule(SyntaxRule):
                     if first == '*':
                         if kw_unpacking_only:
                             # foo(**kwargs, *args)
-                            message = "iterable argument unpacking follows keyword argument unpacking"
+                            message = "iterable argument unpacking " \
+                                      "follows keyword argument unpacking"
                             self.add_issue(argument, message=message)
                         else:
                             kw_unpacking_only = True
@@ -809,6 +812,7 @@ class _ArglistRule(SyntaxRule):
                     message = "positional argument follows keyword argument"
                     self.add_issue(argument, message=message)
 
+
 @ErrorFinder.register_rule(type='parameters')
 @ErrorFinder.register_rule(type='lambdef')
 class _ParameterRule(SyntaxRule):
@@ -889,7 +893,13 @@ class _CheckAssignmentRule(SyntaxRule):
             error = _get_comprehension_type(node)
             if error is None:
                 if second.type == 'dictorsetmaker':
-                    error = 'literal'
+                    if self._normalizer.version < (3, 8):
+                        error = 'literal'
+                    else:
+                        if second.children[1] == ':':
+                            error = 'dict display'
+                        else:
+                            error = 'set display'
                 elif first in ('(', '['):
                     if second.type == 'yield_expr':
                         error = 'yield expression'
@@ -901,7 +911,10 @@ class _CheckAssignmentRule(SyntaxRule):
                 else:  # Everything handled, must be useless brackets.
                     self._check_assignment(second, is_deletion)
         elif type_ == 'keyword':
-            error = 'keyword'
+            if self._normalizer.version < (3, 8):
+                error = 'keyword'
+            else:
+                error = str(node.value)
         elif type_ == 'operator':
             if node.value == '...':
                 error = 'Ellipsis'
@@ -929,25 +942,29 @@ class _CheckAssignmentRule(SyntaxRule):
         elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
             for child in node.children[::2]:
                 self._check_assignment(child, is_deletion)
         elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
               or '_test' in type_
               or type_ in ('term', 'factor')):
             error = 'operator'
 
         if error is not None:
-            message = "can't %s %s" % ("delete" if is_deletion else "assign to", error)
+            cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
+            message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
             self.add_issue(node, message=message)
 
 
 @ErrorFinder.register_rule(type='comp_for')
+@ErrorFinder.register_rule(type='sync_comp_for')
 class _CompForRule(_CheckAssignmentRule):
     message = "asynchronous comprehension outside of an asynchronous function"
 
     def is_issue(self, node):
-        # Some of the nodes here are already used, so no else if
-        expr_list = node.children[1 + int(node.children[0] == 'async')]
-        if expr_list.type != 'expr_list':  # Already handled.
-            self._check_assignment(expr_list)
+        if node.type != 'comp_for' or self._normalizer.version < (3, 8):
+            # comp_for was replaced by sync_comp_for in Python 3.8.
+            expr_list = node.children[1 + int(node.children[0] == 'async')]
+            if expr_list.type != 'expr_list':  # Already handled.
+                self._check_assignment(expr_list)
 
         return node.children[0] == 'async' \
             and not self._normalizer.context.is_async_funcdef()
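
The message rework above is driven by the grammar version, so the same invalid code yields the old or the new (Python 3.8 style) wording. A small sketch; the commented output is what the diffed rules should produce, not captured output:

import parso

code = 'True = 1\n'  # assignment to a keyword
for version in ('3.7', '3.8'):
    grammar = parso.load_grammar(version=version)
    for error in grammar.iter_errors(grammar.parse(code)):
        print(version, error.message)
# Expected per the rules above:
#   3.7 SyntaxError: can't assign to keyword
#   3.8 SyntaxError: cannot assign to True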

parso/python/tokenize.py

@@ -419,8 +419,6 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             tos = fstring_stack[-1]
             if not tos.is_in_expr():
                 string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos)
-                if pos == max:
-                    break
                 if string:
                     yield PythonToken(
                         FSTRING_STRING, string,
@@ -431,6 +429,8 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                     )
                     tos.previous_lines = ''
                     continue
+                if pos == max:
+                    break
 
             rest = line[pos:]
             fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary(
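
Moving the pos == max check after the FSTRING_STRING emission lets a string that runs to the end of the input be yielded before the loop breaks, so unterminated f-strings now round-trip. A quick sketch mirroring the new test_roundtrip cases below:

import parso

for code in ('f"foo', 'f"""foo'):
    tree = parso.parse(code, version='3.6')  # f-strings need a 3.6+ grammar
    assert tree.get_code() == code           # error recovery preserves the text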

pytest.ini

@@ -1,6 +1,8 @@
 [pytest]
 addopts = --doctest-modules
+testpaths = parso test
+
 # Ignore broken files in blackbox test directories
 norecursedirs = .* docs scripts normalizer_issue_files build

setup.cfg

@@ -1,2 +1,12 @@
 [bdist_wheel]
 universal=1
+
+[flake8]
+max-line-length = 100
+ignore =
+    # do not use bare 'except'
+    E722,
+    # don't know why this was ever even an option, 1+1 should be possible.
+    E226,
+    # line break before binary operator
+    W503,

test/failing_examples.py

@@ -19,14 +19,6 @@ def build_nested(code, depth, base='def f():\n'):
 FAILING_EXAMPLES = [
     '1 +',
     '?',
-    # Python/compile.c
-    dedent('''\
-        for a in [1]:
-            try:
-                pass
-            finally:
-                continue
-        '''),  # 'continue' not supported inside 'finally' clause"
     'continue',
     'break',
     'return',
@@ -259,10 +251,6 @@
 if sys.version_info >= (3, 6):
     FAILING_EXAMPLES += GLOBAL_NONLOCAL_ERROR
-    FAILING_EXAMPLES += [
-        # Raises multiple errors in previous versions.
-        'async def foo():\n def nofoo():[x async for x in []]',
-    ]
 
 if sys.version_info >= (3, 5):
     FAILING_EXAMPLES += [
         # Raises different errors so just ignore them for now.
@@ -319,3 +307,15 @@ if sys.version_info[:2] <= (3, 4):
         'a = *[1], 2',
         '(*[1], 2)',
     ]
+
+if sys.version_info[:2] < (3, 8):
+    FAILING_EXAMPLES += [
+        # Python/compile.c
+        dedent('''\
+            for a in [1]:
+                try:
+                    pass
+                finally:
+                    continue
+            '''),  # 'continue' not supported inside 'finally' clause"
+    ]
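
The new guard reflects that the 3.8 grammar accepts continue inside finally while older versions reject it. A hedged sketch of the version split (the comment shows expected, not captured, output):

import parso

code = 'for a in [1]:\n    try:\n        pass\n    finally:\n        continue\n'
for version in ('3.7', '3.8'):
    grammar = parso.load_grammar(version=version)
    errors = list(grammar.iter_errors(grammar.parse(code)))
    print(version, [e.message for e in errors])
# Expected: one error on 3.7 ('continue' not supported inside 'finally' clause),
# none on 3.8.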

test/test_cache.py

@@ -10,6 +10,7 @@ from parso.cache import _NodeCacheItem, save_module, load_module, \
     _get_hashed_path, parser_cache, _load_from_file_system, _save_to_file_system
 from parso import load_grammar
 from parso import cache
+from parso import file_io
 
 
 @pytest.fixture()
@@ -76,12 +77,13 @@ def test_modulepickling_simulate_deleted_cache(tmpdir):
     path = tmpdir.dirname + '/some_path'
     with open(path, 'w'):
         pass
+    io = file_io.FileIO(path)
 
-    save_module(grammar._hashed, path, module, [])
-    assert load_module(grammar._hashed, path) == module
+    save_module(grammar._hashed, io, module, lines=[])
+    assert load_module(grammar._hashed, io) == module
 
     unlink(_get_hashed_path(grammar._hashed, path))
     parser_cache.clear()
 
-    cached2 = load_module(grammar._hashed, path)
+    cached2 = load_module(grammar._hashed, io)
     assert cached2 is None

test/test_fstring.py

@@ -79,11 +79,17 @@ def test_tokenize_start_pos(code, positions):
     assert positions == [p.start_pos for p in tokens]
 
 
-def test_roundtrip(grammar):
-    code = dedent("""\
-        f'''s{
-           str.uppe
-        '''
-        """)
+@pytest.mark.parametrize(
+    'code', [
+        dedent("""\
+            f'''s{
+               str.uppe
+            '''
+            """),
+        'f"foo',
+        'f"""foo',
+    ]
+)
+def test_roundtrip(grammar, code):
     tree = grammar.parse(code)
     assert tree.get_code() == code

test/test_python_errors.py

@@ -41,6 +41,29 @@ def test_python_exception_matches(code):
     assert line_nr is None or line_nr == error.start_pos[0]
 
 
+def test_non_async_in_async():
+    """
+    This example doesn't work with FAILING_EXAMPLES, because the line numbers
+    are not always the same / incorrect in Python 3.8.
+    """
+    if sys.version_info[:2] < (3, 5):
+        pytest.skip()
+
+    # Raises multiple errors in previous versions.
+    code = 'async def foo():\n def nofoo():[x async for x in []]'
+    wanted, line_nr = _get_actual_exception(code)
+
+    errors = _get_error_list(code)
+    if errors:
+        error, = errors
+        actual = error.message
+        assert actual in wanted
+        if sys.version_info[:2] < (3, 8):
+            assert line_nr == error.start_pos[0]
+        else:
+            assert line_nr == 0  # For whatever reason this is zero in Python 3.8+
+
+
 @pytest.mark.parametrize(
     ('code', 'positions'), [
         ('1 +', [(1, 3)]),
@@ -103,7 +126,8 @@ def _get_actual_exception(code):
         # The python 3.5+ way, a bit nicer.
         wanted = 'SyntaxError: positional argument follows keyword argument'
     elif wanted == 'SyntaxError: assignment to keyword':
-        return [wanted, "SyntaxError: can't assign to keyword"], line_nr
+        return [wanted, "SyntaxError: can't assign to keyword",
+                'SyntaxError: cannot assign to __debug__'], line_nr
     elif wanted == 'SyntaxError: assignment to None':
         # Python 2.6 does has a slightly different error.
         wanted = 'SyntaxError: cannot assign to None'

tox.ini

@@ -1,19 +1,16 @@
 [tox]
-envlist = py27, py33, py34, py35, py36, py37, pypy
+envlist = {py26,py27,py33,py34,py35,py36,py37}
 [testenv]
 extras = testing
 deps =
     py26,py33: pytest>=3.0.7,<3.3
     py26,py33: setuptools<37
+    coverage: coverage
 setenv =
     # https://github.com/tomchristie/django-rest-framework/issues/1957
     # tox corrupts __pycache__, solution from here:
     PYTHONDONTWRITEBYTECODE=1
+    coverage: TOX_TESTENV_COMMAND=coverage run -m pytest
 commands =
-    pytest {posargs:parso test}
-
-[testenv:cov]
-deps =
-    coverage
-commands =
-    coverage run --source parso -m pytest
-    coverage report
+    {env:TOX_TESTENV_COMMAND:pytest} {posargs}
+    coverage: coverage report