Mirror of https://github.com/davidhalter/parso.git, synced 2025-12-08 05:34:51 +08:00.

Compare commits (73 commits):
| SHA1 |
|---|
| b9725364ab |
| 66ecc264f9 |
| 63b73a05e6 |
| baec4ac58f |
| b5f58ac33c |
| 83cb71f7a1 |
| 30a2b2f40d |
| d81e393c0c |
| 7822f8be84 |
| 93788a3e09 |
| 085f666ca1 |
| 9e546e42de |
| 7b14a86e0a |
| f45941226f |
| e04552b14a |
| cd9c213a62 |
| 561e81df00 |
| 556ce86cde |
| b12dd498bb |
| db10b4fa72 |
| ed38518052 |
| ebc69545c7 |
| 67ebb6acac |
| bcf76949b6 |
| 6c7b397cc7 |
| 1927ba7254 |
| a6c33411d4 |
| f8dce76ef7 |
| 3242e36859 |
| 734a4b0e67 |
| 1047204654 |
| ae6af7849e |
| e1632cdadc |
| 7f0dd35c37 |
| ad88783ac9 |
| 8550a52e48 |
| c88a736e35 |
| a07146f8a5 |
| 0c0aa31a91 |
| 77327a4cea |
| 8bbd304eb9 |
| 62fd03edda |
| 12063d42fc |
| c86af743df |
| fb2ea551d5 |
| ce170e8aae |
| d674bc9895 |
| 0d9886c22a |
| 9f8a68677d |
| a950b82066 |
| 38b7763e9a |
| cf880f43d4 |
| 8e49d8ab5f |
| 77b3ad5843 |
| 29e3545241 |
| 3d95b65b21 |
| b86ea25435 |
| 4c42a82ebc |
| 43651ef219 |
| 419d9e3174 |
| 2bef3cf6ff |
| 8e95820d78 |
| c18c89eb6b |
| afc556d809 |
| cdb791fbdb |
| 93f1cdebbc |
| d3ceafee01 |
| 237dc9e135 |
| bd37353042 |
| 51a044cc70 |
| 2cd0d6c9fc |
| 287a86c242 |
| 0234a70e95 |
`.travis.yml`:

```diff
@@ -6,7 +6,7 @@ python:
   - 3.5
   - 3.6
   - 3.7
-  - 3.8
+  - 3.8.2
   - pypy2.7-6.0
   - pypy3.5-6.0
 matrix:
```
`CHANGELOG.rst`:

```diff
@@ -3,6 +3,15 @@
 Changelog
 ---------

+0.7.0 (2020-04-13)
+++++++++++++++++++
+
+- Fix a lot of annoying bugs in the diff parser. The fuzzer did not find
+  issues anymore even after running it for more than 24 hours (500k tests).
+- Small grammar change: suites can now contain newlines even after a newline.
+  This should really not matter if you don't use error recovery. It allows for
+  nicer error recovery.
+
 0.6.2 (2020-02-27)
 ++++++++++++++++++

```
`conftest.py`:

```diff
@@ -87,12 +87,12 @@ def pytest_configure(config):
         root = logging.getLogger()
         root.setLevel(logging.DEBUG)

-        ch = logging.StreamHandler(sys.stdout)
-        ch.setLevel(logging.DEBUG)
+        #ch = logging.StreamHandler(sys.stdout)
+        #ch.setLevel(logging.DEBUG)
         #formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
         #ch.setFormatter(formatter)

-        root.addHandler(ch)
+        #root.addHandler(ch)


 class Checker():
```
`parso/__init__.py`:

```diff
@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode


-__version__ = '0.6.2'
+__version__ = '0.7.0'


 def parse(code=None, **kwargs):
```
`parso/grammar.py`:

```diff
@@ -138,7 +138,7 @@ class Grammar(object):
                     cache_path=cache_path)
                 return new_node

-        tokens = self._tokenizer(lines, start_pos)
+        tokens = self._tokenizer(lines, start_pos=start_pos)

         p = self._parser(
             self._pgen_grammar,
@@ -215,8 +215,8 @@ class PythonGrammar(Grammar):
         )
         self.version_info = version_info

-    def _tokenize_lines(self, lines, start_pos):
-        return tokenize_lines(lines, self.version_info, start_pos=start_pos)
+    def _tokenize_lines(self, lines, **kwargs):
+        return tokenize_lines(lines, self.version_info, **kwargs)

     def _tokenize(self, code):
         # Used by Jedi.
```
`parso/parser.py`:

```diff
@@ -134,7 +134,7 @@ class BaseParser(object):
             # However, the error recovery might have added the token again, if
             # the stack is empty, we're fine.
             raise InternalParseError(
-                "incomplete input", token.type, token.value, token.start_pos
+                "incomplete input", token.type, token.string, token.start_pos
             )

         if len(self.stack) > 1:
```
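The `token.value` to `token.string` change above is a genuine fix: the tokens handed to the parser are namedtuples whose fields are `type`, `string`, `start_pos` and `prefix` (`value` lives on tree leaves, not on tokens), so the old error path would itself crash with an `AttributeError` while trying to report "incomplete input". A minimal sketch of the token shape, assuming parso 0.7.0:

```python
# Sketch: PythonToken is a namedtuple of (type, string, start_pos, prefix).
from parso.python.token import PythonTokenTypes
from parso.python.tokenize import PythonToken

tok = PythonToken(PythonTokenTypes.NAME, 'foo', (1, 0), '')
print(tok.string)   # 'foo' -- the attribute the fixed code uses
print(tok.end_pos)  # (1, 3), computed from start_pos and string
```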
`parso/python/diff.py`:

```diff
@@ -1,9 +1,29 @@
 """
-Basically a contains parser that is faster, because it tries to parse only
-parts and if anything changes, it only reparses the changed parts.
+The diff parser is trying to be a faster version of the normal parser by trying
+to reuse the nodes of a previous pass over the same file. This is also called
+incremental parsing in parser literature. The difference is mostly that with
+incremental parsing you get a range that needs to be reparsed. Here we
+calculate that range ourselves by using difflib. After that it's essentially
+incremental parsing.

-It works with a simple diff in the beginning and will try to reuse old parser
-fragments.
+The biggest issue of this approach is that we reuse nodes in a mutable way. The
+intial design and idea is quite problematic for this parser, but it is also
+pretty fast. Measurements showed that just copying nodes in Python is simply
+quite a bit slower (especially for big files >3 kLOC). Therefore we did not
+want to get rid of the mutable nodes, since this is usually not an issue.
+
+This is by far the hardest software I ever wrote, exactly because the initial
+design is crappy. When you have to account for a lot of mutable state, it
+creates a ton of issues that you would otherwise not have. This file took
+probably 3-6 months to write, which is insane for a parser.
+
+There is a fuzzer in that helps test this whole thing. Please use it if you
+make changes here. If you run the fuzzer like::
+
+    test/fuzz_diff_parser.py random -n 100000
+
+you can be pretty sure that everything is still fine. I sometimes run the
+fuzzer up to 24h to make sure everything is still ok.
 """
 import re
 import difflib
```
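For context, the diff parser this docstring describes is driven through parso's public `Grammar.parse`; a minimal sketch of how a client such as Jedi exercises it, assuming the documented `cache`/`diff_cache` flags:

```python
import parso

grammar = parso.load_grammar()
# First parse: cache the tree under the given path.
grammar.parse(code='def f():\n    return 1\n', path='example.py',
              cache=True, diff_cache=True)
# Second parse after an edit: the diff parser computes the changed line
# range with difflib and reparses only that region of the cached tree.
module = grammar.parse(code='def f():\n    return 2\n', path='example.py',
                       cache=True, diff_cache=True)
print(module.get_code())
```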
```diff
@@ -13,7 +33,7 @@ import logging
 from parso.utils import split_lines
 from parso.python.parser import Parser
 from parso.python.tree import EndMarker
-from parso.python.tokenize import PythonToken
+from parso.python.tokenize import PythonToken, BOM_UTF8_STRING
 from parso.python.token import PythonTokenTypes

 LOG = logging.getLogger(__name__)
@@ -21,21 +41,37 @@ DEBUG_DIFF_PARSER = False

 _INDENTATION_TOKENS = 'INDENT', 'ERROR_DEDENT', 'DEDENT'

+NEWLINE = PythonTokenTypes.NEWLINE
+DEDENT = PythonTokenTypes.DEDENT
+NAME = PythonTokenTypes.NAME
+ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT
+ENDMARKER = PythonTokenTypes.ENDMARKER
+
+
+def _is_indentation_error_leaf(node):
+    return node.type == 'error_leaf' and node.token_type in _INDENTATION_TOKENS
+
+
 def _get_previous_leaf_if_indentation(leaf):
-    while leaf and leaf.type == 'error_leaf' \
-            and leaf.token_type in _INDENTATION_TOKENS:
+    while leaf and _is_indentation_error_leaf(leaf):
         leaf = leaf.get_previous_leaf()
     return leaf


 def _get_next_leaf_if_indentation(leaf):
-    while leaf and leaf.type == 'error_leaf' \
-            and leaf.token_type in _INDENTATION_TOKENS:
-        leaf = leaf.get_previous_leaf()
+    while leaf and _is_indentation_error_leaf(leaf):
+        leaf = leaf.get_next_leaf()
     return leaf


+def _get_suite_indentation(tree_node):
+    return _get_indentation(tree_node.children[1])
+
+
 def _get_indentation(tree_node):
     return tree_node.start_pos[1]


 def _assert_valid_graph(node):
     """
     Checks if the parent/children relationship is correct.
@@ -70,6 +106,10 @@ def _assert_valid_graph(node):
             actual = line, len(splitted[-1])
         else:
             actual = previous_start_pos[0], previous_start_pos[1] + len(content)
+            if content.startswith(BOM_UTF8_STRING) \
+                    and node.get_start_pos_of_prefix() == (1, 0):
+                # Remove the byte order mark
+                actual = actual[0], actual[1] - 1

         assert node.start_pos == actual, (node.start_pos, actual)
     else:
@@ -78,6 +118,26 @@ def _assert_valid_graph(node):
         _assert_valid_graph(child)


+def _assert_nodes_are_equal(node1, node2):
+    try:
+        children1 = node1.children
+    except AttributeError:
+        assert not hasattr(node2, 'children'), (node1, node2)
+        assert node1.value == node2.value, (node1, node2)
+        assert node1.type == node2.type, (node1, node2)
+        assert node1.prefix == node2.prefix, (node1, node2)
+        assert node1.start_pos == node2.start_pos, (node1, node2)
+        return
+    else:
+        try:
+            children2 = node2.children
+        except AttributeError:
+            assert False, (node1, node2)
+    for n1, n2 in zip(children1, children2):
+        _assert_nodes_are_equal(n1, n2)
+    assert len(children1) == len(children2), '\n' + repr(children1) + '\n' + repr(children2)
+
+
 def _get_debug_error_message(module, old_lines, new_lines):
     current_lines = split_lines(module.get_code(), keepends=True)
     current_diff = difflib.unified_diff(new_lines, current_lines)
```
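`_assert_nodes_are_equal` is a debugging helper rather than public API, but its contract is easy to demonstrate: two independent parses of the same source must produce structurally identical trees. A small sketch, assuming parso 0.7.0 where the helper exists:

```python
import parso
from parso.python.diff import _assert_nodes_are_equal  # private helper

source = 'def f():\n    return 1\n'
m1 = parso.parse(source)
m2 = parso.parse(source)
_assert_nodes_are_equal(m1, m2)  # passes silently; AssertionError on mismatch
```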
```diff
@@ -95,6 +155,15 @@ def _get_last_line(node_or_leaf):
         if _ends_with_newline(last_leaf):
             return last_leaf.start_pos[0]
         else:
+            n = last_leaf.get_next_leaf()
+            if n.type == 'endmarker' and '\n' in n.prefix:
+                # This is a very special case and has to do with error recovery in
+                # Parso. The problem is basically that there's no newline leaf at
+                # the end sometimes (it's required in the grammar, but not needed
+                # actually before endmarker, CPython just adds a newline to make
+                # source code pass the parser, to account for that Parso error
+                # recovery allows small_stmt instead of simple_stmt).
+                return last_leaf.end_pos[0] + 1
             return last_leaf.end_pos[0]
@@ -233,7 +302,7 @@ class DiffParser(object):

             if operation == 'equal':
                 line_offset = j1 - i1
-                self._copy_from_old_parser(line_offset, i2, j2)
+                self._copy_from_old_parser(line_offset, i1 + 1, i2, j2)
             elif operation == 'replace':
                 self._parse(until_line=j2)
             elif operation == 'insert':
@@ -249,8 +318,14 @@ class DiffParser(object):
             # If there is reasonable suspicion that the diff parser is not
             # behaving well, this should be enabled.
             try:
-                assert self._module.get_code() == ''.join(new_lines)
+                code = ''.join(new_lines)
+                assert self._module.get_code() == code
                 _assert_valid_graph(self._module)
+                without_diff_parser_module = Parser(
+                    self._pgen_grammar,
+                    error_recovery=True
+                ).parse(self._tokenizer(new_lines))
+                _assert_nodes_are_equal(self._module, without_diff_parser_module)
             except AssertionError:
                 print(_get_debug_error_message(self._module, old_lines, new_lines))
                 raise
@@ -268,7 +343,7 @@ class DiffParser(object):
         if self._module.get_code() != ''.join(lines_new):
             LOG.warning('parser issue:\n%s\n%s', ''.join(old_lines), ''.join(lines_new))

-    def _copy_from_old_parser(self, line_offset, until_line_old, until_line_new):
+    def _copy_from_old_parser(self, line_offset, start_line_old, until_line_old, until_line_new):
         last_until_line = -1
         while until_line_new > self._nodes_tree.parsed_until_line:
             parsed_until_line_old = self._nodes_tree.parsed_until_line - line_offset
@@ -282,12 +357,18 @@ class DiffParser(object):
                 p_children = line_stmt.parent.children
                 index = p_children.index(line_stmt)

-                from_ = self._nodes_tree.parsed_until_line + 1
-                copied_nodes = self._nodes_tree.copy_nodes(
-                    p_children[index:],
-                    until_line_old,
-                    line_offset
-                )
+                if start_line_old == 1 \
+                        and p_children[0].get_first_leaf().prefix.startswith(BOM_UTF8_STRING):
+                    # If there's a BOM in the beginning, just reparse. It's too
+                    # complicated to account for it otherwise.
+                    copied_nodes = []
+                else:
+                    from_ = self._nodes_tree.parsed_until_line + 1
+                    copied_nodes = self._nodes_tree.copy_nodes(
+                        p_children[index:],
+                        until_line_old,
+                        line_offset
+                    )
                 # Match all the nodes that are in the wanted range.
                 if copied_nodes:
                     self._copy_count += 1
@@ -333,7 +414,10 @@ class DiffParser(object):
         node = self._try_parse_part(until_line)
         nodes = node.children

-        self._nodes_tree.add_parsed_nodes(nodes)
+        self._nodes_tree.add_parsed_nodes(nodes, self._keyword_token_indents)
+        if self._replace_tos_indent is not None:
+            self._nodes_tree.indents[-1] = self._replace_tos_indent
+
         LOG.debug(
             'parse_part from %s to %s (to %s in part parser)',
             nodes[0].get_start_pos_of_prefix()[0],
@@ -369,34 +453,39 @@ class DiffParser(object):
         return self._active_parser.parse(tokens=tokens)

     def _diff_tokenize(self, lines, until_line, line_offset=0):
-        is_first_token = True
-        omitted_first_indent = False
-        indents = []
-        tokens = self._tokenizer(lines, (1, 0))
-        stack = self._active_parser.stack
-        for typ, string, start_pos, prefix in tokens:
-            start_pos = start_pos[0] + line_offset, start_pos[1]
-            if typ == PythonTokenTypes.INDENT:
-                indents.append(start_pos[1])
-                if is_first_token:
-                    omitted_first_indent = True
-                    # We want to get rid of indents that are only here because
-                    # we only parse part of the file. These indents would only
-                    # get parsed as error leafs, which doesn't make any sense.
-                    is_first_token = False
-                    continue
-            is_first_token = False
+        was_newline = False
+        indents = self._nodes_tree.indents
+        initial_indentation_count = len(indents)

-            # In case of omitted_first_indent, it might not be dedented fully.
-            # However this is a sign for us that a dedent happened.
-            if typ == PythonTokenTypes.DEDENT \
-                    or typ == PythonTokenTypes.ERROR_DEDENT \
-                    and omitted_first_indent and len(indents) == 1:
-                indents.pop()
-                if omitted_first_indent and not indents:
+        tokens = self._tokenizer(
+            lines,
+            start_pos=(line_offset + 1, 0),
+            indents=indents,
+            is_first_token=line_offset == 0,
+        )
+        stack = self._active_parser.stack
+        self._replace_tos_indent = None
+        self._keyword_token_indents = {}
+        # print('start', line_offset + 1, indents)
+        for token in tokens:
+            # print(token, indents)
+            typ = token.type
+            if typ == DEDENT:
+                if len(indents) < initial_indentation_count:
                     # We are done here, only thing that can come now is an
                     # endmarker or another dedented code block.
-                    typ, string, start_pos, prefix = next(tokens)
+                    while True:
+                        typ, string, start_pos, prefix = token = next(tokens)
+                        if typ in (DEDENT, ERROR_DEDENT):
+                            if typ == ERROR_DEDENT:
+                                # We want to force an error dedent in the next
+                                # parser/pass. To make this possible we just
+                                # increase the location by one.
+                                self._replace_tos_indent = start_pos[1] + 1
+                                pass
+                        else:
+                            break

                     if '\n' in prefix or '\r' in prefix:
                         prefix = re.sub(r'[^\n\r]+\Z', '', prefix)
                     else:
@@ -404,36 +493,38 @@ class DiffParser(object):
                     if start_pos[1] - len(prefix) == 0:
                         prefix = ''
                     yield PythonToken(
-                        PythonTokenTypes.ENDMARKER, '',
-                        (start_pos[0] + line_offset, 0),
+                        ENDMARKER, '',
+                        start_pos,
                         prefix
                     )
                     break
-            elif typ == PythonTokenTypes.NEWLINE and start_pos[0] >= until_line:
-                yield PythonToken(typ, string, start_pos, prefix)
-                # Check if the parser is actually in a valid suite state.
-                if _suite_or_file_input_is_valid(self._pgen_grammar, stack):
-                    start_pos = start_pos[0] + 1, 0
-                    while len(indents) > int(omitted_first_indent):
-                        indents.pop()
-                        yield PythonToken(PythonTokenTypes.DEDENT, '', start_pos, '')
+            elif typ == NEWLINE and token.start_pos[0] >= until_line:
+                was_newline = True
+            elif was_newline:
+                was_newline = False
+                if len(indents) == initial_indentation_count:
+                    # Check if the parser is actually in a valid suite state.
+                    if _suite_or_file_input_is_valid(self._pgen_grammar, stack):
+                        yield PythonToken(ENDMARKER, '', token.start_pos, '')
+                        break

-                    yield PythonToken(PythonTokenTypes.ENDMARKER, '', start_pos, '')
-                    break
-                else:
-                    continue
+            if typ == NAME and token.string in ('class', 'def'):
+                self._keyword_token_indents[token.start_pos] = list(indents)

-            yield PythonToken(typ, string, start_pos, prefix)
+            yield token


 class _NodesTreeNode(object):
-    _ChildrenGroup = namedtuple('_ChildrenGroup', 'prefix children line_offset last_line_offset_leaf')
+    _ChildrenGroup = namedtuple(
+        '_ChildrenGroup',
+        'prefix children line_offset last_line_offset_leaf')

-    def __init__(self, tree_node, parent=None):
+    def __init__(self, tree_node, parent=None, indentation=0):
         self.tree_node = tree_node
         self._children_groups = []
         self.parent = parent
         self._node_children = []
+        self.indentation = indentation

     def finish(self):
         children = []
@@ -461,10 +552,13 @@ class _NodesTreeNode(object):
     def add_child_node(self, child_node):
         self._node_children.append(child_node)

-    def add_tree_nodes(self, prefix, children, line_offset=0, last_line_offset_leaf=None):
+    def add_tree_nodes(self, prefix, children, line_offset=0,
+                       last_line_offset_leaf=None):
         if last_line_offset_leaf is None:
             last_line_offset_leaf = children[-1].get_last_leaf()
-        group = self._ChildrenGroup(prefix, children, line_offset, last_line_offset_leaf)
+        group = self._ChildrenGroup(
+            prefix, children, line_offset, last_line_offset_leaf
+        )
         self._children_groups.append(group)

     def get_last_line(self, suffix):
@@ -491,6 +585,9 @@ class _NodesTreeNode(object):
                 return max(line, self._node_children[-1].get_last_line(suffix))
         return line

+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
+

 class _NodesTree(object):
     def __init__(self, module):
@@ -499,34 +596,19 @@ class _NodesTree(object):
         self._module = module
         self._prefix_remainder = ''
         self.prefix = ''
+        self.indents = [0]

     @property
     def parsed_until_line(self):
         return self._working_stack[-1].get_last_line(self.prefix)

-    def _get_insertion_node(self, indentation_node):
-        indentation = indentation_node.start_pos[1]
-
-        # find insertion node
-        while True:
-            node = self._working_stack[-1]
-            tree_node = node.tree_node
-            if tree_node.type == 'suite':
-                # A suite starts with NEWLINE, ...
-                node_indentation = tree_node.children[1].start_pos[1]
-
-                if indentation >= node_indentation:  # Not a Dedent
-                    # We might be at the most outer layer: modules. We
-                    # don't want to depend on the first statement
-                    # having the right indentation.
-                    return node
-
-            elif tree_node.type == 'file_input':
+    def _update_insertion_node(self, indentation):
+        for node in reversed(list(self._working_stack)):
+            if node.indentation < indentation or node is self._working_stack[0]:
                 return node

             self._working_stack.pop()

-    def add_parsed_nodes(self, tree_nodes):
+    def add_parsed_nodes(self, tree_nodes, keyword_token_indents):
         old_prefix = self.prefix
         tree_nodes = self._remove_endmarker(tree_nodes)
         if not tree_nodes:
@@ -535,23 +617,27 @@ class _NodesTree(object):

         assert tree_nodes[0].type != 'newline'

-        node = self._get_insertion_node(tree_nodes[0])
+        node = self._update_insertion_node(tree_nodes[0].start_pos[1])
         assert node.tree_node.type in ('suite', 'file_input')
         node.add_tree_nodes(old_prefix, tree_nodes)
         # tos = Top of stack
-        self._update_tos(tree_nodes[-1])
+        self._update_parsed_node_tos(tree_nodes[-1], keyword_token_indents)

-    def _update_tos(self, tree_node):
-        if tree_node.type in ('suite', 'file_input'):
-            new_tos = _NodesTreeNode(tree_node)
+    def _update_parsed_node_tos(self, tree_node, keyword_token_indents):
+        if tree_node.type == 'suite':
+            def_leaf = tree_node.parent.children[0]
+            new_tos = _NodesTreeNode(
+                tree_node,
+                indentation=keyword_token_indents[def_leaf.start_pos][-1],
+            )
             new_tos.add_tree_nodes('', list(tree_node.children))

             self._working_stack[-1].add_child_node(new_tos)
             self._working_stack.append(new_tos)

-            self._update_tos(tree_node.children[-1])
+            self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)
         elif _func_or_class_has_suite(tree_node):
-            self._update_tos(tree_node.children[-1])
+            self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)

     def _remove_endmarker(self, tree_nodes):
         """
@@ -561,7 +647,8 @@ class _NodesTree(object):
         is_endmarker = last_leaf.type == 'endmarker'
         self._prefix_remainder = ''
         if is_endmarker:
-            separation = max(last_leaf.prefix.rfind('\n'), last_leaf.prefix.rfind('\r'))
+            prefix = last_leaf.prefix
+            separation = max(prefix.rfind('\n'), prefix.rfind('\r'))
             if separation > -1:
                 # Remove the whitespace part of the prefix after a newline.
                 # That is not relevant if parentheses were opened. Always parse
@@ -577,6 +664,26 @@ class _NodesTree(object):
             tree_nodes = tree_nodes[:-1]
         return tree_nodes

+    def _get_matching_indent_nodes(self, tree_nodes, is_new_suite):
+        # There might be a random dedent where we have to stop copying.
+        # Invalid indents are ok, because the parser handled that
+        # properly before. An invalid dedent can happen, because a few
+        # lines above there was an invalid indent.
+        node_iterator = iter(tree_nodes)
+        if is_new_suite:
+            yield next(node_iterator)
+
+        first_node = next(node_iterator)
+        indent = _get_indentation(first_node)
+        if not is_new_suite and indent not in self.indents:
+            return
+        yield first_node
+
+        for n in node_iterator:
+            if _get_indentation(n) != indent:
+                return
+            yield n
+
     def copy_nodes(self, tree_nodes, until_line, line_offset):
         """
         Copies tree nodes from the old parser tree.
@@ -588,19 +695,38 @@ class _NodesTree(object):
             # issues.
             return []

-        self._get_insertion_node(tree_nodes[0])
+        indentation = _get_indentation(tree_nodes[0])
+        old_working_stack = list(self._working_stack)
+        old_prefix = self.prefix
+        old_indents = self.indents
+        self.indents = [i for i in self.indents if i <= indentation]

-        new_nodes, self._working_stack, self.prefix = self._copy_nodes(
+        self._update_insertion_node(indentation)
+
+        new_nodes, self._working_stack, self.prefix, added_indents = self._copy_nodes(
             list(self._working_stack),
             tree_nodes,
             until_line,
             line_offset,
             self.prefix,
         )
+        if new_nodes:
+            self.indents += added_indents
+        else:
+            self._working_stack = old_working_stack
+            self.prefix = old_prefix
+            self.indents = old_indents
         return new_nodes

-    def _copy_nodes(self, working_stack, nodes, until_line, line_offset, prefix=''):
+    def _copy_nodes(self, working_stack, nodes, until_line, line_offset,
+                    prefix='', is_nested=False):
         new_nodes = []
+        added_indents = []
+
+        nodes = list(self._get_matching_indent_nodes(
+            nodes,
+            is_new_suite=is_nested,
+        ))

         new_prefix = ''
         for node in nodes:
@@ -620,26 +746,83 @@ class _NodesTree(object):
                 if _func_or_class_has_suite(node):
                     new_nodes.append(node)
                 break
+            try:
+                c = node.children
+            except AttributeError:
+                pass
+            else:
+                # This case basically appears with error recovery of one line
+                # suites like `def foo(): bar.-`. In this case we might not
+                # include a newline in the statement and we need to take care
+                # of that.
+                n = node
+                if n.type == 'decorated':
+                    n = n.children[-1]
+                if n.type in ('async_funcdef', 'async_stmt'):
+                    n = n.children[-1]
+                if n.type in ('classdef', 'funcdef'):
+                    suite_node = n.children[-1]
+                else:
+                    suite_node = c[-1]
+
+                if suite_node.type in ('error_leaf', 'error_node'):
+                    break

             new_nodes.append(node)

-        if new_nodes:
+        # Pop error nodes at the end from the list
+        if new_nodes:
+            while new_nodes:
+                last_node = new_nodes[-1]
+                if (last_node.type in ('error_leaf', 'error_node')
+                        or _is_flow_node(new_nodes[-1])):
+                    # Error leafs/nodes don't have a defined start/end. Error
+                    # nodes might not end with a newline (e.g. if there's an
+                    # open `(`). Therefore ignore all of them unless they are
+                    # succeeded with valid parser state.
+                    # If we copy flows at the end, they might be continued
+                    # after the copy limit (in the new parser).
+                    # In this while loop we try to remove until we find a newline.
+                    new_prefix = ''
+                    new_nodes.pop()
+                    while new_nodes:
+                        last_node = new_nodes[-1]
+                        if last_node.get_last_leaf().type == 'newline':
+                            break
+                        new_nodes.pop()
+                    continue
+                if len(new_nodes) > 1 and new_nodes[-2].type == 'error_node':
+                    # The problem here is that Parso error recovery sometimes
+                    # influences nodes before this node.
+                    # Since the new last node is an error node this will get
+                    # cleaned up in the next while iteration.
+                    new_nodes.pop()
+                    continue
+                break

         if not new_nodes:
-            return [], working_stack, prefix
+            return [], working_stack, prefix, added_indents

         tos = working_stack[-1]
         last_node = new_nodes[-1]
         had_valid_suite_last = False
+        # Pop incomplete suites from the list
         if _func_or_class_has_suite(last_node):
             suite = last_node
             while suite.type != 'suite':
                 suite = suite.children[-1]

-            suite_tos = _NodesTreeNode(suite)
+            indent = _get_suite_indentation(suite)
+            added_indents.append(indent)
+
+            suite_tos = _NodesTreeNode(suite, indentation=_get_indentation(last_node))
             # Don't need to pass line_offset here, it's already done by the
             # parent.
-            suite_nodes, new_working_stack, new_prefix = self._copy_nodes(
-                working_stack + [suite_tos], suite.children, until_line, line_offset
+            suite_nodes, new_working_stack, new_prefix, ai = self._copy_nodes(
+                working_stack + [suite_tos], suite.children, until_line, line_offset,
+                is_nested=True,
             )
+            added_indents += ai
             if len(suite_nodes) < 2:
                 # A suite only with newline is not valid.
                 new_nodes.pop()
@@ -650,25 +833,6 @@ class _NodesTree(object):
                 working_stack = new_working_stack
                 had_valid_suite_last = True

-        if new_nodes:
-            last_node = new_nodes[-1]
-            if (last_node.type in ('error_leaf', 'error_node') or
-                    _is_flow_node(new_nodes[-1])):
-                # Error leafs/nodes don't have a defined start/end. Error
-                # nodes might not end with a newline (e.g. if there's an
-                # open `(`). Therefore ignore all of them unless they are
-                # succeeded with valid parser state.
-                # If we copy flows at the end, they might be continued
-                # after the copy limit (in the new parser).
-                # In this while loop we try to remove until we find a newline.
-                new_prefix = ''
-                new_nodes.pop()
-                while new_nodes:
-                    last_node = new_nodes[-1]
-                    if last_node.get_last_leaf().type == 'newline':
-                        break
-                    new_nodes.pop()
-
         if new_nodes:
             if not _ends_with_newline(new_nodes[-1].get_last_leaf()) and not had_valid_suite_last:
                 p = new_nodes[-1].get_next_leaf().prefix
@@ -688,11 +852,13 @@ class _NodesTree(object):
                 assert last_line_offset_leaf == ':'
             else:
                 last_line_offset_leaf = new_nodes[-1].get_last_leaf()
-            tos.add_tree_nodes(prefix, new_nodes, line_offset, last_line_offset_leaf)
+            tos.add_tree_nodes(
+                prefix, new_nodes, line_offset, last_line_offset_leaf,
+            )
             prefix = new_prefix
             self._prefix_remainder = ''

-        return new_nodes, working_stack, prefix
+        return new_nodes, working_stack, prefix, added_indents

     def close(self):
         self._base_node.finish()
@@ -708,6 +874,8 @@ class _NodesTree(object):
         lines = split_lines(self.prefix)
         assert len(lines) > 0
         if len(lines) == 1:
+            if lines[0].startswith(BOM_UTF8_STRING) and end_pos == [1, 0]:
+                end_pos[1] -= 1
             end_pos[1] += len(lines[0])
         else:
             end_pos[0] += len(lines) - 1
```
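Several of the hunks above special-case the UTF-8 byte order mark. The behaviour they account for: parso keeps the BOM in the first leaf's prefix and does not count it as a column, so positions start at (1, 0) regardless. A quick illustration:

```python
import parso

module = parso.parse('\ufefffoo = 1\n')
first_leaf = module.children[0].get_first_leaf()
print(repr(first_leaf.prefix))  # '\ufeff': the BOM lives in the prefix
print(first_leaf.start_pos)     # (1, 0): it does not advance the column
```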
`parso/python/errors.py`:

```diff
@@ -976,9 +976,7 @@ class _CheckAssignmentRule(SyntaxRule):

         if error is not None:
             if is_namedexpr:
-                # c.f. CPython bpo-39176, should be changed in next release
-                # message = 'cannot use assignment expressions with %s' % error
-                message = 'cannot use named assignment with %s' % error
+                message = 'cannot use assignment expressions with %s' % error
             else:
                 cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
                 message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
```
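These messages surface through `Grammar.iter_errors`. A small sketch of the same code path, using the `del` branch shown above (the exact wording follows the diff):

```python
import parso

grammar = parso.load_grammar(version='3.8')
module = grammar.parse('del f()\n')
for issue in grammar.iter_errors(module):
    print(issue.message)  # "cannot delete function call" on 3.8+, "can't" before
```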
The same two-line change is applied to each of the per-version grammar files (`parso/python/grammar*.txt`):

```diff
@@ -16,7 +16,7 @@
 # eval_input is the input for the eval() and input() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER

 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
@@ -30,7 +30,7 @@ varargslist: ((fpdef ['=' test] ',')*
 fpdef: NAME | '(' fplist ')'
 fplist: fpdef (',' fpdef)* [',']

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | exec_stmt | assert_stmt)
```

```diff
@@ -16,7 +16,7 @@
 # eval_input is the input for the eval() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER

 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
@@ -33,7 +33,7 @@ varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
         | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
 vfpdef: NAME

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
```

```diff
@@ -16,7 +16,7 @@
 # eval_input is the input for the eval() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER

 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
@@ -33,7 +33,7 @@ varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
         | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
 vfpdef: NAME

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
```

```diff
@@ -16,7 +16,7 @@
 # eval_input is the input for the eval() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER

 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
@@ -38,7 +38,7 @@ varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
         | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
 vfpdef: NAME

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
```

```diff
@@ -9,7 +9,7 @@
 # eval_input is the input for the eval() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER
 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
 decorators: decorator+
@@ -35,7 +35,7 @@ varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
         )
 vfpdef: NAME

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
```

```diff
@@ -9,7 +9,7 @@
 # eval_input is the input for the eval() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER
 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
 decorators: decorator+
@@ -33,7 +33,7 @@ varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
         )
 vfpdef: NAME

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
```

```diff
@@ -9,7 +9,7 @@
 # eval_input is the input for the eval() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER

 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
@@ -46,7 +46,7 @@ varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef [
         )
 vfpdef: NAME

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
```

```diff
@@ -9,7 +9,7 @@
 # eval_input is the input for the eval() functions.
 # NB: compound_stmt in single_input is followed by extra NEWLINE!
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-file_input: (NEWLINE | stmt)* ENDMARKER
+file_input: stmt* ENDMARKER
 eval_input: testlist NEWLINE* ENDMARKER

 decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
@@ -46,7 +46,7 @@ varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef [
         )
 vfpdef: NAME

-stmt: simple_stmt | compound_stmt
+stmt: simple_stmt | compound_stmt | NEWLINE
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
```
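This is the grammar change the changelog calls out: `NEWLINE` moves from `file_input` into `stmt`, so stray newlines are ordinary statements as far as the grammar is concerned, which gives error recovery more room inside suites. Round-tripping is unaffected; a quick check:

```python
import parso

code = '\n\nif x:\n\n    pass\n\n'
module = parso.parse(code)
# Blank lines are absorbed without error nodes, and the tree still
# reproduces the source byte for byte.
assert module.get_code() == code
```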
`parso/python/parser.py`:

```diff
@@ -126,10 +126,10 @@ class Parser(BaseParser):

         if self._start_nonterminal == 'file_input' and \
                 (token.type == PythonTokenTypes.ENDMARKER
-                 or token.type == DEDENT and '\n' not in last_leaf.value
-                 and '\r' not in last_leaf.value):
+                 or token.type == DEDENT and not last_leaf.value.endswith('\n')
+                 and not last_leaf.value.endswith('\r')):
             # In Python statements need to end with a newline. But since it's
-            # possible (and valid in Python ) that there's no newline at the
+            # possible (and valid in Python) that there's no newline at the
             # end of a file, we have to recover even if the user doesn't want
             # error recovery.
             if self.stack[-1].dfa.from_rule == 'simple_stmt':
@@ -208,6 +208,7 @@ class Parser(BaseParser):
             o = self._omit_dedent_list
             if o and o[-1] == self._indent_counter:
                 o.pop()
+                self._indent_counter -= 1
                 continue

             self._indent_counter -= 1
```
`parso/python/tokenize.py`:

```diff
@@ -12,7 +12,6 @@ memory optimizations here.
 from __future__ import absolute_import

 import sys
-import string
 import re
 from collections import namedtuple
 import itertools as _itertools
@@ -218,10 +217,10 @@ def _create_token_collection(version_info):
     Funny = group(Operator, Bracket, Special)

     # First (or only) line of ' or " string.
-    ContStr = group(StringPrefix + r"'[^\r\n'\\]*(?:\\.[^\r\n'\\]*)*" +
-                    group("'", r'\\(?:\r\n?|\n)'),
-                    StringPrefix + r'"[^\r\n"\\]*(?:\\.[^\r\n"\\]*)*' +
-                    group('"', r'\\(?:\r\n?|\n)'))
+    ContStr = group(StringPrefix + r"'[^\r\n'\\]*(?:\\.[^\r\n'\\]*)*"
+                    + group("'", r'\\(?:\r\n?|\n)'),
+                    StringPrefix + r'"[^\r\n"\\]*(?:\\.[^\r\n"\\]*)*'
+                    + group('"', r'\\(?:\r\n?|\n)'))
     pseudo_extra_pool = [Comment, Triple]
     all_quotes = '"', "'", '"""', "'''"
     if fstring_prefixes:
@@ -258,11 +257,14 @@ def _create_token_collection(version_info):
             fstring_pattern_map[t + quote] = quote

     ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except',
-                           'finally', 'while', 'with', 'return')
+                           'finally', 'while', 'with', 'return', 'continue',
+                           'break', 'del', 'pass', 'global', 'assert')
+    if version_info >= (3, 5):
+        ALWAYS_BREAK_TOKENS += ('async', 'nonlocal')
     pseudo_token_compiled = _compile(PseudoToken)
     return TokenCollection(
         pseudo_token_compiled, single_quoted, triple_quoted, endpats,
-        whitespace, fstring_pattern_map, ALWAYS_BREAK_TOKENS
+        whitespace, fstring_pattern_map, set(ALWAYS_BREAK_TOKENS)
     )
```
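The always-break keywords are the tokenizer's recovery anchors: hitting one of them abandons any open parentheses (and f-strings), so a later definition can still be parsed normally. Roughly what that buys you (the exact node types here are an assumption about current parso output):

```python
import parso

# `foo(` is never closed, but `def` is an always-break token, so the
# tokenizer drops the open paren and the function still parses.
module = parso.parse('foo(\ndef bar():\n    pass\n')
print([child.type for child in module.children])
# e.g. ['error_node', 'funcdef', 'endmarker']
```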
Continuing in `parso/python/tokenize.py`:

```diff
@@ -311,7 +313,7 @@ class FStringNode(object):
         return not self.is_in_expr() and self.format_spec_count


-def _close_fstring_if_necessary(fstring_stack, string, start_pos, additional_prefix):
+def _close_fstring_if_necessary(fstring_stack, string, line_nr, column, additional_prefix):
     for fstring_stack_index, node in enumerate(fstring_stack):
         lstripped_string = string.lstrip()
         len_lstrip = len(string) - len(lstripped_string)
@@ -319,7 +321,7 @@ def _close_fstring_if_necessary(fstring_stack, string, start_pos, additional_prefix):
             token = PythonToken(
                 FSTRING_END,
                 node.quote,
-                start_pos,
+                (line_nr, column + len_lstrip),
                 prefix=additional_prefix+string[:len_lstrip],
             )
             additional_prefix = ''
@@ -381,13 +383,14 @@ def _print_tokens(func):
     """
     def wrapper(*args, **kwargs):
         for token in func(*args, **kwargs):
             print(token)  # This print is intentional for debugging!
             yield token

     return wrapper


 # @_print_tokens
-def tokenize_lines(lines, version_info, start_pos=(1, 0)):
+def tokenize_lines(lines, version_info, start_pos=(1, 0), indents=None, is_first_token=True):
     """
     A heavily modified Python standard library tokenizer.
@@ -398,17 +401,19 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
     def dedent_if_necessary(start):
         while start < indents[-1]:
             if start > indents[-2]:
-                yield PythonToken(ERROR_DEDENT, '', (lnum, 0), '')
+                yield PythonToken(ERROR_DEDENT, '', (lnum, start), '')
+                indents[-1] = start
                 break
-            yield PythonToken(DEDENT, '', spos, '')
             indents.pop()
+            yield PythonToken(DEDENT, '', spos, '')

     pseudo_token, single_quoted, triple_quoted, endpats, whitespace, \
         fstring_pattern_map, always_break_tokens, = \
         _get_token_collection(version_info)
     paren_level = 0  # count parentheses
-    indents = [0]
-    max = 0
+    if indents is None:
+        indents = [0]
+    max_ = 0
     numchars = '0123456789'
     contstr = ''
     contline = None
@@ -419,25 +424,24 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
     new_line = True
     prefix = ''  # Should never be required, but here for safety
     additional_prefix = ''
-    first = True
     lnum = start_pos[0] - 1
     fstring_stack = []
     for line in lines:  # loop over lines in stream
         lnum += 1
         pos = 0
-        max = len(line)
-        if first:
+        max_ = len(line)
+        if is_first_token:
             if line.startswith(BOM_UTF8_STRING):
                 additional_prefix = BOM_UTF8_STRING
                 line = line[1:]
-                max = len(line)
+                max_ = len(line)

             # Fake that the part before was already parsed.
             line = '^' * start_pos[1] + line
             pos = start_pos[1]
-            max += start_pos[1]
+            max_ += start_pos[1]

-            first = False
+            is_first_token = False

         if contstr:  # continued string
             endmatch = endprog.match(line)
@@ -453,7 +457,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             contline = contline + line
             continue

-        while pos < max:
+        while pos < max_:
             if fstring_stack:
                 tos = fstring_stack[-1]
                 if not tos.is_in_expr():
@@ -468,14 +472,15 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                     )
                     tos.previous_lines = ''
                     continue
-                if pos == max:
+                if pos == max_:
                     break

                 rest = line[pos:]
                 fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary(
                     fstring_stack,
                     rest,
-                    (lnum, pos),
+                    lnum,
+                    pos,
                     additional_prefix,
                 )
                 pos += quote_length
@@ -496,9 +501,39 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                 pseudomatch = pseudo_token.match(string_line, pos)
             else:
                 pseudomatch = pseudo_token.match(line, pos)

+            if pseudomatch:
+                prefix = additional_prefix + pseudomatch.group(1)
+                additional_prefix = ''
+                start, pos = pseudomatch.span(2)
+                spos = (lnum, start)
+                token = pseudomatch.group(2)
+                if token == '':
+                    assert prefix
+                    additional_prefix = prefix
+                    # This means that we have a line with whitespace/comments at
+                    # the end, which just results in an endmarker.
+                    break
+                initial = token[0]
+            else:
+                match = whitespace.match(line, pos)
+                initial = line[match.end()]
+                start = match.end()
+                spos = (lnum, start)
+
+            if new_line and initial not in '\r\n#' and (initial != '\\' or pseudomatch is None):
+                new_line = False
+                if paren_level == 0 and not fstring_stack:
+                    indent_start = start
+                    if indent_start > indents[-1]:
+                        yield PythonToken(INDENT, '', spos, '')
+                        indents.append(indent_start)
+                    for t in dedent_if_necessary(indent_start):
+                        yield t
+
             if not pseudomatch:  # scan for tokens
                 match = whitespace.match(line, pos)
+                if pos == 0:
+                    if new_line and paren_level == 0 and not fstring_stack:
+                        for t in dedent_if_necessary(match.end()):
+                            yield t
                 pos = match.end()
@@ -511,50 +546,18 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                 pos += 1
                 continue

-            prefix = additional_prefix + pseudomatch.group(1)
-            additional_prefix = ''
-            start, pos = pseudomatch.span(2)
-            spos = (lnum, start)
-            token = pseudomatch.group(2)
-            if token == '':
-                assert prefix
-                additional_prefix = prefix
-                # This means that we have a line with whitespace/comments at
-                # the end, which just results in an endmarker.
-                break
-            initial = token[0]
-
-            if new_line and initial not in '\r\n\\#':
-                new_line = False
-                if paren_level == 0 and not fstring_stack:
-                    i = 0
-                    indent_start = start
-                    while line[i] == '\f':
-                        i += 1
-                        # TODO don't we need to change spos as well?
-                        indent_start -= 1
-                    if indent_start > indents[-1]:
-                        yield PythonToken(INDENT, '', spos, '')
-                        indents.append(indent_start)
-                    for t in dedent_if_necessary(indent_start):
-                        yield t
-
-            if (initial in numchars or  # ordinary number
-                    (initial == '.' and token != '.' and token != '...')):
+            if (initial in numchars  # ordinary number
+                    or (initial == '.' and token != '.' and token != '...')):
                 yield PythonToken(NUMBER, token, spos, prefix)
             elif pseudomatch.group(3) is not None:  # ordinary name
-                if token in always_break_tokens:
+                if token in always_break_tokens and (fstring_stack or paren_level):
                     fstring_stack[:] = []
                     paren_level = 0
                     # We only want to dedent if the token is on a new line.
-                    if re.match(r'[ \f\t]*$', line[:start]):
-                        while True:
-                            indent = indents.pop()
-                            if indent > start:
-                                yield PythonToken(DEDENT, '', spos, '')
-                            else:
-                                indents.append(indent)
-                                break
+                    m = re.match(r'[ \f\t]*$', line[:start])
+                    if m is not None:
+                        for t in dedent_if_necessary(m.end()):
+                            yield t
                 if is_identifier(token):
                     yield PythonToken(NAME, token, spos, prefix)
                 else:
@@ -587,7 +590,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                     token = line[start:pos]
                     yield PythonToken(STRING, token, spos, prefix)
                 else:
-                    contstr_start = (lnum, start)  # multiple lines
+                    contstr_start = spos  # multiple lines
                     contstr = line[start:]
                     contline = line
                     break
@@ -649,10 +652,22 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
     if contstr.endswith('\n') or contstr.endswith('\r'):
         new_line = True

-    end_pos = lnum, max
+    if fstring_stack:
+        tos = fstring_stack[-1]
+        if tos.previous_lines:
+            yield PythonToken(
+                FSTRING_STRING, tos.previous_lines,
+                tos.last_string_start_pos,
+                # Never has a prefix because it can start anywhere and
+                # include whitespace.
+                prefix=''
+            )
+
+    end_pos = lnum, max_
     # As the last position we just take the maximally possible position. We
     # remove -1 for the last new line.
     for indent in indents[1:]:
+        indents.pop()
         yield PythonToken(DEDENT, '', end_pos, '')
     yield PythonToken(ENDMARKER, '', end_pos, additional_prefix)
```
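`tokenize_lines` is the generator all of these hunks modify; the new `indents` and `is_first_token` parameters let the diff parser share indentation state across partial tokenization passes. Driving it directly, under the 0.7.0 signature:

```python
from parso.python.tokenize import tokenize_lines
from parso.utils import parse_version_string, split_lines

lines = split_lines('if x:\n    pass\n', keepends=True)
for token in tokenize_lines(lines, parse_version_string('3.8')):
    print(token.type, repr(token.string), token.start_pos)
```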
`test/fuzz_diff_parser.py`:

```diff
@@ -50,6 +50,11 @@ def find_python_files_in_tree(file_path):
         yield file_path
         return
     for root, dirnames, filenames in os.walk(file_path):
+        if 'chardet' in root:
+            # Stuff like chardet/langcyrillicmodel.py is just very slow to
+            # parse and machine generated, so ignore those.
+            continue
+
         for name in filenames:
             if name.endswith('.py'):
                 yield os.path.join(root, name)
@@ -102,9 +107,17 @@ class LineCopy:

 class FileModification:
     @classmethod
-    def generate(cls, code_lines, change_count):
+    def generate(cls, code_lines, change_count, previous_file_modification=None):
+        if previous_file_modification is not None and random.random() > 0.5:
+            # We want to keep the previous modifications in some cases to make
+            # more complex parser issues visible.
+            code_lines = previous_file_modification.apply(code_lines)
+            added_modifications = previous_file_modification.modification_list
+        else:
+            added_modifications = []
         return cls(
-            list(cls._generate_line_modifications(code_lines, change_count)),
+            added_modifications
+            + list(cls._generate_line_modifications(code_lines, change_count)),
             # work with changed trees more than with normal ones.
             check_original=random.random() > 0.8,
         )
@@ -158,18 +171,18 @@ class FileModification:
             yield l

     def __init__(self, modification_list, check_original):
-        self._modification_list = modification_list
+        self.modification_list = modification_list
         self._check_original = check_original

-    def _apply(self, code_lines):
+    def apply(self, code_lines):
         changed_lines = list(code_lines)
-        for modification in self._modification_list:
+        for modification in self.modification_list:
             modification.apply(changed_lines)
         return changed_lines

     def run(self, grammar, code_lines, print_code):
         code = ''.join(code_lines)
-        modified_lines = self._apply(code_lines)
+        modified_lines = self.apply(code_lines)
         modified_code = ''.join(modified_lines)

         if print_code:
@@ -197,7 +210,7 @@ class FileModification:
 class FileTests:
     def __init__(self, file_path, test_count, change_count):
         self._path = file_path
-        with open(file_path) as f:
+        with open(file_path, errors='replace') as f:
             code = f.read()
         self._code_lines = split_lines(code, keepends=True)
         self._test_count = test_count
@@ -228,8 +241,12 @@ class FileTests:

     def run(self, grammar, debugger):
         def iterate():
+            fm = None
             for _ in range(self._test_count):
-                fm = FileModification.generate(self._code_lines, self._change_count)
+                fm = FileModification.generate(
+                    self._code_lines, self._change_count,
+                    previous_file_modification=fm
+                )
                 self._file_modifications.append(fm)
                 yield fm
```
@@ -8,7 +8,7 @@ import pytest
|
||||
from parso.utils import split_lines
|
||||
from parso import cache
|
||||
from parso import load_grammar
|
||||
from parso.python.diff import DiffParser, _assert_valid_graph
|
||||
from parso.python.diff import DiffParser, _assert_valid_graph, _assert_nodes_are_equal
|
||||
from parso import parse
|
||||
|
||||
ANY = object()
|
||||
@@ -69,6 +69,9 @@ class Differ(object):
|
||||
|
||||
_assert_valid_graph(new_module)
|
||||
|
||||
without_diff_parser_module = parse(code)
|
||||
_assert_nodes_are_equal(new_module, without_diff_parser_module)
|
||||
|
||||
error_node = _check_error_leaves_nodes(new_module)
|
||||
assert expect_error_leaves == (error_node is not None), error_node
|
||||
if parsers is not ANY:
|
||||
@@ -88,15 +91,15 @@ def test_change_and_undo(differ):
|
||||
# Parse the function and a.
|
||||
differ.initialize(func_before + 'a')
|
||||
# Parse just b.
|
||||
differ.parse(func_before + 'b', copies=1, parsers=1)
|
||||
differ.parse(func_before + 'b', copies=1, parsers=2)
|
||||
# b has changed to a again, so parse that.
|
||||
differ.parse(func_before + 'a', copies=1, parsers=1)
|
||||
differ.parse(func_before + 'a', copies=1, parsers=2)
|
||||
# Same as before parsers should not be used. Just a simple copy.
|
||||
differ.parse(func_before + 'a', copies=1)
|
||||
|
||||
# Now that we have a newline at the end, everything is easier in Python
|
||||
# syntax, we can parse once and then get a copy.
|
||||
differ.parse(func_before + 'a\n', copies=1, parsers=1)
|
||||
differ.parse(func_before + 'a\n', copies=1, parsers=2)
|
||||
differ.parse(func_before + 'a\n', copies=1)
|
||||
|
||||
# Getting rid of an old parser: Still no parsers used.
|
||||
@@ -135,7 +138,7 @@ def test_if_simple(differ):
|
||||
differ.initialize(src + 'a')
|
||||
differ.parse(src + else_ + "a", copies=0, parsers=1)
|
||||
|
||||
differ.parse(else_, parsers=1, copies=1, expect_error_leaves=True)
|
||||
differ.parse(else_, parsers=2, expect_error_leaves=True)
|
||||
differ.parse(src + else_, parsers=1)
|
||||
|
||||
|
||||
@@ -152,7 +155,7 @@ def test_func_with_for_and_comment(differ):
|
||||
# COMMENT
|
||||
a""")
|
||||
differ.initialize(src)
|
||||
differ.parse('a\n' + src, copies=1, parsers=2)
|
||||
differ.parse('a\n' + src, copies=1, parsers=3)
|
||||
|
||||
|
||||
def test_one_statement_func(differ):
|
||||
@@ -236,7 +239,7 @@ def test_backslash(differ):
|
||||
def y():
|
||||
pass
|
||||
""")
|
||||
differ.parse(src, parsers=2)
|
||||
differ.parse(src, parsers=1)
|
||||
|
||||
src = dedent(r"""
|
||||
def first():
|
||||
@@ -247,7 +250,7 @@ def test_backslash(differ):
|
||||
def second():
|
||||
pass
|
||||
""")
|
||||
differ.parse(src, parsers=1)
|
||||
differ.parse(src, parsers=2)
|
||||
|
||||
|
||||
def test_full_copy(differ):
|
||||
@@ -261,10 +264,10 @@ def test_wrong_whitespace(differ):
|
||||
hello
|
||||
'''
|
||||
differ.initialize(code)
|
||||
differ.parse(code + 'bar\n ', parsers=3)
|
||||
differ.parse(code + 'bar\n ', parsers=2, expect_error_leaves=True)
|
||||
|
||||
code += """abc(\npass\n """
|
||||
differ.parse(code, parsers=2, copies=1, expect_error_leaves=True)
|
||||
differ.parse(code, parsers=2, expect_error_leaves=True)
|
||||
|
||||
|
||||
def test_issues_with_error_leaves(differ):
|
||||
@@ -279,7 +282,7 @@ def test_issues_with_error_leaves(differ):
|
||||
str
|
||||
''')
|
||||
differ.initialize(code)
|
||||
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
|
||||
differ.parse(code2, parsers=1, expect_error_leaves=True)
|
||||
|
||||
|
||||
def test_unfinished_nodes(differ):
|
||||
@@ -299,7 +302,7 @@ def test_unfinished_nodes(differ):
|
||||
a(1)
|
||||
''')
|
||||
differ.initialize(code)
|
||||
differ.parse(code2, parsers=1, copies=2)
|
||||
differ.parse(code2, parsers=2, copies=2)
|
||||
|
||||
|
||||
def test_nested_if_and_scopes(differ):
|
||||
@@ -365,7 +368,7 @@ def test_totally_wrong_whitespace(differ):
|
||||
'''
|
||||
|
||||
differ.initialize(code1)
|
||||
differ.parse(code2, parsers=4, copies=0, expect_error_leaves=True)
|
||||
differ.parse(code2, parsers=2, copies=0, expect_error_leaves=True)
|
||||
|
||||
|
||||
def test_node_insertion(differ):
|
||||
@@ -439,7 +442,7 @@ def test_in_class_movements(differ):
|
||||
""")
|
||||
|
||||
differ.initialize(code1)
|
||||
differ.parse(code2, parsers=2, copies=1)
|
||||
differ.parse(code2, parsers=1)
|
||||
|
||||
|
||||
def test_in_parentheses_newlines(differ):
|
||||
@@ -484,7 +487,7 @@ def test_indentation_issue(differ):
|
||||
""")
|
||||
|
||||
differ.initialize(code1)
|
||||
differ.parse(code2, parsers=1)
|
||||
differ.parse(code2, parsers=2)
|
||||
|
||||
|
||||
def test_endmarker_newline(differ):
|
||||
@@ -585,7 +588,7 @@ def test_if_removal_and_reappearence(differ):
         la
     ''')
     differ.initialize(code1)
-    differ.parse(code2, parsers=1, copies=4, expect_error_leaves=True)
+    differ.parse(code2, parsers=3, copies=2, expect_error_leaves=True)
     differ.parse(code1, parsers=1, copies=1)
     differ.parse(code3, parsers=1, copies=1)

@@ -618,8 +621,8 @@ def test_differing_docstrings(differ):
     ''')

     differ.initialize(code1)
-    differ.parse(code2, parsers=3, copies=1)
-    differ.parse(code1, parsers=3, copies=1)
+    differ.parse(code2, parsers=2, copies=1)
+    differ.parse(code1, parsers=2, copies=1)


 def test_one_call_in_function_change(differ):
def test_one_call_in_function_change(differ):
|
||||
@@ -649,7 +652,7 @@ def test_one_call_in_function_change(differ):
|
||||
''')
|
||||
|
||||
differ.initialize(code1)
|
||||
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
|
||||
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
|
||||
differ.parse(code1, parsers=2, copies=1)
|
||||
|
||||
|
||||
@@ -711,7 +714,7 @@ def test_docstring_removal(differ):

     differ.initialize(code1)
     differ.parse(code2, parsers=1, copies=2)
-    differ.parse(code1, parsers=2, copies=1)
+    differ.parse(code1, parsers=3, copies=1)


 def test_paren_in_strange_position(differ):
@@ -783,7 +786,7 @@ def test_parentheses_before_method(differ):

     differ.initialize(code1)
     differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
-    differ.parse(code1, parsers=1, copies=1)
+    differ.parse(code1, parsers=2, copies=1)


 def test_indentation_issues(differ):
@@ -824,10 +827,10 @@ def test_indentation_issues(differ):
     ''')

     differ.initialize(code1)
-    differ.parse(code2, parsers=2, copies=2, expect_error_leaves=True)
-    differ.parse(code1, copies=2)
+    differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
+    differ.parse(code1, copies=1, parsers=2)
     differ.parse(code3, parsers=2, copies=1)
-    differ.parse(code1, parsers=1, copies=2)
+    differ.parse(code1, parsers=2, copies=1)


 def test_error_dedent_issues(differ):
@@ -860,7 +863,7 @@ def test_error_dedent_issues(differ):
     ''')

     differ.initialize(code1)
-    differ.parse(code2, parsers=6, copies=2, expect_error_leaves=True)
+    differ.parse(code2, parsers=3, copies=0, expect_error_leaves=True)
     differ.parse(code1, parsers=1, copies=0)


@@ -892,8 +895,8 @@ Some'random text: yeah
     ''')

     differ.initialize(code1)
-    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
-    differ.parse(code1, parsers=1, copies=1)
+    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
+    differ.parse(code1, parsers=2, copies=1)


 def test_many_nested_ifs(differ):
@@ -946,7 +949,7 @@ def test_with_and_funcdef_in_call(differ, prefix):
     code2 = insert_line_into_code(code1, 3, 'def y(self, args):\n')

     differ.initialize(code1)
-    differ.parse(code2, parsers=3, expect_error_leaves=True)
+    differ.parse(code2, parsers=1, expect_error_leaves=True)
     differ.parse(code1, parsers=1)


@@ -961,14 +964,10 @@ def test_wrong_backslash(differ):
     code2 = insert_line_into_code(code1, 3, '\\.whl$\n')

     differ.initialize(code1)
-    differ.parse(code2, parsers=2, copies=2, expect_error_leaves=True)
+    differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
     differ.parse(code1, parsers=1, copies=1)


-def test_comment_change(differ):
-    differ.initialize('')
-
-
 def test_random_unicode_characters(differ):
     """
     Those issues were all found with the fuzzer.
@@ -984,9 +983,9 @@ def test_random_unicode_characters(differ):
     differ.parse(s, parsers=1, expect_error_leaves=True)
     differ.parse('')
     differ.parse(s + '\n', parsers=1, expect_error_leaves=True)
-    differ.parse(u' result = (\r\f\x17\t\x11res)', parsers=2, expect_error_leaves=True)
+    differ.parse(u' result = (\r\f\x17\t\x11res)', parsers=1, expect_error_leaves=True)
     differ.parse('')
-    differ.parse(' a( # xx\ndef', parsers=2, expect_error_leaves=True)
+    differ.parse(' a( # xx\ndef', parsers=1, expect_error_leaves=True)


 def test_dedent_end_positions(differ):
@@ -997,7 +996,7 @@ def test_dedent_end_positions(differ):
         c = {
             5}
     ''')
-    code2 = dedent('''\
+    code2 = dedent(u'''\
     if 1:
         if ⌟ഒᜈྡྷṭb:
             2
@@ -1040,7 +1039,7 @@ def test_random_character_insertion(differ):
         # 4
     ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=1, parsers=3, expect_error_leaves=True)
+    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
     differ.parse(code1, copies=1, parsers=1)


@@ -1101,8 +1100,8 @@ def test_all_sorts_of_indentation(differ):
         end
     ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=1, parsers=4, expect_error_leaves=True)
-    differ.parse(code1, copies=1, parsers=3)
+    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
+    differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)

     code3 = dedent('''\
     if 1:
@@ -1112,7 +1111,7 @@ def test_all_sorts_of_indentation(differ):
             d
     \x00
     ''')
-    differ.parse(code3, parsers=2, expect_error_leaves=True)
+    differ.parse(code3, parsers=1, expect_error_leaves=True)
     differ.parse('')


@@ -1129,7 +1128,7 @@ def test_dont_copy_dedents_in_beginning(differ):
     ''')
     differ.initialize(code1)
     differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
-    differ.parse(code1, parsers=2)
+    differ.parse(code1, parsers=1, copies=1)


 def test_dont_copy_error_leaves(differ):
@@ -1149,7 +1148,7 @@ def test_dont_copy_error_leaves(differ):
     ''')
     differ.initialize(code1)
     differ.parse(code2, parsers=1, expect_error_leaves=True)
-    differ.parse(code1, parsers=2)
+    differ.parse(code1, parsers=1)


 def test_error_dedent_in_between(differ):
@@ -1173,7 +1172,7 @@ def test_error_dedent_in_between(differ):
         z
     ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
+    differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
     differ.parse(code1, copies=1, parsers=2)


@@ -1199,8 +1198,8 @@ def test_some_other_indentation_issues(differ):
         a
     ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=2, parsers=1, expect_error_leaves=True)
-    differ.parse(code1, copies=2, parsers=2)
+    differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True)
+    differ.parse(code1, copies=1, parsers=1)


 def test_open_bracket_case1(differ):
@@ -1240,8 +1239,8 @@ def test_open_bracket_case2(differ):
         d
     ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
-    differ.parse(code1, copies=2, parsers=0, expect_error_leaves=True)
+    differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True)
+    differ.parse(code1, copies=0, parsers=1, expect_error_leaves=True)


 def test_some_weird_removals(differ):
@@ -1266,7 +1265,7 @@ def test_some_weird_removals(differ):
     ''')
     differ.initialize(code1)
     differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
-    differ.parse(code3, copies=1, parsers=2, expect_error_leaves=True)
+    differ.parse(code3, copies=1, parsers=3, expect_error_leaves=True)
     differ.parse(code1, copies=1)


@@ -1285,3 +1284,467 @@ def test_async_copy(differ):
     differ.initialize(code1)
     differ.parse(code2, copies=1, parsers=1)
     differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
+
+
+def test_parent_on_decorator(differ):
+    code1 = dedent('''\
+        class AClass:
+            @decorator()
+            def b_test(self):
+                print("Hello")
+                print("world")
+
+            def a_test(self):
+                pass''')
+    code2 = dedent('''\
+        class AClass:
+            @decorator()
+            def b_test(self):
+                print("Hello")
+                print("world")
+
+            def a_test(self):
+                pass''')
+    differ.initialize(code1)
+    module_node = differ.parse(code2, parsers=1)
+    cls = module_node.children[0]
+    cls_suite = cls.children[-1]
+    assert len(cls_suite.children) == 3
+
+
+def test_wrong_indent_in_def(differ):
+    code1 = dedent('''\
+        def x():
+            a
+            b
+        ''')
+
+    code2 = dedent('''\
+        def x():
+            //
+            b
+            c
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, expect_error_leaves=True)
+    differ.parse(code1, parsers=1)
+
+
+def test_backslash_issue(differ):
+    code1 = dedent('''
+        pre = (
+            '')
+        after = 'instead'
+        ''')
+    code2 = dedent('''
+        pre = (
+            '')
+        \\if
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+    differ.parse(code1, parsers=1, copies=1)
+
+
+def test_paren_with_indentation(differ):
+    code1 = dedent('''
+        class C:
+            def f(self, fullname, path=None):
+                x
+
+            def load_module(self, fullname):
+                a
+                for prefix in self.search_path:
+                    try:
+                        b
+                    except ImportError:
+                        c
+                    else:
+                        raise
+        def x():
+            pass
+        ''')
+    code2 = dedent('''
+        class C:
+            def f(self, fullname, path=None):
+                x
+
+            (
+            a
+            for prefix in self.search_path:
+                try:
+                    b
+                except ImportError:
+                    c
+                else:
+                    raise
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+    differ.parse(code1, parsers=3, copies=1)
+
+
+def test_error_dedent_in_function(differ):
+    code1 = dedent('''\
+        def x():
+            a
+            b
+            c
+            d
+        ''')
+    code2 = dedent('''\
+        def x():
+            a
+            b
+            c
+            d
+            e
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
+
+
+def test_with_formfeed(differ):
+    code1 = dedent('''\
+        @bla
+        async def foo():
+            1
+            yield from []
+            return
+            return ''
+        ''')
+    code2 = dedent('''\
+        @bla
+        async def foo():
+            1
+            \x0cimport
+            return
+            return ''
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)
+
+
+def test_repeating_invalid_indent(differ):
+    code1 = dedent('''\
+        def foo():
+            return
+
+        @bla
+        a
+        def foo():
+            a
+            b
+            c
+        ''')
+    code2 = dedent('''\
+        def foo():
+            return
+
+        @bla
+        a
+            b
+            c
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
+
+
+def test_another_random_indent(differ):
+    code1 = dedent('''\
+        def foo():
+            a
+            b
+            c
+            return
+        def foo():
+            d
+        ''')
+    code2 = dedent('''\
+        def foo():
+            a
+            c
+            return
+        def foo():
+            d
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=3)
+
+
+def test_invalid_function(differ):
+    code1 = dedent('''\
+        a
+        def foo():
+            def foo():
+                b
+        ''')
+    code2 = dedent('''\
+        a
+        def foo():
+            def foo():
+                b
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+
+
+def test_async_func2(differ):
+    code1 = dedent('''\
+        async def foo():
+            return ''
+        @bla
+        async def foo():
+            x
+        ''')
+    code2 = dedent('''\
+        async def foo():
+            return ''
+
+        {
+        @bla
+        async def foo():
+            x
+            y
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)
+
+
+def test_weird_ending(differ):
+    code1 = dedent('''\
+        def foo():
+            a
+            return
+        ''')
+    code2 = dedent('''\
+        def foo():
+            a
+            nonlocal xF"""
+        y"""''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+
+
+def test_nested_class(differ):
+    code1 = dedent('''\
+        def c():
+            a = 3
+            class X:
+                b
+        ''')
+    code2 = dedent('''\
+        def c():
+            a = 3
+            class X:
+                elif
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+
+
+def test_class_with_paren_breaker(differ):
+    code1 = dedent('''\
+        class Grammar:
+            x
+            def parse():
+                y
+                parser(
+                )
+                z
+        ''')
+    code2 = dedent('''\
+        class Grammar:
+            x
+            def parse():
+                y
+                parser(
+                    finally ;
+                )
+                z
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
+
+
+def test_byte_order_mark(differ):
+    code2 = dedent('''\
+
+        x
+        \ufeff
+        else :
+        ''')
+    differ.initialize('\n')
+    differ.parse(code2, parsers=2, expect_error_leaves=True)
+
+    code3 = dedent('''\
+        \ufeff
+        if:
+
+        x
+        ''')
+    differ.initialize('\n')
+    differ.parse(code3, parsers=2, expect_error_leaves=True)
+
+
+def test_byte_order_mark2(differ):
+    code = u'\ufeff# foo'
+    differ.initialize(code)
+    differ.parse(code + 'x', parsers=ANY)
+
+
+def test_byte_order_mark3(differ):
+    code1 = u"\ufeff#\ny\n"
+    code2 = u'x\n\ufeff#\n\ufeff#\ny\n'
+    differ.initialize(code1)
+    differ.parse(code2, expect_error_leaves=True, parsers=ANY, copies=ANY)
+    differ.parse(code1, parsers=1)
+
+
+def test_backslash_insertion(differ):
+    code1 = dedent('''
+        def f():
+            x
+            def g():
+                base = "" \\
+                       ""
+                return
+        ''')
+    code2 = dedent('''
+        def f():
+            x
+            def g():
+                base = "" \\
+        def h():
+                       ""
+                return
+        ''')
+
+    differ.initialize(code1)
+    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
+    differ.parse(code1, parsers=2, copies=1)
+
+
+def test_fstring_with_error_leaf(differ):
+    code1 = dedent("""\
+        def f():
+            x
+        def g():
+            y
+        """)
+    code2 = dedent("""\
+        def f():
+            x
+            F'''
+        def g():
+            y
+            {a
+        \x01
+        """)
+
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+
+
+def test_yet_another_backslash(differ):
+    code1 = dedent('''\
+        def f():
+            x
+            def g():
+                y
+                base = "" \\
+                       "" % to
+                return
+        ''')
+    code2 = dedent('''\
+        def f():
+            x
+            def g():
+                y
+                base = "" \\
+                \x0f
+                return
+        ''')
+
+    differ.initialize(code1)
+    differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)
+    differ.parse(code1, parsers=ANY, copies=ANY)
+
+
+def test_backslash_before_def(differ):
+    code1 = dedent('''\
+        def f():
+            x
+
+        def g():
+            y
+            z
+        ''')
+    code2 = dedent('''\
+        def f():
+            x
+        >\\
+        def g():
+            y
+            x
+            z
+        ''')
+
+    differ.initialize(code1)
+    differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
+
+
+def test_backslash_with_imports(differ):
+    code1 = dedent('''\
+        from x import y, \\
+        ''')
+    code2 = dedent('''\
+        from x import y, \\
+            z
+        ''')
+
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1)
+    differ.parse(code1, parsers=1)
+
+
+def test_one_line_function_error_recovery(differ):
+    code1 = dedent('''\
+        class X:
+            x
+            def y(): word """
+            # a
+            # b
+            c(self)
+        ''')
+    code2 = dedent('''\
+        class X:
+            x
+            def y(): word """
+            # a
+            # b
+            c(\x01+self)
+        ''')
+
+    differ.initialize(code1)
+    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+
+
+def test_one_line_property_error_recovery(differ):
+    code1 = dedent('''\
+        class X:
+            x
+            @property
+            def encoding(self): True -
+                return 1
+        ''')
+    code2 = dedent('''\
+        class X:
+            x
+            @property
+            def encoding(self): True -
+                return 1
+        ''')
+
+    differ.initialize(code1)
+    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
@@ -1,3 +1,5 @@
+from textwrap import dedent
+
 from parso import parse, load_grammar


@@ -83,3 +85,65 @@ def test_invalid_token_in_fstr():
     assert error1.type == 'error_leaf'
     assert error2.value == '"'
     assert error2.type == 'error_leaf'
+
+
+def test_dedent_issues1():
+    code = dedent('''\
+        class C:
+            @property
+                f
+                g
+            end
+        ''')
+    module = load_grammar(version='3.8').parse(code)
+    klass, endmarker = module.children
+    suite = klass.children[-1]
+    assert suite.children[2].type == 'error_leaf'
+    assert suite.children[3].get_code(include_prefix=False) == 'f\n'
+    assert suite.children[5].get_code(include_prefix=False) == 'g\n'
+    assert suite.type == 'suite'
+
+
+def test_dedent_issues2():
+    code = dedent('''\
+        class C:
+            @property
+                if 1:
+                    g
+                else:
+                    h
+            end
+        ''')
+    module = load_grammar(version='3.8').parse(code)
+    klass, endmarker = module.children
+    suite = klass.children[-1]
+    assert suite.children[2].type == 'error_leaf'
+    if_ = suite.children[3]
+    assert if_.children[0] == 'if'
+    assert if_.children[3].type == 'suite'
+    assert if_.children[3].get_code() == '\n g\n'
+    assert if_.children[4] == 'else'
+    assert if_.children[6].type == 'suite'
+    assert if_.children[6].get_code() == '\n h\n'
+
+    assert suite.children[4].get_code(include_prefix=False) == 'end\n'
+    assert suite.type == 'suite'
+
+
+def test_dedent_issues3():
+    code = dedent('''\
+        class C:
+            f
+            g
+        ''')
+    module = load_grammar(version='3.8').parse(code)
+    klass, endmarker = module.children
+    suite = klass.children[-1]
+    assert len(suite.children) == 4
+    assert suite.children[1].get_code() == ' f\n'
+    assert suite.children[1].type == 'simple_stmt'
+    assert suite.children[2].get_code() == ''
+    assert suite.children[2].type == 'error_leaf'
+    assert suite.children[2].token_type == 'ERROR_DEDENT'
+    assert suite.children[3].get_code() == ' g\n'
+    assert suite.children[3].type == 'simple_stmt'
@@ -118,3 +118,16 @@ def test_carriage_return_at_end(code, types):
     assert tree.get_code() == code
     assert [c.type for c in tree.children] == types
     assert tree.end_pos == (len(code) + 1, 0)
+
+
+@pytest.mark.parametrize('code', [
+    ' ',
+    ' F"""',
+    ' F"""\n',
+    ' F""" \n',
+    ' F""" \n3',
+    ' f"""\n"""',
+    ' f"""\n"""\n',
+])
+def test_full_code_round_trip(code):
+    assert parse(code).get_code() == code
@@ -29,13 +29,17 @@ def _invalid_syntax(code, version=None, **kwargs):
         print(module.children)


-def test_formfeed(each_py2_version):
-    s = u"""print 1\n\x0Cprint 2\n"""
-    t = _parse(s, each_py2_version)
-    assert t.children[0].children[0].type == 'print_stmt'
-    assert t.children[1].children[0].type == 'print_stmt'
-    s = u"""1\n\x0C\x0C2\n"""
-    t = _parse(s, each_py2_version)
+def test_formfeed(each_version):
+    s = u"foo\n\x0c\nfoo\n"
+    t = _parse(s, each_version)
+    assert t.children[0].children[0].type == 'name'
+    assert t.children[1].children[0].type == 'name'
+    s = u"1\n\x0c\x0c\n2\n"
+    t = _parse(s, each_version)
+
+    with pytest.raises(ParserSyntaxError):
+        s = u"\n\x0c2\n"
+        _parse(s, each_version)


 def test_matrix_multiplication_operator(works_ge_py35):
@@ -4,7 +4,6 @@ import sys
 from textwrap import dedent

 import pytest
-import sys

 from parso.utils import split_lines, parse_version_string
 from parso.python.token import PythonTokenTypes
@@ -239,7 +238,7 @@ xfail_py2 = dict(marks=[pytest.mark.xfail(sys.version_info[0] == 2, reason='Pyth
         (' foo', [INDENT, NAME, DEDENT]),
         (' foo\n bar', [INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME, DEDENT]),
         (' foo\n bar \n baz', [INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME,
-                               NEWLINE, ERROR_DEDENT, NAME, DEDENT]),
+                               NEWLINE, NAME, DEDENT]),
         (' foo\nbar', [INDENT, NAME, NEWLINE, DEDENT, NAME]),

         # Name stuff
@@ -250,6 +249,21 @@ xfail_py2 = dict(marks=[pytest.mark.xfail(sys.version_info[0] == 2, reason='Pyth
         pytest.param(u'²', [ERRORTOKEN], **xfail_py2),
         pytest.param(u'ä²ö', [NAME, ERRORTOKEN, NAME], **xfail_py2),
         pytest.param(u'ää²¹öö', [NAME, ERRORTOKEN, NAME], **xfail_py2),
+        (' \x00a', [INDENT, ERRORTOKEN, NAME, DEDENT]),
+        (dedent('''\
+            class BaseCache:
+                a
+                def
+                b
+                def
+                c
+            '''), [NAME, NAME, OP, NEWLINE, INDENT, NAME, NEWLINE,
+                   ERROR_DEDENT, NAME, NEWLINE, INDENT, NAME, NEWLINE, DEDENT,
+                   NAME, NEWLINE, INDENT, NAME, NEWLINE, DEDENT, DEDENT]),
+        (' )\n foo', [INDENT, OP, NEWLINE, ERROR_DEDENT, NAME, DEDENT]),
+        ('a\n b\n )\n c', [NAME, NEWLINE, INDENT, NAME, NEWLINE, INDENT, OP,
+                           NEWLINE, DEDENT, NAME, DEDENT]),
+        (' 1 \\\ndef', [INDENT, NUMBER, NAME, DEDENT]),
     ]
 )
 def test_token_types(code, types):
@@ -258,7 +272,7 @@ def test_token_types(code, types):


 def test_error_string():
-    t1, newline, endmarker = _get_token_list(' "\n')
+    indent, t1, newline, token, endmarker = _get_token_list(' "\n')
     assert t1.type == ERRORTOKEN
     assert t1.prefix == ' '
     assert t1.string == '"'
|
||||
|
||||
|
||||
def test_form_feed():
|
||||
error_token, endmarker = _get_token_list(dedent('''\
|
||||
indent, error_token, dedent_, endmarker = _get_token_list(dedent('''\
|
||||
\f"""'''))
|
||||
assert error_token.prefix == '\f'
|
||||
assert error_token.string == '"""'
|
||||
assert endmarker.prefix == ''
|
||||
assert indent.type == INDENT
|
||||
assert dedent_.type == DEDENT
|
||||
|
||||
|
||||
def test_carriage_return():
|
||||
lst = _get_token_list(' =\\\rclass')
|
||||
assert [t.type for t in lst] == [INDENT, OP, DEDENT, NAME, ENDMARKER]
|
||||
assert [t.type for t in lst] == [INDENT, OP, NAME, DEDENT, ENDMARKER]
|
||||
|
||||
|
||||
def test_backslash():
|
||||
@@ -339,6 +355,7 @@ def test_backslash():

 @pytest.mark.parametrize(
     ('code', 'types'), [
         # f-strings
+        ('f"', [FSTRING_START]),
         ('f""', [FSTRING_START, FSTRING_END]),
         ('f" {}"', [FSTRING_START, FSTRING_STRING, OP, OP, FSTRING_END]),
@@ -394,7 +411,7 @@ def test_backslash():
         ]),
     ]
 )
-def test_fstring(code, types, version_ge_py36):
+def test_fstring_token_types(code, types, version_ge_py36):
     actual_types = [t.type for t in _get_token_list(code, version_ge_py36)]
     assert types + [ENDMARKER] == actual_types

@@ -414,3 +431,13 @@ def test_fstring(code, types, version_ge_py36):
 def test_fstring_assignment_expression(code, types, version_ge_py38):
     actual_types = [t.type for t in _get_token_list(code, version_ge_py38)]
     assert types + [ENDMARKER] == actual_types
+
+
+def test_fstring_end_error_pos(version_ge_py38):
+    f_start, f_string, bracket, f_end, endmarker = \
+        _get_token_list('f" { "', version_ge_py38)
+    assert f_start.start_pos == (1, 0)
+    assert f_string.start_pos == (1, 2)
+    assert bracket.start_pos == (1, 3)
+    assert f_end.start_pos == (1, 5)
+    assert endmarker.start_pos == (1, 6)