Mirror of https://github.com/davidhalter/parso.git

Compare commits (8 commits)
| Author | SHA1 | Date |
|---|---|---|
| | acccb4f28d | |
| | 3f6fc8a5ad | |
| | f1ee7614c9 | |
| | 58850f8bfa | |
| | d38a60278e | |
| | 6c65aea47d | |
| | 0d37ff865c | |
| | 076e296497 | |
```diff
@@ -3,6 +3,18 @@
 Changelog
 ---------
 
+0.3.4 (2018-02-13)
++++++++++++++++++++
+
+- Fix an f-string tokenizer error
+
+0.3.3 (2018-02-06)
++++++++++++++++++++
+
+- Fix async errors in the diff parser
+- A fix in iter_errors
+- This is a very small bugfix release
+
 0.3.2 (2018-01-24)
 +++++++++++++++++++
 
```
```diff
@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.3.2'
+__version__ = '0.3.4'
 
 
 def parse(code=None, **kwargs):
```
```diff
@@ -682,6 +682,8 @@ class _NodesTree(object):
             last = new_nodes[-1]
             if last.type == 'decorated':
                 last = last.children[-1]
+            if last.type in ('async_funcdef', 'async_stmt'):
+                last = last.children[-1]
             last_line_offset_leaf = last.children[-2].get_last_leaf()
             assert last_line_offset_leaf == ':'
         else:
```
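The `_NodesTree` hunk above teaches the diff parser to unwrap `async_funcdef`/`async_stmt` wrappers the same way it already unwraps `decorated` nodes. A minimal sketch of the node shapes involved, using parso's public API (the exact wrapper type depends on whether the function is decorated):

```python
import parso

# An async def is wrapped in an extra node ('async_stmt', or
# 'async_funcdef' when decorated); the plain 'funcdef' is its last child.
module = parso.parse('async def main():\n    pass\n', version='3.6')
wrapper = module.children[0]
print(wrapper.type)               # the async wrapper node
print(wrapper.children[-1].type)  # 'funcdef'
```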
```diff
@@ -570,11 +570,14 @@ class _BytesAndStringMix(SyntaxRule):
     message = "cannot mix bytes and nonbytes literals"
 
     def _is_bytes_literal(self, string):
+        if string.type == 'fstring':
+            return False
         return 'b' in string.string_prefix.lower()
 
     def is_issue(self, node):
         first = node.children[0]
-        if first.type == 'string' and self._normalizer.version >= (3, 0):
+        # In Python 2 it's allowed to mix bytes and unicode.
+        if self._normalizer.version >= (3, 0):
             first_is_bytes = self._is_bytes_literal(first)
             for string in node.children[1:]:
                 if first_is_bytes != self._is_bytes_literal(string):
```
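With `_is_bytes_literal` returning `False` for f-strings, implicit concatenation of f-strings and bytes now trips this rule. A small sketch of triggering it through the public `iter_errors` API, assuming a 3.6 grammar:

```python
import parso

grammar = parso.load_grammar(version='3.6')
module = grammar.parse('f"s" b""\n')
for issue in grammar.iter_errors(module):
    # Expect a syntax issue: "cannot mix bytes and nonbytes literals"
    print(issue.start_pos, issue.message)
```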
```diff
@@ -419,8 +419,6 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             tos = fstring_stack[-1]
             if not tos.is_in_expr():
                 string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos)
-                if pos == max:
-                    break
                 if string:
                     yield PythonToken(
                         FSTRING_STRING, string,
@@ -431,6 +429,8 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                     )
                     tos.previous_lines = ''
                     continue
+                if pos == max:
+                    break
 
         rest = line[pos:]
         fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary(
```
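The two tokenizer hunks move the end-of-line check below the `FSTRING_STRING` emission, so a string part that reaches the end of the input is still yielded before the loop breaks. A rough way to watch the token stream, using parso's internal tokenize module (an internal API, so these names may shift between releases):

```python
from parso.python.tokenize import tokenize
from parso.utils import parse_version_string

# An f-string whose string part runs to the end of the input.
for token in tokenize('f"abc\n', parse_version_string('3.6')):
    print(token)
```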
```diff
@@ -969,7 +969,7 @@ class ImportName(Import):
 class KeywordStatement(PythonBaseNode):
     """
     For the following statements: `assert`, `del`, `global`, `nonlocal`,
-    `raise`, `return`, `yield`, `return`, `yield`.
+    `raise`, `return`, `yield`.
 
     `pass`, `continue` and `break` are not in there, because they are just
     simple keywords and the parser reduces it to a keyword.
```
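The docstring fix only removes the duplicated `return`/`yield` entries. For context, a small sketch of how such a statement surfaces in the tree, assuming the usual parso layout where simple statements sit inside a `simple_stmt` wrapper:

```python
import parso

module = parso.parse('del x\n')
# The KeywordStatement is the first child of the simple_stmt wrapper;
# its type is derived from the keyword itself.
del_stmt = module.children[0].children[0]
print(del_stmt.type)  # 'del_stmt'
```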
```diff
@@ -285,6 +285,14 @@ if sys.version_info >= (3,):
         'b"ä"',
         # combining strings and unicode is allowed in Python 2.
         '"s" b""',
+        '"s" b"" ""',
+        'b"" "" b"" ""',
     ]
+    if sys.version_info >= (3, 6):
+        FAILING_EXAMPLES += [
+            # Same as above, but for f-strings.
+            'f"s" b""',
+            'b"s" f""',
+        ]
 if sys.version_info >= (2, 7):
     # This is something that raises a different error in 2.6 than in the other
```
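These `FAILING_EXAMPLES` entries encode snippets that CPython itself rejects. A quick check of what CPython 3.6+ does with the new f-string cases:

```python
# CPython >= 3.6 refuses to implicitly concatenate f-strings with bytes.
for code in ('f"s" b""', 'b"s" f""'):
    try:
        compile(code, '<test>', 'exec')
    except SyntaxError as e:
        print(code, '->', e.msg)  # cannot mix bytes and nonbytes literals
```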
```diff
@@ -203,9 +203,6 @@ class FileTests:
         self._test_count = test_count
-        self._code_lines = self._code_lines
         self._change_count = change_count
 
         with open(file_path) as f:
             code = f.read()
-        self._file_modifications = []
-
     def _run(self, grammar, file_modifications, debugger, print_code=False):
```
```diff
@@ -1243,7 +1243,7 @@ def test_open_bracket_case2(differ):
     differ.parse(code1, copies=2, parsers=0, expect_error_leaves=True)
 
 
-def test_x(differ):
+def test_some_weird_removals(differ):
     code1 = dedent('''\
         class C:
             1
@@ -1264,6 +1264,23 @@ def test_x(differ):
         omega
         ''')
     differ.initialize(code1)
-    differ.parse(code2, copies=ANY, parsers=ANY, expect_error_leaves=True)
-    differ.parse(code3, copies=ANY, parsers=ANY, expect_error_leaves=True)
+    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
+    differ.parse(code3, copies=1, parsers=2, expect_error_leaves=True)
     differ.parse(code1, copies=1)
+
+
+@pytest.mark.skipif(sys.version_info < (3, 5), reason="Async starts working in 3.5")
+def test_async_copy(differ):
+    code1 = dedent('''\
+        async def main():
+            x = 3
+            print(
+        ''')
+    code2 = dedent('''\
+        async def main():
+            x = 3
+            print()
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, copies=1, parsers=1)
+    differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
```
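`test_async_copy` exercises the `_NodesTree` change from the diff-parser hunk above: copying an async function's nodes between incremental parses. A rough sketch of driving the diff parser through the public API, with `diff_cache` keyed by a hypothetical path (the pattern jedi uses):

```python
import parso

code1 = 'async def main():\n    x = 3\n    print(\n'
code2 = 'async def main():\n    x = 3\n    print()\n'

grammar = parso.load_grammar(version='3.6')
# 'example.py' is only a cache key; diff_cache lets the second parse
# copy unchanged nodes from the first instead of reparsing them.
grammar.parse(code1, path='example.py', diff_cache=True)
module = grammar.parse(code2, path='example.py', diff_cache=True)
assert module.get_code() == code2
```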
```diff
@@ -79,11 +79,17 @@ def test_tokenize_start_pos(code, positions):
     assert positions == [p.start_pos for p in tokens]
 
 
-def test_roundtrip(grammar):
-    code = dedent("""\
+@pytest.mark.parametrize(
+    'code', [
+        dedent("""\
            f'''s{
               str.uppe
            '''
-    """)
+        """),
+        'f"foo',
+        'f"""foo',
+    ]
+)
+def test_roundtrip(grammar, code):
     tree = grammar.parse(code)
     assert tree.get_code() == code
```
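The parametrized test asserts that even unterminated f-strings survive a parse/`get_code()` round trip thanks to error recovery. The same check as a standalone sketch:

```python
import parso

grammar = parso.load_grammar(version='3.6')
for code in ('f"foo', 'f"""foo'):
    tree = grammar.parse(code)  # error recovery keeps the broken literal
    assert tree.get_code() == code
```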