8 Commits

Author SHA1 Message Date
Dave Halter
acccb4f28d 0.3.4 release 2019-02-13 00:19:07 +01:00
Dave Halter
3f6fc8a5ad Fix an f-string tokenizer issue 2019-02-13 00:17:37 +01:00
Dave Halter
f1ee7614c9 Release of 0.3.3 2019-02-06 09:55:18 +01:00
Dave Halter
58850f8bfa Rename a test 2019-02-06 09:51:46 +01:00
Dave Halter
d38a60278e Remove some unused code 2019-02-06 09:50:27 +01:00
Dave Halter
6c65aea47d Fix working with async functions in the diff parser, fixes #56 2019-02-06 09:31:46 +01:00
Dave Halter
0d37ff865c Fix bytes/fstring mixing when using iter_errors, fixes #57. 2019-02-06 01:28:47 +01:00
Dave Halter
076e296497 Improve a docstring, fixes #55. 2019-01-26 21:34:56 +01:00
10 changed files with 62 additions and 17 deletions

View File

@@ -3,6 +3,18 @@
Changelog Changelog
--------- ---------
0.3.4 (2019-02-13)
+++++++++++++++++++
- Fix an f-string tokenizer error
0.3.3 (2019-02-06)
+++++++++++++++++++
- Fix async errors in the diff parser
- A fix in iter_errors
- This is a very small bugfix release
0.3.2 (2018-01-24) 0.3.2 (2018-01-24)
+++++++++++++++++++ +++++++++++++++++++

View File

@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
from parso.utils import split_lines, python_bytes_to_unicode from parso.utils import split_lines, python_bytes_to_unicode
__version__ = '0.3.2' __version__ = '0.3.4'
def parse(code=None, **kwargs): def parse(code=None, **kwargs):

View File

@@ -682,6 +682,8 @@ class _NodesTree(object):
last = new_nodes[-1] last = new_nodes[-1]
if last.type == 'decorated': if last.type == 'decorated':
last = last.children[-1] last = last.children[-1]
if last.type in ('async_funcdef', 'async_stmt'):
last = last.children[-1]
last_line_offset_leaf = last.children[-2].get_last_leaf() last_line_offset_leaf = last.children[-2].get_last_leaf()
assert last_line_offset_leaf == ':' assert last_line_offset_leaf == ':'
else: else:

View File

@@ -570,11 +570,14 @@ class _BytesAndStringMix(SyntaxRule):
message = "cannot mix bytes and nonbytes literals" message = "cannot mix bytes and nonbytes literals"
def _is_bytes_literal(self, string): def _is_bytes_literal(self, string):
if string.type == 'fstring':
return False
return 'b' in string.string_prefix.lower() return 'b' in string.string_prefix.lower()
def is_issue(self, node): def is_issue(self, node):
first = node.children[0] first = node.children[0]
if first.type == 'string' and self._normalizer.version >= (3, 0): # In Python 2 it's allowed to mix bytes and unicode.
if self._normalizer.version >= (3, 0):
first_is_bytes = self._is_bytes_literal(first) first_is_bytes = self._is_bytes_literal(first)
for string in node.children[1:]: for string in node.children[1:]:
if first_is_bytes != self._is_bytes_literal(string): if first_is_bytes != self._is_bytes_literal(string):

View File

@@ -419,8 +419,6 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
tos = fstring_stack[-1] tos = fstring_stack[-1]
if not tos.is_in_expr(): if not tos.is_in_expr():
string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos) string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos)
if pos == max:
break
if string: if string:
yield PythonToken( yield PythonToken(
FSTRING_STRING, string, FSTRING_STRING, string,
@@ -431,6 +429,8 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
) )
tos.previous_lines = '' tos.previous_lines = ''
continue continue
if pos == max:
break
rest = line[pos:] rest = line[pos:]
fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary( fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary(

View File

@@ -969,7 +969,7 @@ class ImportName(Import):
class KeywordStatement(PythonBaseNode): class KeywordStatement(PythonBaseNode):
""" """
For the following statements: `assert`, `del`, `global`, `nonlocal`, For the following statements: `assert`, `del`, `global`, `nonlocal`,
`raise`, `return`, `yield`, `return`, `yield`. `raise`, `return`, `yield`.
`pass`, `continue` and `break` are not in there, because they are just `pass`, `continue` and `break` are not in there, because they are just
simple keywords and the parser reduces it to a keyword. simple keywords and the parser reduces it to a keyword.

View File

@@ -285,6 +285,14 @@ if sys.version_info >= (3,):
'b"ä"', 'b"ä"',
# combining strings and unicode is allowed in Python 2. # combining strings and unicode is allowed in Python 2.
'"s" b""', '"s" b""',
'"s" b"" ""',
'b"" "" b"" ""',
]
if sys.version_info >= (3, 6):
FAILING_EXAMPLES += [
# Same as above, but for f-strings.
'f"s" b""',
'b"s" f""',
] ]
if sys.version_info >= (2, 7): if sys.version_info >= (2, 7):
# This is something that raises a different error in 2.6 than in the other # This is something that raises a different error in 2.6 than in the other

View File

@@ -203,9 +203,6 @@ class FileTests:
self._test_count = test_count self._test_count = test_count
self._code_lines = self._code_lines self._code_lines = self._code_lines
self._change_count = change_count self._change_count = change_count
with open(file_path) as f:
code = f.read()
self._file_modifications = [] self._file_modifications = []
def _run(self, grammar, file_modifications, debugger, print_code=False): def _run(self, grammar, file_modifications, debugger, print_code=False):

View File

@@ -1243,7 +1243,7 @@ def test_open_bracket_case2(differ):
differ.parse(code1, copies=2, parsers=0, expect_error_leaves=True) differ.parse(code1, copies=2, parsers=0, expect_error_leaves=True)
def test_x(differ): def test_some_weird_removals(differ):
code1 = dedent('''\ code1 = dedent('''\
class C: class C:
1 1
@@ -1264,6 +1264,23 @@ def test_x(differ):
omega omega
''') ''')
differ.initialize(code1) differ.initialize(code1)
differ.parse(code2, copies=ANY, parsers=ANY, expect_error_leaves=True) differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code3, copies=ANY, parsers=ANY, expect_error_leaves=True) differ.parse(code3, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=1) differ.parse(code1, copies=1)
@pytest.mark.skipif(sys.version_info < (3, 5), reason="Async starts working in 3.5")
def test_async_copy(differ):
code1 = dedent('''\
async def main():
x = 3
print(
''')
code2 = dedent('''\
async def main():
x = 3
print()
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1)
differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)

View File

@@ -79,11 +79,17 @@ def test_tokenize_start_pos(code, positions):
assert positions == [p.start_pos for p in tokens] assert positions == [p.start_pos for p in tokens]
def test_roundtrip(grammar): @pytest.mark.parametrize(
code = dedent("""\ 'code', [
f'''s{ dedent("""\
str.uppe f'''s{
''' str.uppe
""") '''
"""),
'f"foo',
'f"""foo',
]
)
def test_roundtrip(grammar, code):
tree = grammar.parse(code) tree = grammar.parse(code)
assert tree.get_code() == code assert tree.get_code() == code