Fix tokenizer: Closing parentheses in the wrong place should not lead to strange behavior

Dave Halter
2019-01-13 14:51:34 +01:00
parent e10802ab09
commit dd1761da96
2 changed files with 16 additions and 1 deletion

parso/python/tokenize.py

@@ -585,7 +585,8 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
                 if fstring_stack:
                     fstring_stack[-1].close_parentheses(token)
                 else:
-                    paren_level -= 1
+                    if paren_level:
+                        paren_level -= 1
             elif token == ':' and fstring_stack \
                     and fstring_stack[-1].parentheses_count == 1:
                 fstring_stack[-1].format_spec_count += 1
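To see why the guard matters, here is a minimal standalone sketch (not parso's actual code, just the same counting idea): without the "if paren_level:" check, a stray closing bracket at module level drives the counter negative, so every bracket that follows is tracked against a bogus nesting level.

def paren_levels(tokens):
    """Track bracket nesting, clamping the level at zero (the fix)."""
    paren_level = 0
    for token in tokens:
        if token in '([{':
            paren_level += 1
        elif token in ')]}':
            if paren_level:  # without this guard the level goes negative
                paren_level -= 1
        yield token, paren_level

# The stray '}' is ignored for counting, so the later pair stays balanced:
assert list(paren_levels('}{}')) == [('}', 0), ('{', 1), ('}', 0)]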

test/test_tokenize.py

@@ -285,3 +285,17 @@ def test_error_token_after_dedent():
         ERRORTOKEN, NAME, NEWLINE, ENDMARKER
     ]
     assert [t.type for t in lst] == expected
+
+
+def test_brackets_no_indentation():
+    """
+    There used to be an issue that the parentheses counting would go below
+    zero. This should not happen.
+    """
+    code = dedent("""\
+        }
+        {
+        }
+        """)
+    lst = _get_token_list(code)
+    assert [t.type for t in lst] == [OP, NEWLINE, OP, OP, NEWLINE, ENDMARKER]
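For completeness, a hedged sketch of reproducing this outside the test suite through parso's internal tokenizer; tokenize() and parse_version_string() are assumed to keep the signatures they have around this commit, and the module paths may change in later releases.

from parso.python.tokenize import tokenize
from parso.utils import parse_version_string

code = '}\n{\n}\n'
# version_info selects the grammar version to tokenize against.
tokens = list(tokenize(code, parse_version_string('3.6')))
print([t.type for t in tokens])
# Should list the types asserted in the test above:
# OP, NEWLINE, OP, OP, NEWLINE, ENDMARKER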