Fix tokenizer error tokens

Dave Halter
2020-04-07 09:55:28 +02:00
parent f45941226f
commit 7b14a86e0a
2 changed files with 26 additions and 1 deletion

parso/python/tokenize.py

@@ -532,7 +532,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0), indents=None, is_first
         if not pseudomatch:  # scan for tokens
             match = whitespace.match(line, pos)
-            if pos == 0 and paren_level == 0 and not fstring_stack:
+            if new_line and paren_level == 0 and not fstring_stack:
                 for t in dedent_if_necessary(match.end()):
                     yield t
             pos = match.end()
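
In effect, the old guard ran the dedent bookkeeping whenever scanning started at column 0, which also fires on the physical line after a backslash continuation; the new_line flag instead tracks genuine logical-line starts. A minimal sketch (not from the commit; assumes parso is installed) of the kind of input this guards against:

    import parso

    # A backslash continuation followed by a control character: the next
    # physical line starts at column 0 even though the logical line continues,
    # so the tokenizer must recover with an error token rather than dedent.
    code = 'def f():\n    x = "" \\\n\x0f\n    return\n'
    module = parso.parse(code)  # error recovery is on by default
    # parso trees round-trip their source exactly, error leaves included.
    assert module.get_code() == code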

test/test_diff_parser.py

@@ -1636,3 +1636,28 @@ def test_fstring_with_error_leaf(differ):
     differ.initialize(code1)
     differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
+
+
+def test_yet_another_backslash(differ):
+    code1 = dedent('''\
+        def f():
+            x
+            def g():
+                y
+                base = "" \\
+                       "" % to
+                return
+        ''')
+    code2 = dedent('''\
+        def f():
+            x
+            def g():
+                y
+                base = "" \\
+                \x0f
+                return
+        ''')
+    differ.initialize(code1)
+    differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True)
+    differ.parse(code1, parsers=ANY, copies=ANY)
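
For reference, the differ fixture drives parso's incremental diff parser. Outside the test suite, the same machinery can be reached through the cache keywords on Grammar.parse; a hedged sketch (the path is illustrative, not from the commit):

    import parso

    grammar = parso.load_grammar()
    # Reparsing the same path with cache=True and diff_cache=True lets parso
    # patch the cached tree instead of reparsing the file from scratch.
    old = grammar.parse(code='x = 1\n', path='example.py',
                        cache=True, diff_cache=True)
    new = grammar.parse(code='x = 1\ny = 2\n', path='example.py',
                        cache=True, diff_cache=True)
    assert new.get_code() == 'x = 1\ny = 2\n'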