Fix tokenizer: backslash line continuations sometimes caused spurious newline tokens to be generated

This commit is contained in:
Dave Halter
2019-01-14 09:59:16 +01:00
parent 1e25445176
commit 9cc8178998
3 changed files with 9 additions and 2 deletions

View File

@@ -483,7 +483,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
break
initial = token[0]
if new_line and initial not in '\r\n#':
if new_line and initial not in '\r\n\\#':
new_line = False
if paren_level == 0 and not fstring_stack:
i = 0