Fix tokenizer: backslashes sometimes led to newline token generation

This commit is contained in:
Dave Halter
2019-01-14 09:59:16 +01:00
parent 1e25445176
commit 9cc8178998
3 changed files with 9 additions and 2 deletions

View File

@@ -40,7 +40,7 @@ _python_reserved_strings = tuple(
)
_random_python_fragments = _python_reserved_strings + (
' ', '\t', '\n', '\r', '\f', 'f"', 'F"""', "fr'", "RF'''", '"', '"""', "'",
-    "'''", ';', ' some_random_word ', '\\'
+    "'''", ';', ' some_random_word ', '\\', '#',
)
@@ -172,6 +172,7 @@ class FileTests:
code = f.read()
self._code_lines = split_lines(code, keepends=True)
self._test_count = test_count
+        self._code_lines = self._code_lines[:30]
self._change_count = change_count
with open(file_path) as f: