Mirror of https://github.com/davidhalter/parso.git (synced 2025-12-06 21:04:29 +08:00)
Fix another tokenizer issue
@@ -457,10 +457,11 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
             pseudomatch = pseudo_token.match(line, pos)
             if not pseudomatch:  # scan for tokens
                 match = whitespace.match(line, pos)
+                if pos == 0:
+                    for t in dedent_if_necessary(match.end()):
+                        yield t
                 pos = match.end()
                 new_line = False
-                for t in dedent_if_necessary(pos):
-                    yield t
                 yield PythonToken(
                     ERRORTOKEN, line[pos], (lnum, pos),
                     additional_prefix + match.group(0)
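The hunk moves the dedent check ahead of `pos = match.end()` and keys it on the end of the matched whitespace, so DEDENT tokens are emitted at column 0 before the ERRORTOKEN, instead of after `pos` has already advanced. A minimal sketch of the code path it fixes, using only the public parso API (my illustration, not part of the commit):

import parso

# A lone '$' cannot start any Python token, so pseudo_token.match
# fails and control falls into the ERRORTOKEN branch patched above.
# The DEDENT closing the `if` block must come out first, at column 0,
# which is what the new `if pos == 0` check guarantees.
code = 'if x:\n    y\n$\n'

module = parso.parse(code)
assert module.get_code() == code          # error recovery round-trips
print([child.type for child in module.children])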
@@ -961,4 +961,8 @@ def test_random_unicode_characters(differ):
     differ.parse('\r\r', parsers=1)
     differ.parse("˟Ę\x05À\r rúƣ@\x8a\x15r()\n", parsers=1, expect_error_leaves=True)
     differ.parse('a\ntaǁ\rGĒōns__\n\nb', parsers=1)
+    s = ' if not (self, "_fi\x02\x0e\x08\n\nle"):'
+    differ.parse(s, parsers=1, expect_error_leaves=True)
+    differ.parse('')
+    differ.parse(s + '\n', parsers=1, expect_error_leaves=True)
     differ.parse('')
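The new regression lines can also be replayed outside the test suite; this sketch (my addition, assuming only the public parso.parse API rather than the test module's `differ` fixture) checks the round-trip guarantee the diff parser relies on:

import parso

# The fuzzer-found input added in the test above: an indented `if`
# header containing control characters and embedded newlines.
s = ' if not (self, "_fi\x02\x0e\x08\n\nle"):'

for code in (s, s + '\n'):
    module = parso.parse(code)
    # Error recovery must keep the source reproducible byte for byte.
    assert module.get_code() == code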