Mirror of https://github.com/davidhalter/parso.git, synced 2025-12-07 05:14:29 +08:00.
Fix the tokenizer
This commit is contained in:
@@ -542,7 +542,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0), indents=None, is_first
                 m = re.match(r'[ \f\t]*$', line[:start])
                 if m is not None:
                     yield from dedent_if_necessary(m.end())
-            if token.is_identifier():
+            if token.isidentifier():
                 yield PythonToken(NAME, token, spos, prefix)
             else:
                 yield from _split_illegal_unicode_name(token, spos, prefix)
@@ -664,7 +664,7 @@ def _split_illegal_unicode_name(token, start_pos, prefix):
     pos = start_pos
     for i, char in enumerate(token):
         if is_illegal:
-            if is_identifier(char):
+            if char.isidentifier():
                 yield create_token()
                 found = char
                 is_illegal = False
@@ -674,7 +674,7 @@ def _split_illegal_unicode_name(token, start_pos, prefix):
                 found += char
         else:
             new_found = found + char
-            if is_identifier(new_found):
+            if new_found.isidentifier():
                 found = new_found
             else:
                 if found:
|
|||||||
Reference in New Issue
Block a user