forked from VimPlug/jedi
ignore dedents in tokenizer
@@ -402,7 +402,6 @@ class Parser(object):
         """ Generate the next tokenize pattern. """
         typ, tok, start_pos, end_pos = next(self._gen)
         # dedents shouldn't change positions
-        if typ != tokenize.DEDENT:
-            self.start_pos = start_pos
-            if typ not in (tokenize.INDENT, tokenize.NEWLINE, tokenize.NL):
-                self.start_pos, self.end_pos = start_pos, end_pos
+        self.start_pos = start_pos
+        if typ not in (tokenize.INDENT, tokenize.NEWLINE, tokenize.NL):
+            self.start_pos, self.end_pos = start_pos, end_pos
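
The position bookkeeping above only matters because the stdlib tokenizer reports DEDENT tokens at a surprising place. A minimal sketch against the unmodified stdlib tokenize (not this fork) showing the effect: each DEDENT is zero-width and starts at the first token of the following logical line, so letting it update self.start_pos would jump the parser past the block it just closed.

    import io
    import tokenize

    source = "def f():\n    x = 1\ny = 2\n"
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.DEDENT:
            # Zero-width token at the start of the line after the block.
            print("DEDENT at", tok.start, tok.end)  # DEDENT at (3, 0) (3, 0)
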
@@ -433,15 +432,6 @@ class Parser(object):
             # debug.dbg('main: tok=[%s] type=[%s] indent=[%s]', \
             #           tok, tokenize.tok_name[token_type], start_position[0])

-            while token_type == tokenize.DEDENT and self._scope != self.module:
-                token_type, tok = self.next()
-                if self.start_pos[1] <= self._scope.start_pos[1]:
-                    self._scope.end_pos = self.start_pos
-                    self._scope = self._scope.parent
-                    if isinstance(self._scope, pr.Module) \
-                            and not isinstance(self._scope, pr.SubModule):
-                        self._scope = self.module
-
             # check again for unindented stuff. this is true for syntax
             # errors. only check for names, because thats relevant here. If
             # some docstrings are not indented, I don't care.
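
With the tokenizer no longer yielding DEDENTs (see the tokenizer hunks below), this loop was dead code: it could only run when token_type == tokenize.DEDENT. What it used to do is close scopes by column comparison. A hypothetical sketch of that rule in isolation; Scope and close_scopes are illustrative names, not jedi's API:

    class Scope:
        def __init__(self, start_col, parent=None):
            self.start_col = start_col
            self.parent = parent

    def close_scopes(scope, token_col):
        # Walk up the stack while the new token is not indented deeper
        # than the column where the current scope's header started.
        while scope.parent is not None and token_col <= scope.start_col:
            scope = scope.parent
        return scope

    module = Scope(0)
    func = Scope(0, parent=module)   # "def f():" at column 0
    inner = Scope(4, parent=func)    # nested block header at column 4
    assert close_scopes(inner, 0) is module
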
@@ -150,7 +150,10 @@ def source_tokens(source, line_offset=0):


 def generate_tokens(readline, line_offset=0):
-    """The original stdlib Python version with minor modifications"""
+    """
+    The original stdlib Python version with minor modifications.
+    Modified to not care about dedents.
+    """
     lnum = line_offset
     parenlev = 0
     continued = False
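
A usage sketch, assuming the patched generate_tokens is importable from this module: per the hunks here it keeps the stdlib readline calling convention but yields 4-field TokenInfo tuples (type, string, start, end), and after this commit the stream contains no DEDENTs.

    import io

    source = "def f():\n    x = 1\ny = 2\n"
    readline = io.StringIO(source).readline
    for typ, tok, start, end in generate_tokens(readline):
        print(typ, repr(tok), start, end)  # no DEDENT rows in this stream
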
@@ -225,7 +228,6 @@ def generate_tokens(readline, line_offset=0):
                 yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos))
             while column < indents[-1]:
                 indents = indents[:-1]
-                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos))

         else:                                  # continued statement
             if not line:
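
For comparison, the unmodified stdlib tokenizer does emit these tokens. A small check against stdlib tokenize showing what this fork now drops:

    import io
    import tokenize

    source = "if a:\n    if b:\n        pass\nc = 1\n"
    toks = list(tokenize.generate_tokens(io.StringIO(source).readline))
    dedents = [t for t in toks if t.type == tokenize.DEDENT]
    print(len(dedents))  # 2: both nested blocks close before "c = 1"
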
@@ -288,8 +290,6 @@ def generate_tokens(readline, line_offset=0):
                                    (lnum, pos), (lnum, pos + 1))
                 pos += 1

-    for indent in indents[1:]:                 # pop remaining indent levels
-        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0))
     yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0))


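
The deleted loop implemented the stdlib's end-of-file behavior: one DEDENT per still-open indent level, emitted just before ENDMARKER. Verifiable against stdlib tokenize:

    import io
    import tokenize

    source = "if a:\n    if b:\n        pass\n"
    names = [tokenize.tok_name[t.type]
             for t in tokenize.generate_tokens(io.StringIO(source).readline)]
    print(names[-3:])  # ['DEDENT', 'DEDENT', 'ENDMARKER']
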
@@ -341,8 +341,8 @@ class NoErrorTokenizer(object):
                 raise common.MultiLevelStopIteration()
             # ignore indents/comments
             if self.is_fast_parser \
-                    and self.previous[0] in (INDENT, NL, None, NEWLINE, DEDENT) \
-                    and c[0] not in (COMMENT, INDENT, NL, NEWLINE, DEDENT):
+                    and self.previous[0] in (INDENT, NL, None, NEWLINE) \
+                    and c[0] not in (COMMENT, INDENT, NL, NEWLINE):
                 # print c, tok_name[c[0]]

                 tok = c[1]
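
Both tuples drop DEDENT, consistent with a stream that never contains it. An illustrative restatement of the updated predicate, with a made-up function name; the token constants are the stdlib's:

    from tokenize import COMMENT, INDENT, NAME, NEWLINE, NL

    def starts_new_statement(previous_type, current_type):
        # Previous token ended a statement (or we're at the very start)
        # and the current token is real code, not layout or a comment.
        return (previous_type in (INDENT, NL, None, NEWLINE)
                and current_type not in (COMMENT, INDENT, NL, NEWLINE))

    print(starts_new_statement(NEWLINE, NAME))  # True
    print(starts_new_statement(NEWLINE, NL))    # False
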