forked from VimPlug/jedi

ignore dedents in tokenizer

Dave Halter
2014-02-20 01:21:20 +01:00
parent 3e9b72b636
commit c26ae3c00d
2 changed files with 9 additions and 19 deletions


@@ -402,7 +402,6 @@ class Parser(object):
""" Generate the next tokenize pattern. """
typ, tok, start_pos, end_pos = next(self._gen)
# dedents shouldn't change positions
if typ != tokenize.DEDENT:
self.start_pos = start_pos
if typ not in (tokenize.INDENT, tokenize.NEWLINE, tokenize.NL):
self.start_pos, self.end_pos = start_pos, end_pos
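A note on the guard removed here (stdlib behaviour, not part of this commit): DEDENT tokens are zero-width and sit at the position of the token that follows them, so taking positions from a DEDENT never added information. A quick demo with Python's own tokenize module:

import io
import tokenize

# Tokenize a snippet that dedents back to column 0 and show positions.
src = "if x:\n    y = 1\nz = 2\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], tok.start, tok.end, repr(tok.string))
# Among the output:
#   DEDENT (3, 0) (3, 0) ''    <- zero width, same start as the NAME 'z' after it

Since the tokenizer below stops emitting DEDENT altogether, the guard becomes dead code and the assignment can run unconditionally.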
@@ -433,15 +432,6 @@ class Parser(object):
             # debug.dbg('main: tok=[%s] type=[%s] indent=[%s]', \
             #           tok, tokenize.tok_name[token_type], start_position[0])
-            while token_type == tokenize.DEDENT and self._scope != self.module:
-                token_type, tok = self.next()
-                if self.start_pos[1] <= self._scope.start_pos[1]:
-                    self._scope.end_pos = self.start_pos
-                    self._scope = self._scope.parent
-                    if isinstance(self._scope, pr.Module) \
-                            and not isinstance(self._scope, pr.SubModule):
-                        self._scope = self.module
             # check again for unindented stuff. this is true for syntax
             # errors. only check for names, because thats relevant here. If
             # some docstrings are not indented, I don't care.
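With the DEDENT-driven loop gone, closing a scope has to be inferred from columns alone: a token that starts at or before a scope's start column ends that scope (the "check again for unindented stuff" path kept above). A minimal, hypothetical sketch of that rule — close_scopes and scope_stack are illustration names, not jedi's API:

def close_scopes(scope_stack, start_pos):
    # scope_stack: list of (name, (line, column)) pairs, module first.
    # Mirrors the deleted `self._scope = self._scope.parent` walk, but is
    # driven by the new token's column instead of by DEDENT tokens.
    while len(scope_stack) > 1 and start_pos[1] <= scope_stack[-1][1][1]:
        scope_stack.pop()
    return scope_stack

stack = [('module', (1, 0)), ('def f', (2, 0)), ('def g', (3, 4))]
close_scopes(stack, (10, 0))  # a token at column 0 closes both functions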


@@ -150,7 +150,10 @@ def source_tokens(source, line_offset=0):
 def generate_tokens(readline, line_offset=0):
-    """The original stdlib Python version with minor modifications"""
+    """
+    The original stdlib Python version with minor modifications.
+    Modified to not care about dedents.
+    """
     lnum = line_offset
     parenlev = 0
     continued = False
@@ -225,7 +228,6 @@ def generate_tokens(readline, line_offset=0):
                 yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos))
             while column < indents[-1]:
                 indents = indents[:-1]
-                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos))
         else:  # continued statement
             if not line:
@@ -288,8 +290,6 @@ def generate_tokens(readline, line_offset=0):
                               (lnum, pos), (lnum, pos + 1))
                 pos += 1
-    for indent in indents[1:]:  # pop remaining indent levels
-        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0))
     yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0))
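The combined effect of the two deleted yields can be approximated with the stdlib tokenizer (an illustration, not jedi's code): the indents stack is still maintained, because INDENT detection depends on it, but no DEDENT ever reaches the consumer — neither mid-stream nor in the trailing run before ENDMARKER:

import io
import tokenize

def tokens_ignoring_dedents(source):
    # Same stream as tokenize.generate_tokens, minus all DEDENT tokens.
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type != tokenize.DEDENT:
            yield tok

for tok in tokens_ignoring_dedents("if x:\n    y = 1\nz = 2\n"):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# NAME 'z' now follows the NEWLINE directly, and ENDMARKER arrives without
# a DEDENT run in front of it.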
@@ -341,8 +341,8 @@ class NoErrorTokenizer(object):
         raise common.MultiLevelStopIteration()
         # ignore indents/comments
         if self.is_fast_parser \
-                and self.previous[0] in (INDENT, NL, None, NEWLINE, DEDENT) \
-                and c[0] not in (COMMENT, INDENT, NL, NEWLINE, DEDENT):
+                and self.previous[0] in (INDENT, NL, None, NEWLINE) \
+                and c[0] not in (COMMENT, INDENT, NL, NEWLINE):
             # print c, tok_name[c[0]]
             tok = c[1]
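In this last hunk only the two membership tuples change; the shape of the test stays the same. A hedged reading of what the condition computes, with starts_new_statement as a made-up name (the surrounding code then works with tok = c[1], as the final line of the hunk shows):

from tokenize import COMMENT, INDENT, NL, NEWLINE

def starts_new_statement(previous, c):
    # previous and c are token tuples of the form (type, string, ...);
    # previous[0] is None at the very start of the stream. With DEDENT gone
    # from the token stream, neither tuple needs to mention it any more.
    return (previous[0] in (INDENT, NL, None, NEWLINE)
            and c[0] not in (COMMENT, INDENT, NL, NEWLINE))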