ignore dedents in tokenizer

This commit is contained in:
Dave Halter
2014-02-20 01:21:20 +01:00
parent 3e9b72b636
commit c26ae3c00d
2 changed files with 9 additions and 19 deletions

View File

@@ -402,10 +402,9 @@ class Parser(object):
""" Generate the next tokenize pattern. """
typ, tok, start_pos, end_pos = next(self._gen)
# dedents shouldn't change positions
if typ != tokenize.DEDENT:
self.start_pos = start_pos
if typ not in (tokenize.INDENT, tokenize.NEWLINE, tokenize.NL):
self.start_pos, self.end_pos = start_pos, end_pos
self.start_pos = start_pos
if typ not in (tokenize.INDENT, tokenize.NEWLINE, tokenize.NL):
self.start_pos, self.end_pos = start_pos, end_pos
self._current = typ, tok
return self._current
@@ -433,15 +432,6 @@ class Parser(object):
# debug.dbg('main: tok=[%s] type=[%s] indent=[%s]', \
# tok, tokenize.tok_name[token_type], start_position[0])
while token_type == tokenize.DEDENT and self._scope != self.module:
token_type, tok = self.next()
if self.start_pos[1] <= self._scope.start_pos[1]:
self._scope.end_pos = self.start_pos
self._scope = self._scope.parent
if isinstance(self._scope, pr.Module) \
and not isinstance(self._scope, pr.SubModule):
self._scope = self.module
# Check again for unindented stuff. This is true for syntax
# errors. Only check for names, because that's relevant here. If
# some docstrings are not indented, I don't care.