forked from VimPlug/jedi
removed the INDENT usages that were left in the parser code
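For context: a Python tokenizer emits INDENT/DEDENT tokens whenever the indentation level changes and a NEWLINE token at the end of each logical line. Since jedi's forked tokenizer (changed below) no longer produces INDENT at all, the parser-side special cases can go. A minimal sketch with the standard-library tokenize module, not jedi's fork, showing the tokens in question:

import io
import tokenize

# Tokenize a tiny function and print each token's name; note the INDENT
# after the 'def' line's NEWLINE and the DEDENT at the end of the body.
source = "def f():\n    return 1\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))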
@@ -157,8 +157,6 @@ class Parser(object):
         token_type, tok = self.next()
         if brackets and tok == '\n':
             token_type, tok = self.next()
-        if token_type == tokenize.INDENT:
-            continue  # TODO REMOVE, after removing the indents.
         i, token_type, tok = self._parse_dot_name(self._current)
         if not i:
             defunct = True
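The guard deleted above only skipped stray INDENT tokens and was already marked with a TODO. The commit's approach is to drop such tokens at the source instead; a hypothetical helper illustrating that idea (not jedi's actual code):

import tokenize

def without_indentation_tokens(tokens):
    # Drop pure indentation bookkeeping so downstream parser loops never
    # have to special-case INDENT/DEDENT again.
    for tok in tokens:
        if tok[0] not in (tokenize.INDENT, tokenize.DEDENT):
            yield tok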
@@ -402,7 +400,7 @@ class Parser(object):
         typ, tok, start_pos, end_pos = next(self._gen)
         # dedents shouldn't change positions
         self.start_pos = start_pos
-        if typ not in (tokenize.INDENT, tokenize.NEWLINE):
+        if typ not in (tokenize.NEWLINE,):
             self.start_pos, self.end_pos = start_pos, end_pos

         self._current = typ, tok
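Note the trailing comma in the new line: (tokenize.NEWLINE,) is a one-element tuple, whereas (tokenize.NEWLINE) is just a parenthesized int, and a membership test against it would raise. A quick demonstration:

import tokenize

# Tuple membership works; membership against a bare int does not.
assert tokenize.NEWLINE in (tokenize.NEWLINE,)
try:
    tokenize.NEWLINE in (tokenize.NEWLINE)
except TypeError as exc:
    print(exc)  # "argument of type 'int' is not iterable"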
@@ -617,8 +615,7 @@ class Parser(object):
                 self._scope.add_statement(stmt)
                 self.freshscope = False
             else:
-                if token_type not in [tokenize.COMMENT, tokenize.INDENT,
-                                      tokenize.NEWLINE]:
+                if token_type not in [tokenize.COMMENT, tokenize.NEWLINE]:
                     debug.warning('Token not used: %s %s %s', tok,
                                   tokenize.tok_name[token_type], self.start_pos)
             continue
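The warning branch survives with INDENT simply dropped from the ignore list. tokenize.tok_name maps token ints to readable names, which is what keeps the debug.warning output legible; a standalone illustration with plain print in place of jedi's debug module:

import tokenize

tok, token_type, start_pos = '# a comment', tokenize.COMMENT, (42, 0)
# Mirrors the debug.warning format string used above.
print('Token not used: %s %s %s' % (tok, tokenize.tok_name[token_type], start_pos))

The remaining hunks are in jedi's forked tokenizer module (generate_tokens and NoErrorTokenizer).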
@@ -157,8 +157,6 @@ def generate_tokens(readline, line_offset=0):
     numchars = '0123456789'
     contstr = ''
     contline = None
-    indents = [0]
-
     while True:                                # loop over lines in stream
         try:
             line = readline()
@@ -208,14 +206,6 @@ def generate_tokens(readline, line_offset=0):
             else:
                 yield TokenInfo(NEWLINE, line[pos:], (lnum, pos), (lnum, len(line)))
                 continue

-            if column > indents[-1]:           # count indents or dedents
-                indents.append(column)
-                #print repr(line), lnum
-                #yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos))
-            while column < indents[-1]:
-                indents = indents[:-1]

         else:                                  # continued statement
             continued = False
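The two hunks above delete the classic indentation stack: a column deeper than the stack top pushes (formerly one INDENT), and a shallower column pops until it matches (formerly one DEDENT per pop). A self-contained sketch of the deleted bookkeeping:

def track_indentation(indents, column):
    # Push when the column grows (would have yielded INDENT), pop while it
    # shrinks (would have yielded one DEDENT per level closed).
    if column > indents[-1]:
        indents.append(column)
    while column < indents[-1]:
        indents = indents[:-1]
    return indents

print(track_indentation([0], 4))        # [0, 4]
print(track_indentation([0, 4, 8], 0))  # [0]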
@@ -318,10 +308,10 @@ class NoErrorTokenizer(object):
         if not self.first_stmt:
             self.closed = True
             raise common.MultiLevelStopIteration()
-        # ignore indents/comments
+        # ignore comments/ newlines
         if self.is_fast_parser \
-                and self.previous[0] in (INDENT, None, NEWLINE) \
-                and c[0] not in (COMMENT, INDENT, NEWLINE):
+                and self.previous[0] in (None, NEWLINE) \
+                and c[0] not in (COMMENT, NEWLINE):
             # print c, tok_name[c[0]]

             tok = c[1]
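After this change the fast-parser gate fires only when the previous token was None or a NEWLINE and the current one is neither a COMMENT nor a NEWLINE; INDENT can no longer appear on either side. A hypothetical standalone version of the predicate, with previous and c as (token_type, token_string) pairs as in NoErrorTokenizer:

from tokenize import COMMENT, NEWLINE, NAME

def fast_parser_break(is_fast_parser, previous, c):
    # True when a new statement starts at the top of a logical line.
    return (is_fast_parser
            and previous[0] in (None, NEWLINE)
            and c[0] not in (COMMENT, NEWLINE))

print(fast_parser_break(True, (NEWLINE, '\n'), (NAME, 'def')))  # True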