
tokenize removed NL/NEWLINE ambiguity

This commit is contained in:
Dave Halter
2014-02-20 01:52:30 +01:00
parent c26ae3c00d
commit d7033726fd
2 changed files with 8 additions and 16 deletions
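For context: CPython's tokenize module (which jedi vendors here) emits two different newline tokens: NEWLINE ends a logical line, while NL marks a non-logical line break such as a blank line, a comment-only line, or a line break inside open brackets. This commit collapses NL into NEWLINE in the vendored tokenizer so the parser only ever sees one newline token. A small stdlib illustration of the distinction being removed (the printed positions assume this exact source string):

    import io
    import tokenize

    code = "x = (1 +\n     2)\n\ny = 3\n"
    for tok in tokenize.generate_tokens(io.StringIO(code).readline):
        if tok.type in (tokenize.NEWLINE, tokenize.NL):
            print(tokenize.tok_name[tok.type], tok.start)
    # NL (1, 8)       line break inside still-open parentheses
    # NEWLINE (2, 7)  end of the logical line `x = (1 + 2)`
    # NL (3, 0)       blank line
    # NEWLINE (4, 5)  end of the logical line `y = 3`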


@@ -71,11 +71,9 @@ class Parser(object):
             # because of `self.module.used_names`.
             d.parent = self.module
-        if self._current[0] in (tokenize.NL, tokenize.NEWLINE):
+        if self._current[0] in (tokenize.NEWLINE,):
             # we added a newline before, so we need to "remove" it again.
             self.end_pos = self._gen.previous[2]
-        elif self._current[0] == tokenize.INDENT:
-            self.end_pos = self._gen.last_previous[2]
         self.start_pos = self.module.start_pos
         self.module.end_pos = self.end_pos
@@ -403,7 +401,7 @@ class Parser(object):
         typ, tok, start_pos, end_pos = next(self._gen)
         # dedents shouldn't change positions
         if typ != tokenize.DEDENT:
             self.start_pos = start_pos
-            if typ not in (tokenize.INDENT, tokenize.NEWLINE, tokenize.NL):
+            if typ not in (tokenize.INDENT, tokenize.NEWLINE):
                 self.start_pos, self.end_pos = start_pos, end_pos
         self._current = typ, tok
@@ -619,7 +617,7 @@ class Parser(object):
                 self.freshscope = False
             else:
                 if token_type not in [tokenize.COMMENT, tokenize.INDENT,
-                                      tokenize.NEWLINE, tokenize.NL]:
+                                      tokenize.NEWLINE]:
                     debug.warning('Token not used: %s %s %s', tok,
                                   tokenize.tok_name[token_type], self.start_pos)
                     continue
@@ -655,7 +653,3 @@ class PushBackTokenizer(object):
     @property
     def previous(self):
         return self._tokenizer.previous
-
-    @property
-    def last_previous(self):
-        return self._tokenizer.last_previous
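With the `elif ... tokenize.INDENT` branch gone from the parser above, nothing needs to look two tokens back any more, so the `last_previous` pass-through is dropped while `previous` survives. For readers unfamiliar with the pattern, a minimal push-back iterator sketch (hypothetical names, not jedi's actual class):

    class PushBackSketch:
        """Hypothetical sketch of a push-back tokenizer: it remembers the
        last token handed out and can re-deliver a pushed-back token."""
        def __init__(self, tokens):
            self._iter = iter(tokens)
            self._pushes = []
            self.previous = None

        def __iter__(self):
            return self

        def __next__(self):
            tok = self._pushes.pop() if self._pushes else next(self._iter)
            self.previous = tok
            return tok

        def push_back(self, tok):
            self._pushes.append(tok)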


@@ -23,8 +23,6 @@ namechars = string.ascii_letters + '_'
 COMMENT = N_TOKENS
 tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
 ENCODING = N_TOKENS + 2
 tok_name[ENCODING] = 'ENCODING'
 N_TOKENS += 3
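Note that `ENCODING` stays at `N_TOKENS + 2` and the counter still advances by 3, so the slot that `NL` occupied is simply left unassigned; harmless, since the token numbers only need to be distinct.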
@@ -215,11 +213,11 @@ def generate_tokens(readline, line_offset=0):
                     nl_pos = pos + len(comment_token)
                     yield TokenInfo(COMMENT, comment_token, (lnum, pos),
                                     (lnum, pos + len(comment_token)))
-                    yield TokenInfo(NL, line[nl_pos:], (lnum, nl_pos),
+                    yield TokenInfo(NEWLINE, line[nl_pos:], (lnum, nl_pos),
                                     (lnum, len(line)))
                 else:
                     yield TokenInfo(
-                        (NL, COMMENT)[line[pos] == '#'], line[pos:],
+                        (NEWLINE, COMMENT)[line[pos] == '#'], line[pos:],
                         (lnum, pos), (lnum, len(line)))
                 continue
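The `(NL, COMMENT)[line[pos] == '#']` expression above picks a token type by indexing a pair with a bool (`False` is 0, `True` is 1); the commit merely swaps which type sits in the `False` slot. A tiny standalone illustration of the idiom, with stand-in token values:

    NEWLINE, COMMENT = 4, 60  # stand-in values, not the vendored module's
    line, pos = "# just a comment\n", 0
    tok_type = (NEWLINE, COMMENT)[line[pos] == '#']  # bool indexes the pair
    assert tok_type == COMMENT                       # '#' selected index 1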
@@ -246,7 +244,7 @@ def generate_tokens(readline, line_offset=0):
                     (initial == '.' and token != '.' and token != '...')):
                 yield TokenInfo(NUMBER, token, spos, epos)
             elif initial in '\r\n':
-                yield TokenInfo(NL if parenlev > 0 else NEWLINE,
+                yield TokenInfo(NEWLINE,
                                 token, spos, epos)
             elif initial == '#':
                 assert not token.endswith("\n")
@@ -341,8 +339,8 @@ class NoErrorTokenizer(object):
             raise common.MultiLevelStopIteration()
         # ignore indents/comments
         if self.is_fast_parser \
-                and self.previous[0] in (INDENT, NL, None, NEWLINE) \
-                and c[0] not in (COMMENT, INDENT, NL, NEWLINE):
+                and self.previous[0] in (INDENT, None, NEWLINE) \
+                and c[0] not in (COMMENT, INDENT, NEWLINE):
             # print c, tok_name[c[0]]
             tok = c[1]
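After this commit the vendored tokenizer never produces NL at all, which is what lets membership tests like the one above shrink. Roughly the same consumer-side effect can be approximated on top of the stdlib by remapping NL to NEWLINE; a sketch of the idea, not jedi's code:

    import io
    import tokenize

    def tokens_without_nl(source):
        """Yield stdlib tokens with NL collapsed into NEWLINE (sketch)."""
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok.type == tokenize.NL:
                tok = tok._replace(type=tokenize.NEWLINE)
            yield tok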