diff --git a/jedi/parser/fast.py b/jedi/parser/fast.py
index 6bde5bcf..054343b2 100644
--- a/jedi/parser/fast.py
+++ b/jedi/parser/fast.py
@@ -414,7 +414,9 @@ class FastTokenizer(object):
             raise common.MultiLevelStopIteration()
 
         current = next(self.gen)
-        if current[0] == ENDMARKER:
+        tok_type = current.type
+        tok_str = current.string
+        if tok_type == ENDMARKER:
             raise common.MultiLevelStopIteration()
 
         self.previous = self.current
@@ -428,11 +430,11 @@
             if not self.first_stmt:
                 self.closed = True
                 raise common.MultiLevelStopIteration()
+
         # ignore comments/ newlines
-        if self.previous[0] in (None, NEWLINE) and current[0] not in (COMMENT, NEWLINE):
+        if self.previous.type in (None, NEWLINE) and tok_type not in (COMMENT, NEWLINE):
             # print c, tok_name[c[0]]
-            tok = current[1]
-            indent = current[2][1]
+            indent = current.start_pos[1]
             if indent < self.parser_indent:  # -> dedent
                 self.parser_indent = indent
                 self.new_indent = False
@@ -444,17 +446,17 @@
                 self.new_indent = False
 
             if not self.in_flow:
-                if tok in FLOWS or tok in breaks:
-                    self.in_flow = tok in FLOWS
+                if tok_str in FLOWS or tok_str in breaks:
+                    self.in_flow = tok_str in FLOWS
                     if not self.is_decorator and not self.in_flow:
                         close()
-                    self.is_decorator = '@' == tok
+                    self.is_decorator = '@' == tok_str
                     if not self.is_decorator:
                         self.old_parser_indent = self.parser_indent
                         self.parser_indent += 1  # new scope: must be higher
                         self.new_indent = True
 
-            if tok != '@':
+            if tok_str != '@':
                 if self.first_stmt and not self.new_indent:
                     self.parser_indent = indent
                 self.first_stmt = False
diff --git a/jedi/parser/token.py b/jedi/parser/token.py
index 27848741..7b9b7942 100644
--- a/jedi/parser/token.py
+++ b/jedi/parser/token.py
@@ -12,14 +12,7 @@ from jedi._compatibility import unicode
 from jedi.parser.tokenize import Token
 
 
-class TokenNoCompat(Token):
-    __slots__ = ()
-
-    def __getitem__(self, key):
-        raise NotImplementedError("Compatibility only for basic token.")
-
-
-class TokenDocstring(TokenNoCompat):
+class TokenDocstring(Token):
     """A string token that is a docstring.
 
     as_string() will clean the token representing the docstring.
diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index 8d78185c..fd0ac00a 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -42,8 +42,6 @@ class Token(object):
     methods that maintain compatibility to existing code that expects the above
     structure.
 
-    >>> tuple(Token(1, 'foo' ,(3,4)))
-    (1, 'foo', (3, 4), (3, 7))
     >>> repr(Token(1, "test", (1, 1)))
     "<Token: (1, 'test', (1, 1))>"
     >>> Token(1, 'bar', (3, 4)).__getstate__()
@@ -70,21 +68,8 @@ class Token(object):
         self._start_pos_col = start_pos[1]
 
     def __repr__(self):
-        return "<%s: %s>" % (type(self).__name__, tuple(self)[:3])
-
-    # Backward compatibility
-    def __getitem__(self, key):
-        # Builds the same structure as tuple used to have
-        if key == 0:
-            return self.type
-        elif key == 1:
-            return self.string
-        elif key == 2:
-            return (self._start_pos_line, self._start_pos_col)
-        elif key == 3:
-            return self.end_pos
-        else:
-            raise IndexError("list index out of range")
+        content = self.type, self.string, (self._start_pos_line, self._start_pos_col)
+        return "<%s: %s>" % (type(self).__name__, content)
 
     @property
     def start_pos(self):
@@ -109,12 +94,7 @@
 
     # Make cache footprint smaller for faster unpickling
    def __getstate__(self):
-        return (
-            self.type,
-            self.string,
-            self._start_pos_line,
-            self._start_pos_col,
-        )
+        return (self.type, self.string, self._start_pos_line, self._start_pos_col)
 
     def __setstate__(self, state):
         self.type = state[0]
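Taken together, the three hunks move Token consumers from tuple indexing (current[0], current[2][1]) to named attributes (current.type, current.start_pos[1]) and drop the __getitem__ compatibility shim entirely. A minimal sketch of the resulting API, assuming this revision of jedi is importable; the values mirror the doctests above rather than adding behaviour:

    import pickle

    from jedi.parser.tokenize import Token

    tok = Token(1, 'foo', (3, 4))

    # Attribute access replaces the removed tuple compatibility,
    # i.e. tok.type instead of tok[0] and tok.string instead of tok[1].
    assert tok.type == 1
    assert tok.string == 'foo'
    assert tok.start_pos == (3, 4)

    # __getstate__ stores four scalars rather than a nested tuple, keeping
    # the pickled cache small; a round trip restores the same attributes.
    assert tok.__getstate__() == (1, 'foo', 3, 4)
    restored = pickle.loads(pickle.dumps(tok))
    assert (restored.type, restored.string) == (1, 'foo')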