From 3232ae5b0c56381455e556529bda8a81e071a812 Mon Sep 17 00:00:00 2001
From: Dave Halter
Date: Thu, 20 Feb 2014 18:45:22 +0100
Subject: [PATCH] removed parentheses counting from generate_tokens

---
 jedi/parser/__init__.py |  5 +++--
 jedi/parser/tokenize.py | 14 +++-----------
 2 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py
index e4055d17..7551ff02 100644
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -63,7 +63,6 @@ class Parser(object):
         while s is not None:
             s.end_pos = self.end_pos
             s = s.parent
-        pass
 
         # clean up unused decorators
         for d in self._decorators:
@@ -157,7 +156,9 @@ class Parser(object):
                 brackets = True
                 token_type, tok = self.next()
             if brackets and tok == '\n':
-                self.next()
+                token_type, tok = self.next()
+                if token_type == tokenize.INDENT:
+                    continue  # TODO REMOVE, after removing the indents.
             i, token_type, tok = self._parse_dot_name(self._current)
             if not i:
                 defunct = True
diff --git a/jedi/parser/tokenize.py b/jedi/parser/tokenize.py
index 9f049d2f..faa8bc77 100644
--- a/jedi/parser/tokenize.py
+++ b/jedi/parser/tokenize.py
@@ -153,7 +153,6 @@ def generate_tokens(readline, line_offset=0):
     Modified to not care about dedents.
     """
     lnum = line_offset
-    parenlev = 0
     continued = False
     numchars = '0123456789'
     contstr, needcont = '', 0
@@ -190,7 +189,7 @@ def generate_tokens(readline, line_offset=0):
                 contline = contline + line
                 continue
 
-        elif parenlev == 0 and not continued:  # new statement
+        elif not continued:  # new statement
             if not line:
                 break
             column = 0
@@ -216,9 +215,7 @@ def generate_tokens(readline, line_offset=0):
                     yield TokenInfo(NEWLINE, line[nl_pos:],
                                     (lnum, nl_pos), (lnum, len(line)))
                 else:
-                    yield TokenInfo(
-                        (NEWLINE, COMMENT)[line[pos] == '#'], line[pos:],
-                        (lnum, pos), (lnum, len(line)))
+                    yield TokenInfo(NEWLINE, line[pos:], (lnum, pos), (lnum, len(line)))
                 continue
 
             if column > indents[-1]:  # count indents or dedents
@@ -244,8 +241,7 @@ def generate_tokens(readline, line_offset=0):
                         (initial == '.' and token != '.' and token != '...')):
                     yield TokenInfo(NUMBER, token, spos, epos)
                 elif initial in '\r\n':
-                    yield TokenInfo(NEWLINE,
-                                    token, spos, epos)
+                    yield TokenInfo(NEWLINE, token, spos, epos)
                 elif initial == '#':
                     assert not token.endswith("\n")
                     yield TokenInfo(COMMENT, token, spos, epos)
@@ -278,10 +274,6 @@ def generate_tokens(readline, line_offset=0):
                 elif initial == '\\':  # continued stmt
                     continued = True
                 else:
-                    if initial in '([{':
-                        parenlev += 1
-                    elif initial in ')]}':
-                        parenlev -= 1
                     yield TokenInfo(OP, token, spos, epos)
             else:
                 yield TokenInfo(ERRORTOKEN, line[pos],
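
Note (not part of the patch): removing parenlev means jedi's forked generate_tokens no
longer uses bracket nesting to decide where a statement starts; newlines inside brackets
are now left to the parser (see the `if brackets and tok == '\n'` change above). For
contrast, a minimal stand-alone sketch using the stdlib tokenize module, which still
tracks parentheses and therefore yields NL instead of NEWLINE inside brackets:

    import io
    import tokenize

    # "x = (1 +\n     2)" spans two physical lines inside parentheses.
    src = "x = (1 +\n     2)\ny = 3\n"

    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        if tok.type in (tokenize.NEWLINE, tokenize.NL):
            # The stdlib emits NL for the line break inside the parentheses and
            # NEWLINE for the ones outside; the patched jedi tokenizer drops
            # this paren-based distinction entirely.
            print(tokenize.tok_name[tok.type], tok.start)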