From a7a8a73a2cac00ec7e22e870380ed50832f15320 Mon Sep 17 00:00:00 2001
From: Dave Halter
Date: Tue, 25 Feb 2014 01:20:55 +0100
Subject: [PATCH] removed Parser._start_pos as well

---
 jedi/parser/__init__.py | 57 ++++++++++++++++++++---------------------
 1 file changed, 28 insertions(+), 29 deletions(-)

diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py
index e181ec74..24b041b8 100644
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -41,14 +41,15 @@ class Parser(object):
                  tokenizer=None, top_module=None, offset=0):
         self.no_docstr = no_docstr
 
-        self._start_pos = 1 + offset, 0
-        # initialize global Scope
-        self.module = pr.SubModule(module_path, self._start_pos, top_module)
-        self._scope = self.module
-
         tokenizer = tokenizer or tokenize.source_tokens(source)
         self._gen = PushBackTokenizer(tokenizer)
+
+        start_pos = 1 + offset, 0
+        # initialize global Scope
+        self.module = pr.SubModule(module_path, start_pos, top_module)
+        self._scope = self.module
         self._top_module = top_module or self.module
+
         try:
             self._parse()
         except (common.MultiLevelStopIteration, StopIteration):
@@ -69,7 +70,6 @@ class Parser(object):
             # because of `self.module.used_names`.
             d.parent = self.module
 
-        self._start_pos = self.module.start_pos
         self.module.end_pos = self._gen.current.end
         if self._gen.current.type in (tokenize.NEWLINE,):
             # This case is only relevant with the FastTokenizer, because
@@ -114,8 +114,8 @@ class Parser(object):
                 # token maybe a name or star
                 return None, tok
 
-            append((tok.string, self._start_pos))
-            first_pos = self._start_pos
+            first_pos = tok.start
+            append((tok.string, first_pos))
             while True:
                 end_pos = tok.end
                 tok = self.next()
@@ -124,7 +124,7 @@ class Parser(object):
                 tok = self.next()
                 if tok.type != tokenize.NAME:
                     break
-            append((tok.string, self._start_pos))
+            append((tok.string, tok.start))
 
         n = pr.Name(self.module, names, first_pos, end_pos) if names else None
         return n, tok
@@ -207,12 +207,12 @@ class Parser(object):
         :return: Return a Scope representation of the tokens.
         :rtype: Function
         """
-        first_pos = self._start_pos
+        first_pos = self._gen.current.start
         tok = self.next()
         if tok.type != tokenize.NAME:
             return None
 
-        fname = pr.Name(self.module, [(tok.string, self._start_pos)], self._start_pos,
+        fname = pr.Name(self.module, [(tok.string, tok.start)], tok.start,
                         tok.end)
 
         tok = self.next()
@@ -245,15 +245,15 @@ class Parser(object):
         :return: Return a Scope representation of the tokens.
         :rtype: Class
         """
-        first_pos = self._start_pos
+        first_pos = self._gen.current.start
         cname = self.next()
         if cname.type != tokenize.NAME:
             debug.warning("class: syntax err, token is not a name@%s (%s: %s)",
-                          self._start_pos[0], tokenize.tok_name[cname.type], cname.string)
+                          cname.start[0], tokenize.tok_name[cname.type], cname.string)
             return None
 
-        cname = pr.Name(self.module, [(cname.string, self._start_pos)],
-                        self._start_pos, cname.end)
+        cname = pr.Name(self.module, [(cname.string, cname.start)],
+                        cname.start, cname.end)
 
         super = []
         _next = self.next()
@@ -262,7 +262,7 @@ class Parser(object):
             _next = self.next()
 
         if _next.string != ':':
-            debug.warning("class syntax: %s@%s", cname, self._start_pos[0])
+            debug.warning("class syntax: %s@%s", cname, _next.start[0])
             return None
 
         return pr.Class(self.module, cname, super, first_pos)
@@ -295,7 +295,7 @@ class Parser(object):
             self.next()
             tok = self.next()
 
-        first_pos = self._start_pos
+        first_pos = tok.start
 
         opening_brackets = ['{', '(', '[']
         closing_brackets = ['}', ')', ']']
@@ -396,7 +396,6 @@ class Parser(object):
         #typ, tok, start_pos, end_pos = next(self._gen)
         _current = next(self._gen)
         # dedents shouldn't change positions
-        self._start_pos = _current.start
         #self._current = typ, tok
 
         return _current
@@ -422,6 +421,7 @@ class Parser(object):
         for tok in self.iterator:
             token_type = tok.type
             tok_str = tok.string
+            first_pos = tok.start
             self.module.temp_used_names = []
             # debug.dbg('main: tok=[%s] type=[%s] indent=[%s]', \
             #           tok, tokenize.tok_name[token_type], start_position[0])
@@ -429,10 +429,10 @@
             # check again for unindented stuff. this is true for syntax
             # errors. only check for names, because thats relevant here. If
             # some docstrings are not indented, I don't care.
-            while self._start_pos[1] <= self._scope.start_pos[1] \
+            while first_pos[1] <= self._scope.start_pos[1] \
                     and (token_type == tokenize.NAME or tok_str in ['(', '['])\
                     and self._scope != self.module:
-                self._scope.end_pos = self._start_pos
+                self._scope.end_pos = first_pos
                 self._scope = self._scope.parent
                 if isinstance(self._scope, pr.Module) \
                         and not isinstance(self._scope, pr.SubModule):
@@ -442,11 +442,10 @@
                     use_as_parent_scope = self._top_module
                 else:
                     use_as_parent_scope = self._scope
-            first_pos = self._start_pos
             if tok_str == 'def':
                 func = self._parse_function()
                 if func is None:
-                    debug.warning("function: syntax error@%s", self._start_pos[0])
+                    debug.warning("function: syntax error@%s", first_pos[0])
                     continue
                 self.freshscope = True
                 self._scope = self._scope.add_scope(func, self._decorators)
@@ -454,7 +453,7 @@
             elif tok_str == 'class':
                 cls = self._parse_class()
                 if cls is None:
-                    debug.warning("class: syntax error@%s" % self._start_pos[0])
+                    debug.warning("class: syntax error@%s" % first_pos[0])
                     continue
                 self.freshscope = True
                 self._scope = self._scope.add_scope(cls, self._decorators)
@@ -496,7 +495,7 @@
                         tok_str = 'import'
                         mod = None
                 if not mod and not relative_count or tok_str != "import":
-                    debug.warning("from: syntax error@%s", self._start_pos[0])
+                    debug.warning("from: syntax error@%s", tok.start[0])
                     defunct = True
                     if tok_str != 'import':
                         self._gen.push_last_back()
@@ -522,7 +521,7 @@
                 set_stmt, tok = self._parse_statement(added_breaks=['in'],
                                                       names_are_set_vars=True)
                 if tok.string != 'in':
-                    debug.warning('syntax err, for flow incomplete @%s', self._start_pos[0])
+                    debug.warning('syntax err, for flow incomplete @%s', tok.start[0])
 
                 try:
                     statement, tok = self._parse_statement()
@@ -532,7 +531,7 @@
                 f = pr.ForFlow(self.module, s, first_pos, set_stmt)
                 self._scope = self._scope.add_statement(f)
                 if tok is None or tok.string != ':':
-                    debug.warning('syntax err, for flow started @%s', self._start_pos[0])
+                    debug.warning('syntax err, for flow started @%s', first_pos[0])
             elif tok_str in ['if', 'while', 'try', 'with'] + extended_flow:
                 added_breaks = []
                 command = tok_str
@@ -569,10 +568,10 @@
                     s = self._scope.add_statement(f)
                     self._scope = s
                     if tok.string != ':':
-                        debug.warning('syntax err, flow started @%s', self._start_pos[0])
+                        debug.warning('syntax err, flow started @%s', tok.start[0])
             # returns
             elif tok_str in ['return', 'yield']:
-                s = self._start_pos
+                s = tok.start
                 self.freshscope = False
                 # add returns to the scope
                 func = self._scope.get_parent_until(pr.Function)
@@ -624,7 +623,7 @@
             else:
                 if token_type not in [tokenize.COMMENT, tokenize.NEWLINE]:
                     debug.warning('Token not used: %s %s %s', tok_str,
-                                  tokenize.tok_name[token_type], self._start_pos)
+                                  tokenize.tok_name[token_type], first_pos)
                 continue
 
             self.no_docstr = False
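
Note (not part of the patch): the commit above removes the mutable, parser-wide
`Parser._start_pos` that `next()` overwrote on every token, and instead reads
positions straight off the tokens themselves (`tok.start`,
`self._gen.current.start`). Below is a minimal, self-contained sketch of that
refactoring; `Token` and `ToyParser` are illustrative stand-ins, not jedi's
real classes:

    from collections import namedtuple

    # Each token carries its own (row, column) start/end, as jedi's tokens do.
    Token = namedtuple('Token', ['string', 'start', 'end'])

    class ToyParser(object):
        def __init__(self, tokens):
            self._gen = iter(tokens)
            self.current = None

        def next(self):
            # Before the patch, this method also did
            #     self._start_pos = token.start
            # duplicating state the token already carries.
            self.current = next(self._gen)
            return self.current

        def parse_name(self):
            tok = self.next()
            # After the patch: take the position directly from the token,
            # so there is no parser-level copy that can go stale.
            first_pos = tok.start
            return tok.string, first_pos

    tokens = [Token('def', (1, 0), (1, 3)), Token('foo', (1, 4), (1, 7))]
    parser = ToyParser(tokens)
    print(parser.parse_name())  # ('def', (1, 0))
    print(parser.parse_name())  # ('foo', (1, 4))

Only one piece of token state remains (`self.current`, mirroring
`self._gen.current` in the diff, used for end-of-parse bookkeeping); everything
positional now lives on the tokens.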