diff --git a/jedi/api/helpers.py b/jedi/api/helpers.py
index 97b08f57..41934f30 100644
--- a/jedi/api/helpers.py
+++ b/jedi/api/helpers.py
@@ -134,7 +134,7 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
 
     p = parser.ParserWithRecovery(grammar, code, start_parsing=False)
     try:
-        p.parse(tokenizer=tokenize_without_endmarker(code))
+        p.parse(tokens=tokenize_without_endmarker(code))
     except EndMarkerReached:
         return Stack(p.pgen_parser.stack)
     raise SystemError("This really shouldn't happen. There's a bug in Jedi.")
diff --git a/jedi/parser/diff.py b/jedi/parser/diff.py
index cd81f15e..ee0e87eb 100644
--- a/jedi/parser/diff.py
+++ b/jedi/parser/diff.py
@@ -323,7 +323,7 @@ class DiffParser(object):
         parsed_until_line = self._nodes_stack.parsed_until_line
         lines_after = self._parser_lines_new[parsed_until_line:]
         #print('parse_content', parsed_until_line, lines_after, until_line)
-        tokenizer = self._diff_tokenize(
+        tokens = self._diff_tokenize(
             lines_after,
             until_line,
             line_offset=parsed_until_line
@@ -333,7 +333,7 @@
             source='\n',
             start_parsing=False
         )
-        return self._active_parser.parse(tokenizer=tokenizer)
+        return self._active_parser.parse(tokens=tokens)
 
     def _diff_tokenize(self, lines, until_line, line_offset=0):
         is_first_token = True
diff --git a/jedi/parser/python/__init__.py b/jedi/parser/python/__init__.py
index 0429e167..886a1039 100644
--- a/jedi/parser/python/__init__.py
+++ b/jedi/parser/python/__init__.py
@@ -79,8 +79,9 @@ def parse(code, grammar=None, error_recovery=True, start_symbol='file_input'):
     else:
         kwargs = dict(start_symbol=start_symbol)
         parser = Parser
-    p = parser(grammar, code, tokens=tokens, **kwargs)
+    p = parser(grammar, code, start_parsing=False, **kwargs)
+    module = p.parse(tokens=tokens)
     if added_newline:
         p._remove_last_newline()
-    return p.get_root_node()
+    return module
 
diff --git a/jedi/parser/python/parser.py b/jedi/parser/python/parser.py
index 6400b5db..08344206 100644
--- a/jedi/parser/python/parser.py
+++ b/jedi/parser/python/parser.py
@@ -173,8 +173,8 @@ class ParserWithRecovery(Parser):
             start_parsing=start_parsing
         )
 
-    def parse(self, tokenizer):
-        root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokenizer))
+    def parse(self, tokens):
+        root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokens))
         root_node.path = self._module_path
         return root_node
 
@@ -242,8 +242,8 @@ class ParserWithRecovery(Parser):
         stack[start_index:] = []
         return failed_stack
 
-    def _tokenize(self, tokenizer):
-        for typ, value, start_pos, prefix in tokenizer:
+    def _tokenize(self, tokens):
+        for typ, value, start_pos, prefix in tokens:
             # print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix))
             if typ == DEDENT:
                 # We need to count indents, because if we just omit any DEDENT,
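
Note for reviewers: a minimal sketch of the calling convention after this rename. Only the parse() entry point in jedi/parser/python/__init__.py, the tokens= keyword, and the 4-tuple shape unpacked by _tokenize() are taken from the diff above; the sample source string is an arbitrary illustration.

    # Sketch, not a definitive usage guide: only parse() and the tokens=
    # keyword are guaranteed by this patch.
    from jedi.parser.python import parse

    # parse() now constructs the parser with start_parsing=False and feeds
    # the token stream via p.parse(tokens=...) instead of a `tokenizer`
    # keyword, then returns the module node from p.parse() directly.
    module = parse("def f(x):\n    return x\n")

    # Callers driving ParserWithRecovery.parse(tokens=...) themselves hand
    # it any iterable of (type, value, start_pos, prefix) 4-tuples -- the
    # exact shape that _tokenize() in jedi/parser/python/parser.py unpacks.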