diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py
index 22d77b0b..6123b4f8 100644
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -122,7 +122,7 @@ class Parser(object):
         p = PgenParser(grammar, self.convert_node, self.convert_leaf,
                        self.error_recovery)
         tokenizer = tokenizer or tokenize.source_tokens(source)
-        self.module = p.parse(p.tokenize(self._tokenize(tokenizer)))
+        self.module = p.parse(self._tokenize(tokenizer))
         self.module.used_names = self.used_names
         self.module.path = module_path
 
diff --git a/jedi/parser/pgen2/parse.py b/jedi/parser/pgen2/parse.py
index 070187e7..7c4a6049 100644
--- a/jedi/parser/pgen2/parse.py
+++ b/jedi/parser/pgen2/parse.py
@@ -101,16 +101,6 @@ class PgenParser(object):
         self.rootnode = None
         self.error_recovery = error_recovery
 
-    def tokenize(self, tokenizer):
-        """
-        This is not a real tokenizer, but it adds indents. You could hand the
-        parse function a normal tokenizer (e.g. the lib2to3 one). But if we use
-        the parser stack we are able to do error recovery from wrong indents.
-        """
-        for type, value, prefix, start_pos in tokenizer:
-            #print(token.tok_name[type], value)
-            yield type, value, prefix, start_pos
-
     def parse(self, tokenizer):
         for type, value, prefix, start_pos in tokenizer:
             if self.addtoken(type, value, prefix, start_pos):
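
Note on the change above: despite what its docstring suggested, the deleted
PgenParser.tokenize simply re-yielded every token unchanged, so wrapping the
token stream in it was a no-op and p.parse(p.tokenize(stream)) consumed the
same tokens as p.parse(stream). A minimal, self-contained sketch (not jedi's
actual code; the sample 4-tuples below are made up for illustration)
demonstrating why dropping the wrapper is behavior-preserving:

def passthrough(tokens):
    # Mirrors the deleted PgenParser.tokenize: re-yields each
    # (type, value, prefix, start_pos) 4-tuple unchanged.
    for type, value, prefix, start_pos in tokens:
        yield type, value, prefix, start_pos

# Hypothetical token stream in jedi's 4-tuple shape.
stream = [(1, 'foo', '', (1, 0)), (4, '\n', '', (1, 3))]
assert list(passthrough(iter(stream))) == stream  # wrapping is a no-op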