Mirror of https://github.com/davidhalter/jedi.git (synced 2026-01-17 17:35:03 +08:00)
Remove some old unused tokenize stuff.
@@ -122,7 +122,7 @@ class Parser(object):
         p = PgenParser(grammar, self.convert_node, self.convert_leaf,
                        self.error_recovery)
         tokenizer = tokenizer or tokenize.source_tokens(source)
-        self.module = p.parse(p.tokenize(self._tokenize(tokenizer)))
+        self.module = p.parse(self._tokenize(tokenizer))
         self.module.used_names = self.used_names
         self.module.path = module_path

@@ -101,16 +101,6 @@ class PgenParser(object):
         self.rootnode = None
         self.error_recovery = error_recovery

-    def tokenize(self, tokenizer):
-        """
-        This is not a real tokenizer, but it adds indents. You could hand the
-        parse function a normal tokenizer (e.g. the lib2to3 one). But if we use
-        the parser stack we are able to do error recovery from wrong indents.
-        """
-        for type, value, prefix, start_pos in tokenizer:
-            #print(token.tok_name[type], value)
-            yield type, value, prefix, start_pos
-
     def parse(self, tokenizer):
         for type, value, prefix, start_pos in tokenizer:
             if self.addtoken(type, value, prefix, start_pos):
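
The docstring of the removed PgenParser.tokenize mentions adding indents and recovering from wrong ones, but the body shown above only re-yields each token unchanged, which is why passing self._tokenize(tokenizer) straight to parse() is behaviour-preserving. A minimal sketch of that equivalence (not part of the commit), using made-up (type, value, prefix, start_pos) tuples rather than jedi's real token stream:

# Sketch: the removed PgenParser.tokenize was effectively a pass-through
# generator, so wrapping a token stream with it does not alter the tokens
# that reach parse().
def removed_tokenize(tokenizer):
    for type, value, prefix, start_pos in tokenizer:
        yield type, value, prefix, start_pos

# Made-up (type, value, prefix, start_pos) tuples standing in for the output
# of jedi's tokenizer; the numeric type codes are arbitrary here.
tokens = [(1, 'foo', '', (1, 0)), (53, '=', ' ', (1, 4)), (2, '42', ' ', (1, 6))]

assert list(removed_tokenize(iter(tokens))) == tokens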