Remove some old unused tokenize stuff.

Dave Halter
2014-12-15 17:44:40 +01:00
parent 955f125c0d
commit 680fdd574b
2 changed files with 1 addition and 11 deletions


@@ -122,7 +122,7 @@ class Parser(object):
         p = PgenParser(grammar, self.convert_node, self.convert_leaf,
                        self.error_recovery)
         tokenizer = tokenizer or tokenize.source_tokens(source)
-        self.module = p.parse(p.tokenize(self._tokenize(tokenizer)))
+        self.module = p.parse(self._tokenize(tokenizer))
         self.module.used_names = self.used_names
         self.module.path = module_path
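For context, a minimal self-contained sketch (not jedi's actual classes) of a parser consuming a (type, value, prefix, start_pos) token stream directly, which is what Parser now does via p.parse(self._tokenize(tokenizer)):

    # ToyParser is hypothetical; the real PgenParser drives a pgen grammar
    # via addtoken() instead of collecting tokens.
    class ToyParser:
        def parse(self, tokenizer):
            nodes = []
            # Consume the stream one token at a time, like PgenParser.parse.
            for type, value, prefix, start_pos in tokenizer:
                nodes.append((type, value))
            return nodes

    stream = iter([(1, 'pass', '', (1, 0)), (4, '\n', '', (1, 4))])
    print(ToyParser().parse(stream))  # [(1, 'pass'), (4, '\n')]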


@@ -101,16 +101,6 @@ class PgenParser(object):
         self.rootnode = None
         self.error_recovery = error_recovery
 
-    def tokenize(self, tokenizer):
-        """
-        This is not a real tokenizer, but it adds indents. You could hand the
-        parse function a normal tokenizer (e.g. the lib2to3 one). But if we use
-        the parser stack we are able to do error recovery from wrong indents.
-        """
-        for type, value, prefix, start_pos in tokenizer:
-            #print(token.tok_name[type], value)
-            yield type, value, prefix, start_pos
-
     def parse(self, tokenizer):
         for type, value, prefix, start_pos in tokenizer:
             if self.addtoken(type, value, prefix, start_pos):
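The method removed above is a pure pass-through: it yields each token unchanged, so dropping it from the pipeline cannot alter the stream that parse() sees. A minimal self-contained check (hypothetical token tuples, not jedi code):

    def passthrough(tokens):
        # Same shape as the deleted PgenParser.tokenize: re-yield every token.
        for type, value, prefix, start_pos in tokens:
            yield type, value, prefix, start_pos

    stream = [(1, 'def', '', (1, 0)), (1, 'f', ' ', (1, 4))]
    # Wrapping the stream in the identity generator leaves it unchanged.
    assert list(passthrough(iter(stream))) == stream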