
Remove the old driver code of pgen2.

Dave Halter
2014-12-15 17:18:01 +01:00
parent 4e0172a915
commit 55a6dbc8a2
3 changed files with 5 additions and 63 deletions

jedi/parser/__init__.py

@@ -17,10 +17,10 @@ complexity of the ``Parser`` (there's another parser sitting inside
 """
 import os
 
-from jedi._compatibility import next
 from jedi.parser import tree as pt
 from jedi.parser import tokenize
 from jedi.parser import pgen2
+from jedi.parser.pgen2.parse import PgenParser
 
 OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
 # Not used yet. In the future I intend to add something like KeywordStatement
@@ -118,8 +118,8 @@ class Parser(object):
         self.used_names = {}
         self.scope_names_stack = [{}]
         self.error_statement_stacks = []
-        p = pgen2.parse.Parser(grammar, self.convert_node, self.convert_leaf,
-                               self.error_recovery)
+        p = PgenParser(grammar, self.convert_node, self.convert_leaf,
+                       self.error_recovery)
         tokenizer = tokenizer or tokenize.source_tokens(source)
         self.module = p.parse(self._tokenize(tokenizer))
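Context for the change above: jedi's pgen2 package is a fork of CPython's lib2to3.pgen2, and the Driver indirection dropped by this commit still exists upstream. A minimal sketch of that upstream API for comparison (lib2to3 ships with CPython up to 3.12, where it is deprecated; nothing here is jedi's own API):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    # Upstream funnels parsing through a Driver wrapper; this commit makes
    # jedi construct the pgen parser directly instead.
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("x = 1\n")
    print(repr(str(tree)))  # the tree round-trips the source, prefixes included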

jedi/parser/pgen2/driver.py

@@ -5,73 +5,15 @@
 # Copyright 2006 Google, Inc. All Rights Reserved.
 # Licensed to PSF under a Contributor Agreement.
 
-__all__ = ["Driver", "load_grammar"]
+__all__ = ["load_grammar"]
 
 import os
 import sys
 import logging
-import io
 
 from . import pgen
 from . import grammar
-from . import parse
-from . import token
-from . import tokenize
-
-
-class Driver(object):
-    def __init__(self, grammar, convert_node, convert_leaf, error_recovery, logger=None):
-        self.grammar = grammar
-        if logger is None:
-            logger = logging.getLogger()
-        self.logger = logger
-        self.convert_node = convert_node
-        self.convert_leaf = convert_leaf
-        self.error_recovery = error_recovery
-
-    def parse_tokens(self, tokens):
-        p = parse.Parser(self.grammar, self.convert_node, self.convert_leaf, self.error_recovery)
-        return p.parse(self._tokenize(tokens))
-
-    def _tokenize(self, tokens):
-        """Parse a series of tokens and return the syntax tree."""
-        # XXX Move the prefix computation into a wrapper around tokenize.
-        lineno = 1
-        column = 0
-        prefix = ""
-        for type, value, start, end, line_text in tokens:
-            if start != (lineno, column):
-                assert (lineno, column) <= start, ((lineno, column), start)
-                s_lineno, s_column = start
-                if lineno < s_lineno:
-                    prefix += "\n" * (s_lineno - lineno)
-                    lineno = s_lineno
-                    column = 0
-                if column < s_column:
-                    prefix += line_text[column:s_column]
-                    column = s_column
-            if type in (tokenize.COMMENT, tokenize.NL):  # NL != NEWLINE
-                prefix += value
-                lineno, column = end
-                if value.endswith("\n"):
-                    lineno += 1
-                    column = 0
-                continue
-            if type == token.OP:
-                type = grammar.opmap[value]
-            #self.logger.debug("%s %r (prefix=%r)", token.tok_name[type], value, prefix)
-            yield type, value, prefix, start
-            prefix = ""
-            lineno, column = end
-            if value.endswith("\n"):
-                lineno += 1
-                column = 0
-
-    def parse_string(self, text):
-        """Parse a string and return the syntax tree."""
-        tokens = tokenize.generate_tokens(io.StringIO(text).readline)
-        return self.parse_tokens(tokens)
-
-
 def load_grammar(grammar_path="grammar.txt", pickle_path=None,
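The removed _tokenize generator is the interesting part of this file: it turns a raw token stream into (type, value, prefix, start) tuples, where prefix collects the whitespace, comments, and blank lines preceding each significant token, so the resulting tree can reproduce the source exactly. A self-contained sketch of the same bookkeeping on top of the standard library's tokenize module (the function name and demo input are mine, not jedi's):

    import io
    import tokenize

    def tokens_with_prefix(source):
        # Track where the previous token ended; anything between that point
        # and the next token's start is accumulated into `prefix`.
        lineno, column = 1, 0
        prefix = ""
        for typ, value, start, end, line_text in tokenize.generate_tokens(
                io.StringIO(source).readline):
            if start != (lineno, column):
                s_lineno, s_column = start
                if lineno < s_lineno:           # gap spans one or more lines
                    prefix += "\n" * (s_lineno - lineno)
                    lineno, column = s_lineno, 0
                if column < s_column:           # gap within the current line
                    prefix += line_text[column:s_column]
                    column = s_column
            if typ in (tokenize.COMMENT, tokenize.NL):  # NL != NEWLINE
                prefix += value                 # comments/blank lines join the prefix
                lineno, column = end
                if value.endswith("\n"):        # token text already covers the newline
                    lineno, column = lineno + 1, 0
                continue
            yield typ, value, prefix, start
            prefix = ""
            lineno, column = end
            if value.endswith("\n"):
                lineno, column = lineno + 1, 0

    for typ, value, prefix, start in tokens_with_prefix("x = 1  # note\n\ny = 2\n"):
        print(tokenize.tok_name[typ], repr(value), "prefix=%r" % prefix)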

jedi/parser/pgen2/parse.py

@@ -26,7 +26,7 @@ class ParseError(Exception):
         self.start_pos = start_pos
 
 
-class Parser(object):
+class PgenParser(object):
     """Parser engine.
 
     The proper usage sequence is:
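The docstring truncated by the hunk describes the classic pgen2 driving sequence: setup(), feed tokens through addtoken() until it returns True, then read rootnode. Jedi's renamed PgenParser adds the convert/error-recovery callbacks seen in the first file; the plain upstream sequence, sketched against lib2to3 (same availability caveat as above), looks like this:

    import io
    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import parse, token, tokenize

    grammar = pygram.python_grammar
    p = parse.Parser(grammar, convert=pytree.convert)
    p.setup()                                    # prepare for parsing
    source = "x = 1\n"
    for typ, value, start, end, line in tokenize.generate_tokens(
            io.StringIO(source).readline):
        if typ in (tokenize.COMMENT, tokenize.NL):
            continue                             # a driver would fold these into prefixes
        if typ == token.OP:
            typ = grammar.opmap[value]           # map generic OP to a specific token type
        if p.addtoken(typ, value, ("", start)):  # True once the start symbol is accepted
            break
    print(p.rootnode)                            # root node of the concrete syntax tree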