mirror of https://github.com/davidhalter/jedi.git
Trying to remove token from pgen2.
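This swaps pgen2 from the token module over to tokenize wherever token-type
constants and tok_name are looked up. The move is safe because tokenize
re-exports the token module's constants, including tok_name (CPython's
tokenize does "from token import *"). A quick sanity check, sketched against
the standard-library modules rather than jedi's vendored tokenize:

import token
import tokenize

# tokenize re-exports token's type constants and name table, so
# tokenize.X can stand in for token.X throughout pgen2.
assert tokenize.NAME == token.NAME
assert tokenize.ENDMARKER == token.ENDMARKER
assert tokenize.tok_name[tokenize.NAME] == 'NAME'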
@@ -15,7 +15,7 @@ how this parsing engine works.
 """
 
 # Local imports
-from . import token
+from jedi.parser import tokenize
 
 
 class ParseError(Exception):
@@ -131,7 +131,7 @@ class PgenParser(object):
             ilabel = self.classify(type, value, start_pos)
         except ParseError:
             # Currently we ignore tokens like `?`.
-            print('invalid token', token.tok_name[type], value)
+            print('invalid token', tokenize.tok_name[type], value)
             return
 
         # Loop until the token is shifted; may raise exceptions
@@ -180,7 +180,7 @@ class PgenParser(object):
 
     def classify(self, type, value, start_pos):
         """Turn a token into a label. (Internal)"""
-        if type == token.NAME:
+        if type == tokenize.NAME:
             # Check for reserved words
             ilabel = self.grammar.keywords.get(value)
             if ilabel is not None:
@@ -6,7 +6,7 @@
 # Modifications are dual-licensed: MIT and PSF.
 
 # Pgen imports
-from . import grammar, token, tokenize
+from . import grammar, tokenize
 
 
 class ParserGenerator(object):
@@ -74,9 +74,9 @@ class ParserGenerator(object):
                 return ilabel
             else:
                 # A named token (NAME, NUMBER, STRING)
-                itoken = getattr(token, label, None)
+                itoken = getattr(tokenize, label, None)
                 assert isinstance(itoken, int), label
-                assert itoken in token.tok_name, label
+                assert itoken in tokenize.tok_name, label
                 if itoken in c.tokens:
                     return c.tokens[itoken]
                 else:
@@ -92,7 +92,7 @@ class ParserGenerator(object):
                 if value in c.keywords:
                     return c.keywords[value]
                 else:
-                    c.labels.append((token.NAME, value))
+                    c.labels.append((tokenize.NAME, value))
                     c.keywords[value] = ilabel
                     return ilabel
             else:
@@ -147,14 +147,14 @@ class ParserGenerator(object):
         dfas = {}
         startsymbol = None
         # MSTART: (NEWLINE | RULE)* ENDMARKER
-        while self.type != token.ENDMARKER:
-            while self.type == token.NEWLINE:
+        while self.type != tokenize.ENDMARKER:
+            while self.type == tokenize.NEWLINE:
                 self.gettoken()
             # RULE: NAME ':' RHS NEWLINE
-            name = self.expect(token.NAME)
-            self.expect(token.OP, ":")
+            name = self.expect(tokenize.NAME)
+            self.expect(tokenize.OP, ":")
             a, z = self.parse_rhs()
-            self.expect(token.NEWLINE)
+            self.expect(tokenize.NEWLINE)
             #self.dump_nfa(name, a, z)
             dfa = self.make_dfa(a, z)
             #self.dump_dfa(name, dfa)
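For context, the loop above drives a small recursive-descent reader over the
grammar file through expect() and gettoken(). A standalone sketch of that
pattern (a hypothetical MiniReader, using the standard-library tokenize
rather than jedi's, so names and error handling are illustrative only):

import io
import tokenize

class MiniReader(object):
    """Illustrative reduction of ParserGenerator's token handling."""

    def __init__(self, source):
        self.generator = tokenize.generate_tokens(io.StringIO(source).readline)
        self.gettoken()

    def gettoken(self):
        # Skip comments and non-logical newlines, as gettoken() does above.
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value = tup[0], tup[1]

    def expect(self, type, value=None):
        # Consume and return the current token if it matches, else fail.
        if self.type != type or (value is not None and self.value != value):
            raise SyntaxError('expected %s %r, got %r' %
                              (tokenize.tok_name[type], value, self.value))
        current = self.value
        self.gettoken()
        return current

reader = MiniReader("rule: NAME\n")
reader.expect(tokenize.NAME)     # -> 'rule'
reader.expect(tokenize.OP, ':')
reader.expect(tokenize.NAME)     # -> 'NAME'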
@@ -271,7 +271,7 @@ class ParserGenerator(object):
         # ALT: ITEM+
         a, b = self.parse_item()
         while (self.value in ("(", "[") or
-               self.type in (token.NAME, token.STRING)):
+               self.type in (tokenize.NAME, tokenize.STRING)):
             c, d = self.parse_item()
             b.addarc(c)
             b = d
@@ -282,7 +282,7 @@ class ParserGenerator(object):
         if self.value == "[":
             self.gettoken()
             a, z = self.parse_rhs()
-            self.expect(token.OP, "]")
+            self.expect(tokenize.OP, "]")
             a.addarc(z)
             return a, z
         else:
@@ -302,9 +302,9 @@ class ParserGenerator(object):
         if self.value == "(":
             self.gettoken()
             a, z = self.parse_rhs()
-            self.expect(token.OP, ")")
+            self.expect(tokenize.OP, ")")
             return a, z
-        elif self.type in (token.NAME, token.STRING):
+        elif self.type in (tokenize.NAME, tokenize.STRING):
             a = NFAState()
             z = NFAState()
             a.addarc(z, self.value)
@@ -327,7 +327,7 @@ class ParserGenerator(object):
         while tup[0] in (tokenize.COMMENT, tokenize.NL):
             tup = next(self.generator)
         self.type, self.value, self.begin, self.end, self.line = tup
-        #print token.tok_name[self.type], repr(self.value)
+        #print tokenize.tok_name[self.type], repr(self.value)
 
     def raise_error(self, msg, *args):
         if args: