forked from VimPlug/jedi

Add syntax errors to the parser.

Dave Halter
2015-03-04 17:12:51 +01:00
parent a3c2108ecf
commit 9c2e73d460
2 changed files with 22 additions and 15 deletions

View File

@@ -22,7 +22,7 @@ from jedi.parser import tree as pt
 from jedi.parser import tokenize
 from jedi.parser import token
 from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
-                               STRING, OP)
+                               STRING, OP, ERRORTOKEN)
 from jedi.parser.pgen2.pgen import generate_grammar
 from jedi.parser.pgen2.parse import PgenParser
@@ -74,6 +74,12 @@ class ErrorStatement(object):
         return first_type


+class ParserSyntaxError(object):
+    def __init__(self, message, position):
+        self.message = message
+        self.position = position
+
+
 class Parser(object):
     """
     This class is used to parse a Python file, it then divides them into a
@@ -115,6 +121,8 @@ class Parser(object):
             'lambdef_nocond': pt.Lambda,
         }

+        self.syntax_errors = []
+
         self._global_names = []
         self._omit_dedent_list = []
         self._indent_counter = 0
@@ -321,11 +329,17 @@ class Parser(object):
                 self._indent_counter -= 1
             elif typ == INDENT:
                 self._indent_counter += 1
+            elif typ == ERRORTOKEN:
+                self._add_syntax_error('Strange token', start_pos)
+                continue

             if typ == OP:
                 typ = token.opmap[value]
             yield typ, value, prefix, start_pos

+    def _add_syntax_error(self, message, position):
+        self.syntax_errors.append(ParserSyntaxError(message, position))
+
     def __repr__(self):
         return "<%s: %s>" % (type(self).__name__, self.module)
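A minimal usage sketch of the new error reporting introduced above, assuming the Parser is constructed from a grammar and a source string as elsewhere in jedi at the time; Parser(grammar, source) and load_grammar() are assumptions, only syntax_errors, message and position come from this commit:

    # Hypothetical usage: read the errors the parser collected instead of
    # having them printed and dropped inside pgen2.
    from jedi.parser import Parser, load_grammar

    source = "def broken(:\n    pass\n"
    parser = Parser(load_grammar(), source)
    for error in parser.syntax_errors:
        # Each entry is a ParserSyntaxError carrying a message and a start position.
        print(error.position, error.message)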

View File

@@ -117,12 +117,7 @@ class PgenParser(object):
     def addtoken(self, type, value, prefix, start_pos):
         """Add a token; return True iff this is the end of the program."""
         # Map from token to label
-        try:
-            ilabel = self.classify(type, value, start_pos)
-        except ParseError:
-            # Currently we ignore tokens like `?`.
-            print('invalid token', tokenize.tok_name[type], repr(value))
-            return
+        ilabel = self.classify(type, value, start_pos)
         # Loop until the token is shifted; may raise exceptions
         while True:
@@ -171,14 +166,12 @@ class PgenParser(object):
     def classify(self, type, value, start_pos):
         """Turn a token into a label. (Internal)"""
         if type == tokenize.NAME:
-            # Check for reserved words
-            ilabel = self.grammar.keywords.get(value)
-            if ilabel is not None:
-                return ilabel
-        ilabel = self.grammar.tokens.get(type)
-        if ilabel is None:
-            raise ParseError("bad token", type, value, start_pos)
-        return ilabel
+            # Check for reserved words (keywords)
+            try:
+                return self.grammar.keywords[value]
+            except KeyError:
+                pass
+        return self.grammar.tokens[type]

     def shift(self, type, value, newstate, prefix, start_pos):
         """Shift a token. (Internal)"""