Finally add all fstring errors except the nested invalid syntax stuff that occurs in nested Python.

This commit is contained in:
Dave Halter
2017-08-26 12:16:06 +02:00
parent 68cc383d02
commit 5b7a01ba62
3 changed files with 28 additions and 19 deletions

View File

@@ -844,7 +844,7 @@ class _FStringRule(SyntaxRule):
message_nested = "f-string: expressions nested too deeply" message_nested = "f-string: expressions nested too deeply"
message_backslash = "f-string expression part cannot include a backslash" # f'{"\"}' or f'{"\\"}' message_backslash = "f-string expression part cannot include a backslash" # f'{"\"}' or f'{"\\"}'
message_comment = "f-string expression part cannot include '#'" # f'{#}' message_comment = "f-string expression part cannot include '#'" # f'{#}'
message_string = "f-string: unterminated string" # f'{"}' message_unterminated_string = "f-string: unterminated string" # f'{"}'
message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'" message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"
message_incomplete = "f-string: expecting '}'" # f'{' message_incomplete = "f-string: expecting '}'" # f'{'
@@ -865,6 +865,12 @@ class _FStringRule(SyntaxRule):
if child.type == 'expression': if child.type == 'expression':
self._check_expression(child) self._check_expression(child)
elif child.type == 'error_node': elif child.type == 'error_node':
next_ = child.get_next_leaf()
if next_.type == 'error_leaf' and next_.original_type == 'unterminated_string':
self.add_issue(next_, message=self.message_unterminated_string)
# At this point nothing more is coming except the error
# leaf that we've already checked here.
break
self.add_issue(child, message=self.message_incomplete) self.add_issue(child, message=self.message_incomplete)
elif child.type == 'error_leaf': elif child.type == 'error_leaf':
self.add_issue(child, message=self.message_single_closing) self.add_issue(child, message=self.message_single_closing)

View File

@@ -1,9 +1,9 @@
import re import re
from itertools import count
from parso.utils import PythonVersionInfo from parso.utils import PythonVersionInfo
from parso.utils import split_lines from parso.utils import split_lines
from parso.python.tokenize import Token from parso.python.tokenize import Token
from parso.python import token
from parso import parser from parso import parser
from parso.tree import TypedLeaf, ErrorNode, ErrorLeaf from parso.tree import TypedLeaf, ErrorNode, ErrorLeaf
@@ -11,14 +11,15 @@ version36 = PythonVersionInfo(3, 6)
class TokenNamespace: class TokenNamespace:
LBRACE = token.LBRACE _c = count()
RBRACE = token.RBRACE LBRACE = next(_c)
ENDMARKER = token.ENDMARKER RBRACE = next(_c)
ERRORTOKEN = token.ERRORTOKEN ENDMARKER = next(_c)
COLON = token.COLON COLON = next(_c)
CONVERSION = 100 CONVERSION = next(_c)
PYTHON_EXPR = 101 PYTHON_EXPR = next(_c)
EXCLAMATION_MARK = 102 EXCLAMATION_MARK = next(_c)
UNTERMINATED_STRING = next(_c)
token_map = dict((v, k) for k, v in locals().items()) token_map = dict((v, k) for k, v in locals().items())
@@ -138,7 +139,6 @@ def _tokenize(code, start_pos=(1, 0)):
elif found == ':' and (squared_count or curly_count): elif found == ':' and (squared_count or curly_count):
expression += found expression += found
elif found in ('"', "'"): elif found in ('"', "'"):
expression += found
search = found search = found
if len(code) > start + 1 and \ if len(code) > start + 1 and \
code[start] == found == code[start+1]: code[start] == found == code[start+1]:
@@ -147,8 +147,14 @@ def _tokenize(code, start_pos=(1, 0)):
index = code.find(search, start) index = code.find(search, start)
if index == -1: if index == -1:
index = len(code) yield tok(expression, type=TokenNamespace.PYTHON_EXPR)
expression += code[start:index] yield tok(
found + code[start:],
type=TokenNamespace.UNTERMINATED_STRING,
)
start = len(code)
break
expression += found + code[start:index]
start = index + 1 start = index + 1
elif found == '!' and len(code) > start and code[start] == '=': elif found == '!' and len(code) > start and code[start] == '=':
# This is a python `!=` and not a conversion. # This is a python `!=` and not a conversion.
@@ -198,13 +204,9 @@ class Parser(parser.BaseParser):
add_token_callback add_token_callback
) )
token_type = TokenNamespace.token_map[typ].lower()
if len(stack) == 1: if len(stack) == 1:
error_leaf = ErrorLeaf( error_leaf = ErrorLeaf(token_type, value, start_pos, prefix)
TokenNamespace.token_map[typ].lower(),
value,
start_pos,
prefix
)
stack[0][2][1].append(error_leaf) stack[0][2][1].append(error_leaf)
else: else:
dfa, state, (type_, nodes) = stack[1] dfa, state, (type_, nodes) = stack[1]

View File

@@ -149,6 +149,7 @@ FAILING_EXAMPLES = [
"f'{'", "f'{'",
"f'{'", "f'{'",
"f'}'", "f'}'",
"f'{\"}'",
] ]
GLOBAL_NONLOCAL_ERROR = [ GLOBAL_NONLOCAL_ERROR = [