Finally add all f-string errors, except the nested invalid-syntax stuff that occurs in nested Python.

Dave Halter
2017-08-26 12:16:06 +02:00
parent 68cc383d02
commit 5b7a01ba62
3 changed files with 28 additions and 19 deletions

@@ -844,7 +844,7 @@ class _FStringRule(SyntaxRule):
     message_nested = "f-string: expressions nested too deeply"
     message_backslash = "f-string expression part cannot include a backslash"  # f'{"\"}' or f'{"\\"}'
     message_comment = "f-string expression part cannot include '#'"  # f'{#}'
-    message_string = "f-string: unterminated string"  # f'{"}'
+    message_unterminated_string = "f-string: unterminated string"  # f'{"}'
     message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"
     message_incomplete = "f-string: expecting '}'"  # f'{'
@@ -865,6 +865,12 @@ class _FStringRule(SyntaxRule):
             if child.type == 'expression':
                 self._check_expression(child)
             elif child.type == 'error_node':
+                next_ = child.get_next_leaf()
+                if next_.type == 'error_leaf' and next_.original_type == 'unterminated_string':
+                    self.add_issue(next_, message=self.message_unterminated_string)
+                    # At this point nothing more is coming except the error
+                    # leaf that we've already checked here.
+                    break
                 self.add_issue(child, message=self.message_incomplete)
             elif child.type == 'error_leaf':
                 self.add_issue(child, message=self.message_single_closing)
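
To see the new diagnostic end to end, here is a minimal sketch against parso's public API (load_grammar/iter_errors; the 3.6 grammar, since f-strings only exist there):

    import parso

    # Sketch only: parse an f-string whose inner string never closes
    # and inspect the issues the error finder reports.
    grammar = parso.load_grammar(version='3.6')
    module = grammar.parse('f\'{"}\'')  # the f'{"}' case from the tests
    for issue in grammar.iter_errors(module):
        # With this commit the message is the specific
        # "f-string: unterminated string" instead of the generic
        # "f-string: expecting '}'".
        print(issue.start_pos, issue.message)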

@@ -1,9 +1,9 @@
 import re
+from itertools import count

 from parso.utils import PythonVersionInfo
 from parso.utils import split_lines
 from parso.python.tokenize import Token
-from parso.python import token
 from parso import parser
 from parso.tree import TypedLeaf, ErrorNode, ErrorLeaf
@@ -11,14 +11,15 @@ version36 = PythonVersionInfo(3, 6)
 class TokenNamespace:
-    LBRACE = token.LBRACE
-    RBRACE = token.RBRACE
-    ENDMARKER = token.ENDMARKER
-    ERRORTOKEN = token.ERRORTOKEN
-    COLON = token.COLON
-    CONVERSION = 100
-    PYTHON_EXPR = 101
-    EXCLAMATION_MARK = 102
+    _c = count()
+    LBRACE = next(_c)
+    RBRACE = next(_c)
+    ENDMARKER = next(_c)
+    COLON = next(_c)
+    CONVERSION = next(_c)
+    PYTHON_EXPR = next(_c)
+    EXCLAMATION_MARK = next(_c)
+    UNTERMINATED_STRING = next(_c)

     token_map = dict((v, k) for k, v in locals().items())
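
The namespace now hands out its own token ids via itertools.count() instead of mixing values borrowed from parso.python.token with magic numbers like CONVERSION = 100. A standalone sketch of the pattern (standard library only; the isinstance filter is an addition here, so helpers like _c stay out of the reverse map):

    from itertools import count

    class Namespace:
        # next(_c) yields 0, 1, 2, ..., so every token type gets a
        # unique id in declaration order, with no numbers to keep in sync.
        _c = count()
        LBRACE = next(_c)
        RBRACE = next(_c)
        UNTERMINATED_STRING = next(_c)
        # Reverse map from numeric id back to the token name.
        token_map = dict((v, k) for k, v in locals().items()
                         if isinstance(v, int))

    print(Namespace.token_map[Namespace.RBRACE])  # -> 'RBRACE'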
@@ -138,7 +139,6 @@ def _tokenize(code, start_pos=(1, 0)):
         elif found == ':' and (squared_count or curly_count):
             expression += found
         elif found in ('"', "'"):
-            expression += found
             search = found
             if len(code) > start + 1 and \
                     code[start] == found == code[start+1]:
@@ -147,8 +147,14 @@ def _tokenize(code, start_pos=(1, 0)):
             index = code.find(search, start)
             if index == -1:
-                index = len(code)
-            expression += code[start:index]
+                yield tok(expression, type=TokenNamespace.PYTHON_EXPR)
+                yield tok(
+                    found + code[start:],
+                    type=TokenNamespace.UNTERMINATED_STRING,
+                )
+                start = len(code)
+                break
+            expression += found + code[start:index]
             start = index + 1
         elif found == '!' and len(code) > start and code[start] == '=':
             # This is a python `!=` and not a conversion.
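
The scanner change in isolation: when the closing quote of a string inside an f-string expression is missing, the tokenizer no longer pretends the string runs to the end of the code; it yields the pending expression and then everything from the opening quote onward as one UNTERMINATED_STRING token. A simplified, hypothetical re-implementation of just the quote handling (no triple-quote support, not parso's API):

    def scan_string(code, start, quote):
        """Scan a string whose opening `quote` sits just before `start`.

        Returns (token_value, is_unterminated, new_start).
        """
        index = code.find(quote, start)
        if index == -1:
            # No closing quote in the remaining code: everything left
            # becomes one unterminated-string token and scanning stops.
            return quote + code[start:], True, len(code)
        # Normal case: keep the quoted text and skip past the closer.
        return quote + code[start:index] + quote, False, index + 1

    print(scan_string('{"}', 2, '"'))  # ('"}', True, 3): unterminated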
@@ -198,13 +204,9 @@ class Parser(parser.BaseParser):
             add_token_callback
         )
+        token_type = TokenNamespace.token_map[typ].lower()
         if len(stack) == 1:
-            error_leaf = ErrorLeaf(
-                TokenNamespace.token_map[typ].lower(),
-                value,
-                start_pos,
-                prefix
-            )
+            error_leaf = ErrorLeaf(token_type, value, start_pos, prefix)
             stack[0][2][1].append(error_leaf)
         else:
             dfa, state, (type_, nodes) = stack[1]

@@ -149,6 +149,7 @@ FAILING_EXAMPLES = [
     "f'{'",
     "f'{'",
     "f'}'",
+    "f'{\"}'",
 ]

 GLOBAL_NONLOCAL_ERROR = [