Added the f-string grammar without the tokenization part

This means that f-strings are not yet parsed as such, because the tokenizer does not yet emit the new f-string tokens.
Author: Dave Halter
Date:   2018-03-28 02:03:18 +02:00
Parent: ba0e7a2e9d
Commit: 9f88fe16a3
4 changed files with 26 additions and 4 deletions
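
As the message says, only the grammar side lands here: the tokenizer still hands every f-string to the parser as one plain STRING token. A small hand-checked sketch against parso's public API shows the resulting behavior at this commit (later releases do emit the FSTRING_* tokens and would print something else):

import parso

# No FSTRING_* tokens yet, so an f-string literal reaches the parser as a
# single ordinary STRING and parses as a plain string leaf.
tree = parso.parse("f'{a}'")
print(tree.get_first_leaf().type)  # 'string', not 'fstring'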

View File

@@ -563,7 +563,8 @@ class _ReturnAndYieldChecks(SyntaxRule):
                 and self._normalizer.version == (3, 5):
             self.add_issue(self.get_node(leaf), message=self.message_async_yield)
 
 
 @ErrorFinder.register_rule(type='atom')
+@ErrorFinder.register_rule(type='strings')
 class _BytesAndStringMix(SyntaxRule):
     # e.g. 's' b''
     message = "cannot mix bytes and nonbytes literals"
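
The extra registration keeps _BytesAndStringMix firing once implicitly concatenated literals become a `strings` node instead of part of `atom`. A rough usage sketch via parso's public error API (the message text comes from the rule above; the exact prefix may vary by version):

import parso

grammar = parso.load_grammar()
module = grammar.parse("x = 's' b''")
for issue in grammar.iter_errors(module):
    print(issue.message)  # e.g. SyntaxError: cannot mix bytes and nonbytes literals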
@@ -949,7 +950,7 @@ class _CheckAssignmentRule(SyntaxRule):
             first, second = node.children[:2]
             error = _get_comprehension_type(node)
             if error is None:
-                if second.type in ('dictorsetmaker', 'string'):
+                if second.type == 'dictorsetmaker':
                     error = 'literal'
                 elif first in ('(', '['):
                     if second.type == 'yield_expr':
@@ -968,7 +969,7 @@ class _CheckAssignmentRule(SyntaxRule):
             error = 'Ellipsis'
         elif type_ == 'comparison':
             error = 'comparison'
-        elif type_ in ('string', 'number'):
+        elif type_ in ('string', 'number', 'strings'):
             error = 'literal'
         elif type_ == 'yield_expr':
             # This one seems to be a slightly different warning in Python.
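
Both hunks guard the same diagnostic: literals are not valid assignment targets, and with the `strings` node the concatenated case now arrives at the second branch rather than inside `atom`. An illustrative check, again through parso's public API (message wording may differ slightly across versions):

import parso

grammar = parso.load_grammar()
module = grammar.parse("'a' 'b' = 1")
for issue in grammar.iter_errors(module):
    print(issue.message)  # e.g. SyntaxError: can't assign to literal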

View File

@@ -108,7 +108,7 @@ atom_expr: ['await'] atom trailer*
 atom: ('(' [yield_expr|testlist_comp] ')' |
        '[' [testlist_comp] ']' |
        '{' [dictorsetmaker] '}' |
-       NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False')
+       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
 testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
@@ -148,3 +148,9 @@ encoding_decl: NAME
 yield_expr: 'yield' [yield_arg]
 yield_arg: 'from' test | testlist
+
+strings: (STRING | fstring)+
+fstring: FSTRING_START fstring_content FSTRING_END
+fstring_content: (FSTRING_STRING | fstring_expr)*
+fstring_expr: '{' testlist [ FSTRING_CONVERSION ] [ fstring_format_spec ] '}'
+fstring_format_spec: ':' fstring_content
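
Read together, the new rules split an f-string into literal runs and embedded expressions. A hand-derived sketch of how one example is meant to decompose, assuming a tokenizer that emits the new token types (which, per the commit message, does not exist yet):

# f'a{x!r:>10}b'   intended shape under the rules above:
#
#   fstring
#     FSTRING_START        f'
#     fstring_content
#       FSTRING_STRING       a
#       fstring_expr         {x!r:>10}
#         '{'
#         testlist             x
#         FSTRING_CONVERSION   !r
#         fstring_format_spec
#           ':'
#           fstring_content -> FSTRING_STRING  >10
#         '}'
#       FSTRING_STRING       b
#     FSTRING_END          '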

View File

@@ -32,6 +32,14 @@ if py_version < 35:
 ERROR_DEDENT = next(_counter)
 tok_name[ERROR_DEDENT] = 'ERROR_DEDENT'
+FSTRING_START = next(_counter)
+tok_name[FSTRING_START] = 'FSTRING_START'
+FSTRING_END = next(_counter)
+tok_name[FSTRING_END] = 'FSTRING_END'
+FSTRING_STRING = next(_counter)
+tok_name[FSTRING_STRING] = 'FSTRING_STRING'
+FSTRING_CONVERSION = next(_counter)
+tok_name[FSTRING_CONVERSION] = 'FSTRING_CONVERSION'
 
 # Map from operator to number (since tokenize doesn't do this)
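
The constants extend the numbering scheme parso inherits from the stdlib token module: each new token type takes the next free integer and registers its display name. A minimal self-contained sketch of that idiom (starting the counter at token.N_TOKENS is my assumption about the surrounding file):

from itertools import count
from token import N_TOKENS, tok_name

# Allocate fresh IDs past the stdlib's highest token number and register
# their names, mirroring the FSTRING_* definitions above.
_counter = count(N_TOKENS)

FSTRING_START = next(_counter)
tok_name[FSTRING_START] = 'FSTRING_START'

print(FSTRING_START, tok_name[FSTRING_START])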

View File

@@ -25,6 +25,13 @@ from parso._compatibility import py_version
 from parso.utils import split_lines
 
+#fstring_start = /[f|fr|rf]["|"""|'|''']/
+#fstring_end = <same as the second part of the fstring start>
+fstring_expr_start = ''
+fstring_string = r'([^{}\n]+|\{\{|\}\})*'
+fstring_conversion = r'![sra]'
+
 TokenCollection = namedtuple(
     'TokenCollection',
     'pseudo_token single_quoted triple_quoted endpats always_break_tokens',
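
The two real patterns in this hunk can be exercised on their own. An illustrative snippet (the compiled names are mine; the pattern strings are copied from the diff):

import re

fstring_string = re.compile(r'([^{}\n]+|\{\{|\}\})*')
fstring_conversion = re.compile(r'![sra]')

# Literal f-string text: any run without braces or newlines, with doubled
# braces allowed as escapes; conversions are !s, !r or !a.
print(fstring_string.match('a{{b}}c').group())  # a{{b}}c
print(fstring_string.match('a{x}').group())     # a (stops at the '{')
print(bool(fstring_conversion.match('!r')))     # True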