mirror of
https://github.com/davidhalter/parso.git
synced 2025-12-06 21:04:29 +08:00
Whitespace.
@@ -22,6 +22,7 @@ def test_end_pos_one_line():
     string = simple_stmt.children[0].get_rhs()
     assert string.end_pos == (3, 14)
 
+
 def test_end_pos_multi_line():
     parsed = parse(dedent('''
     def testit():
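A note on the API these end_pos hunks exercise: end_pos is the exclusive (line, column) position one past a node's last character, and get_rhs() returns the right-hand side of an assignment. A minimal standalone sketch, assuming a current parso install (the input string and positions below are illustrative, not taken from the commit):

    import parso

    # A one-line module: children[0] is the simple_stmt, whose first
    # child is the expr_stmt for the assignment.
    module = parso.parse('x = "a string"\n')
    expr_stmt = module.children[0].children[0]
    string_leaf = expr_stmt.get_rhs()
    # '"a string"' spans columns 4-13 of line 1, so the exclusive
    # end position is (1, 14).
    assert string_leaf.end_pos == (1, 14)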
@@ -32,6 +33,7 @@ def test_end_pos_multi_line():
     string_leaf = expr_stmt.get_rhs().children[0]
     assert string_leaf.end_pos == (4, 11)
 
+
 def test_simple_no_whitespace():
     # Test a simple one line string, no preceding whitespace
     simple_docstring = '"""simple one line docstring"""'
@@ -41,6 +43,7 @@ def test_simple_no_whitespace():
     assert prefix == ''
     assert value == '"""simple one line docstring"""'
 
+
 def test_simple_with_whitespace():
     # Test a simple one line string with preceding whitespace and newline
     simple_docstring = ' """simple one line docstring""" \r\n'
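The prefix assertions above rely on parso attaching leading whitespace to the token that follows it, rather than emitting separate whitespace tokens. A small sketch of the same idea through the tree API, with an illustrative input (assuming a current parso install):

    import parso

    module = parso.parse(' """simple one line docstring"""\n')
    leaf = module.get_first_leaf()
    # The leading space lives in the prefix; the token value itself
    # starts directly at the quotes.
    assert leaf.prefix == ' '
    assert leaf.value == '"""simple one line docstring"""'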
@@ -55,6 +58,7 @@ def test_simple_with_whitespace():
     assert prefix == ' '
     assert typ == NEWLINE
 
+
 def test_function_whitespace():
     # Test function definition whitespace identification
     fundef = dedent('''
@@ -79,6 +83,7 @@ def test_function_whitespace():
         if value == 'if':
             assert prefix == ' '
 
+
 def test_tokenize_multiline_I():
     # Make sure a multiline string containing newlines has the end marker
     # on the next line
@@ -88,6 +93,7 @@ def test_tokenize_multiline_I():
     assert token_list == [TokenInfo(ERRORTOKEN, '""""\n', (1, 0), ''),
                           TokenInfo(ENDMARKER, '', (2, 0), '')]
 
+
 def test_tokenize_multiline_II():
     # Make sure a multiline string containing no newlines has the end
     # marker on the same line
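The three multiline tests pin down where the end marker lands after an unterminated triple quote. The same recovery behaviour is visible through parso's error-tolerant parser; a hedged sketch (the leaf types printed reflect how I understand parso's recovery output, not something asserted by the commit):

    import parso

    # '""""' opens a triple-quoted string that is never closed.
    module = parso.parse('""""\n')
    first = module.get_first_leaf()
    last = module.get_last_leaf()
    # Recovery turns the broken string into an error leaf at (1, 0);
    # the end marker follows on the next line.
    print(first.type, first.start_pos)
    print(last.type, last.end_pos)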
@@ -97,6 +103,7 @@ def test_tokenize_multiline_II():
     assert token_list == [TokenInfo(ERRORTOKEN, '""""', (1, 0), ''),
                           TokenInfo(ENDMARKER, '', (1, 4), '')]
 
+
 def test_tokenize_multiline_III():
     # Make sure a multiline string containing newlines has the end marker
     # on the next line, even if there are several newlines
@@ -106,6 +113,7 @@ def test_tokenize_multiline_III():
     assert token_list == [TokenInfo(ERRORTOKEN, '""""\n\n', (1, 0), ''),
                           TokenInfo(ENDMARKER, '', (3, 0), '')]
 
+
 def test_identifier_contains_unicode():
     fundef = dedent('''
     def 我あφ():
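For the unicode test, the identifier also survives through the regular tree API as an ordinary name; a brief sketch reusing the same identifier (assuming a current parso install):

    import parso

    module = parso.parse('def 我あφ():\n    pass\n')
    funcdef = module.children[0]
    assert funcdef.type == 'funcdef'
    # Non-ASCII identifiers follow Python 3's identifier rules.
    assert funcdef.name.value == '我あφ'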
@@ -121,6 +129,7 @@ def test_identifier_contains_unicode():
     # They will be ignored in the parser, that's ok.
     assert unicode_token[0] == OP
 
+
 def test_quoted_strings():
     string_tokens = [
         'u"test"',
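The last hunk ends inside the string_tokens list, whose first entry is u"test"; prefix characters such as u stay inside the string token's value. A short sketch of that behaviour (my own input, not part of the diff):

    import parso

    module = parso.parse('u"test"\n')
    leaf = module.get_first_leaf()
    assert leaf.type == 'string'
    # The u prefix is part of the token value, not the whitespace prefix.
    assert leaf.value == 'u"test"'
    assert leaf.prefix == ''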