Mirror of https://github.com/davidhalter/parso.git, synced 2025-12-09 22:25:53 +08:00
Remove a unittest.TestCase usage and replace it with pytest tests.
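The change follows the standard unittest-to-pytest conversion: the unittest.TestCase subclass is dropped, each test_* method becomes a module-level function without self, and the plain assert statements stay as they are, since pytest collects test_* functions and introspects failing asserts on its own. A minimal sketch of the pattern, using hypothetical names (ExampleTest, test_add) rather than code from this diff:

    import unittest


    # Before: a TestCase subclass; the method takes self and is run by unittest.
    class ExampleTest(unittest.TestCase):
        def test_add(self):
            assert 1 + 1 == 2


    # After: a plain module-level test_* function; pytest collects it directly
    # and reports failing asserts with introspected values, no TestCase needed.
    def test_add():
        assert 1 + 1 == 2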
@@ -9,139 +9,135 @@ from parso import tokenize
 from parso.python import parse
 from parso.tokenize import TokenInfo
 
-from .helpers import unittest
 
 def _get_token_list(string):
     return list(tokenize.source_tokens(string))
 
 
-class TokenTest(unittest.TestCase):
-    def test_end_pos_one_line(self):
-        parsed = parse(dedent('''
-        def testit():
-            a = "huhu"
-        '''))
-        simple_stmt = next(parsed.iter_funcdefs()).get_suite().children[-1]
-        string = simple_stmt.children[0].get_rhs()
-        assert string.end_pos == (3, 14)
+def test_end_pos_one_line():
+    parsed = parse(dedent('''
+    def testit():
+        a = "huhu"
+    '''))
+    simple_stmt = next(parsed.iter_funcdefs()).get_suite().children[-1]
+    string = simple_stmt.children[0].get_rhs()
+    assert string.end_pos == (3, 14)
 
-    def test_end_pos_multi_line(self):
-        parsed = parse(dedent('''
-        def testit():
-            a = """huhu
+def test_end_pos_multi_line():
+    parsed = parse(dedent('''
+    def testit():
+        a = """huhu
 asdfasdf""" + "h"
-        '''))
-        expr_stmt = next(parsed.iter_funcdefs()).get_suite().children[1].children[0]
-        string_leaf = expr_stmt.get_rhs().children[0]
-        assert string_leaf.end_pos == (4, 11)
+    '''))
+    expr_stmt = next(parsed.iter_funcdefs()).get_suite().children[1].children[0]
+    string_leaf = expr_stmt.get_rhs().children[0]
+    assert string_leaf.end_pos == (4, 11)
 
-    def test_simple_no_whitespace(self):
-        # Test a simple one line string, no preceding whitespace
-        simple_docstring = '"""simple one line docstring"""'
-        tokens = tokenize.source_tokens(simple_docstring)
-        token_list = list(tokens)
-        _, value, _, prefix = token_list[0]
-        assert prefix == ''
-        assert value == '"""simple one line docstring"""'
+def test_simple_no_whitespace():
+    # Test a simple one line string, no preceding whitespace
+    simple_docstring = '"""simple one line docstring"""'
+    tokens = tokenize.source_tokens(simple_docstring)
+    token_list = list(tokens)
+    _, value, _, prefix = token_list[0]
+    assert prefix == ''
+    assert value == '"""simple one line docstring"""'
 
-    def test_simple_with_whitespace(self):
-        # Test a simple one line string with preceding whitespace and newline
-        simple_docstring = ' """simple one line docstring""" \r\n'
-        tokens = tokenize.source_tokens(simple_docstring)
-        token_list = list(tokens)
-        assert token_list[0][0] == INDENT
-        typ, value, start_pos, prefix = token_list[1]
-        assert prefix == ' '
-        assert value == '"""simple one line docstring"""'
-        assert typ == STRING
-        typ, value, start_pos, prefix = token_list[2]
-        assert prefix == ' '
-        assert typ == NEWLINE
+def test_simple_with_whitespace():
+    # Test a simple one line string with preceding whitespace and newline
+    simple_docstring = ' """simple one line docstring""" \r\n'
+    tokens = tokenize.source_tokens(simple_docstring)
+    token_list = list(tokens)
+    assert token_list[0][0] == INDENT
+    typ, value, start_pos, prefix = token_list[1]
+    assert prefix == ' '
+    assert value == '"""simple one line docstring"""'
+    assert typ == STRING
+    typ, value, start_pos, prefix = token_list[2]
+    assert prefix == ' '
+    assert typ == NEWLINE
 
-    def test_function_whitespace(self):
-        # Test function definition whitespace identification
-        fundef = dedent('''
-        def test_whitespace(*args, **kwargs):
-            x = 1
-            if x > 0:
-                print(True)
-        ''')
-        tokens = tokenize.source_tokens(fundef)
-        token_list = list(tokens)
-        for _, value, _, prefix in token_list:
-            if value == 'test_whitespace':
-                assert prefix == ' '
-            if value == '(':
-                assert prefix == ''
-            if value == '*':
-                assert prefix == ''
-            if value == '**':
-                assert prefix == ' '
-            if value == 'print':
-                assert prefix == ' '
-            if value == 'if':
-                assert prefix == ' '
+def test_function_whitespace():
+    # Test function definition whitespace identification
+    fundef = dedent('''
+    def test_whitespace(*args, **kwargs):
+        x = 1
+        if x > 0:
+            print(True)
+    ''')
+    tokens = tokenize.source_tokens(fundef)
+    token_list = list(tokens)
+    for _, value, _, prefix in token_list:
+        if value == 'test_whitespace':
+            assert prefix == ' '
+        if value == '(':
+            assert prefix == ''
+        if value == '*':
+            assert prefix == ''
+        if value == '**':
+            assert prefix == ' '
+        if value == 'print':
+            assert prefix == ' '
+        if value == 'if':
+            assert prefix == ' '
 
-    def test_tokenize_multiline_I(self):
-        # Make sure multiline string having newlines have the end marker on the
-        # next line
-        fundef = '''""""\n'''
-        tokens = tokenize.source_tokens(fundef)
-        token_list = list(tokens)
-        assert token_list == [TokenInfo(ERRORTOKEN, '""""\n', (1, 0), ''),
-                              TokenInfo(ENDMARKER , '', (2, 0), '')]
+def test_tokenize_multiline_I():
+    # Make sure multiline string having newlines have the end marker on the
+    # next line
+    fundef = '''""""\n'''
+    tokens = tokenize.source_tokens(fundef)
+    token_list = list(tokens)
+    assert token_list == [TokenInfo(ERRORTOKEN, '""""\n', (1, 0), ''),
+                          TokenInfo(ENDMARKER , '', (2, 0), '')]
 
-    def test_tokenize_multiline_II(self):
-        # Make sure multiline string having no newlines have the end marker on
-        # same line
-        fundef = '''""""'''
-        tokens = tokenize.source_tokens(fundef)
-        token_list = list(tokens)
-        assert token_list == [TokenInfo(ERRORTOKEN, '""""', (1, 0), ''),
-                              TokenInfo(ENDMARKER, '', (1, 4), '')]
+def test_tokenize_multiline_II():
+    # Make sure multiline string having no newlines have the end marker on
+    # same line
+    fundef = '''""""'''
+    tokens = tokenize.source_tokens(fundef)
+    token_list = list(tokens)
+    assert token_list == [TokenInfo(ERRORTOKEN, '""""', (1, 0), ''),
+                          TokenInfo(ENDMARKER, '', (1, 4), '')]
 
-    def test_tokenize_multiline_III(self):
-        # Make sure multiline string having newlines have the end marker on the
-        # next line even if several newline
-        fundef = '''""""\n\n'''
-        tokens = tokenize.source_tokens(fundef)
-        token_list = list(tokens)
-        assert token_list == [TokenInfo(ERRORTOKEN, '""""\n\n', (1, 0), ''),
-                              TokenInfo(ENDMARKER, '', (3, 0), '')]
+def test_tokenize_multiline_III():
+    # Make sure multiline string having newlines have the end marker on the
+    # next line even if several newline
+    fundef = '''""""\n\n'''
+    tokens = tokenize.source_tokens(fundef)
+    token_list = list(tokens)
+    assert token_list == [TokenInfo(ERRORTOKEN, '""""\n\n', (1, 0), ''),
+                          TokenInfo(ENDMARKER, '', (3, 0), '')]
 
-    def test_identifier_contains_unicode(self):
-        fundef = dedent('''
-        def 我あφ():
-            pass
-        ''')
-        tokens = tokenize.source_tokens(fundef)
-        token_list = list(tokens)
-        unicode_token = token_list[1]
-        if py_version >= 30:
-            assert unicode_token[0] == NAME
-        else:
-            # Unicode tokens in Python 2 seem to be identified as operators.
-            # They will be ignored in the parser, that's ok.
-            assert unicode_token[0] == OP
+def test_identifier_contains_unicode():
+    fundef = dedent('''
+    def 我あφ():
+        pass
+    ''')
+    tokens = tokenize.source_tokens(fundef)
+    token_list = list(tokens)
+    unicode_token = token_list[1]
+    if py_version >= 30:
+        assert unicode_token[0] == NAME
+    else:
+        # Unicode tokens in Python 2 seem to be identified as operators.
+        # They will be ignored in the parser, that's ok.
+        assert unicode_token[0] == OP
 
-    def test_quoted_strings(self):
-        string_tokens = [
-            'u"test"',
-            'u"""test"""',
-            'U"""test"""',
-            "u'''test'''",
-            "U'''test'''",
-        ]
-        for s in string_tokens:
-            module = parse('''a = %s\n''' % s)
-            simple_stmt = module.children[0]
-            expr_stmt = simple_stmt.children[0]
-            assert len(expr_stmt.children) == 3
-            string_tok = expr_stmt.children[2]
-            assert string_tok.type == 'string'
-            assert string_tok.value == s
+def test_quoted_strings():
+    string_tokens = [
+        'u"test"',
+        'u"""test"""',
+        'U"""test"""',
+        "u'''test'''",
+        "U'''test'''",
+    ]
+    for s in string_tokens:
+        module = parse('''a = %s\n''' % s)
+        simple_stmt = module.children[0]
+        expr_stmt = simple_stmt.children[0]
+        assert len(expr_stmt.children) == 3
+        string_tok = expr_stmt.children[2]
+        assert string_tok.type == 'string'
+        assert string_tok.value == s
 
 
 def test_ur_literals():