Mirror of https://github.com/davidhalter/jedi.git (synced 2025-12-09 07:14:48 +08:00)
Refactored the parser calls. Now it's possible to use jedi.parser.python.parse to quickly parse something.
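For orientation, a minimal sketch of the new call style, modeled on the updated test_end_pos_one_line in the diff below; the tree attributes used here (subscopes, statements, children, end_pos) are the ones this revision's tests rely on and may differ in later jedi releases:

from textwrap import dedent
from jedi.parser.python import parse

# parse() replaces the old ParserWithRecovery(load_grammar(), source) call
# and returns the module node directly, so no .module indirection is needed.
module = parse(dedent('''
def testit():
    a = "huhu"
'''))

func = module.subscopes[0]    # the 'testit' function definition
stmt = func.statements[0]     # the 'a = "huhu"' assignment
leaf = stmt.children[2]       # the string literal leaf
assert leaf.end_pos == (3, 14)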
@@ -2,9 +2,10 @@
 
 from textwrap import dedent
 
-from jedi._compatibility import u, is_py3, py_version
+from jedi._compatibility import is_py3, py_version
 from jedi.parser.token import NAME, OP, NEWLINE, STRING, INDENT, ERRORTOKEN, ENDMARKER
-from jedi.parser import ParserWithRecovery, load_grammar, tokenize
+from jedi.parser import tokenize
+from jedi.parser.python import parse
 from jedi.common import splitlines
 from jedi.parser.tokenize import TokenInfo
 
@@ -17,25 +18,25 @@ def _get_token_list(string):
 
 class TokenTest(unittest.TestCase):
     def test_end_pos_one_line(self):
-        parsed = ParserWithRecovery(load_grammar(), dedent(u('''
+        parsed = parse(dedent('''
         def testit():
             a = "huhu"
-        ''')))
-        tok = parsed.module.subscopes[0].statements[0].children[2]
+        '''))
+        tok = parsed.subscopes[0].statements[0].children[2]
         assert tok.end_pos == (3, 14)
 
     def test_end_pos_multi_line(self):
-        parsed = ParserWithRecovery(load_grammar(), dedent(u('''
+        parsed = parse(dedent('''
         def testit():
             a = """huhu
         asdfasdf""" + "h"
-        ''')))
-        tok = parsed.module.subscopes[0].statements[0].children[2].children[0]
+        '''))
+        tok = parsed.subscopes[0].statements[0].children[2].children[0]
         assert tok.end_pos == (4, 11)
 
     def test_simple_no_whitespace(self):
         # Test a simple one line string, no preceding whitespace
-        simple_docstring = u('"""simple one line docstring"""')
+        simple_docstring = '"""simple one line docstring"""'
         tokens = tokenize.source_tokens(simple_docstring)
         token_list = list(tokens)
         _, value, _, prefix = token_list[0]
@@ -44,7 +45,7 @@ class TokenTest(unittest.TestCase):
 
     def test_simple_with_whitespace(self):
         # Test a simple one line string with preceding whitespace and newline
-        simple_docstring = u(' """simple one line docstring""" \r\n')
+        simple_docstring = ' """simple one line docstring""" \r\n'
         tokens = tokenize.source_tokens(simple_docstring)
         token_list = list(tokens)
         assert token_list[0][0] == INDENT
@@ -58,12 +59,12 @@ class TokenTest(unittest.TestCase):
 
     def test_function_whitespace(self):
         # Test function definition whitespace identification
-        fundef = dedent(u('''
+        fundef = dedent('''
         def test_whitespace(*args, **kwargs):
             x = 1
             if x > 0:
                 print(True)
-        '''))
+        ''')
         tokens = tokenize.source_tokens(fundef)
         token_list = list(tokens)
         for _, value, _, prefix in token_list:
@@ -83,7 +84,7 @@ class TokenTest(unittest.TestCase):
     def test_tokenize_multiline_I(self):
         # Make sure multiline string having newlines have the end marker on the
         # next line
-        fundef = u('''""""\n''')
+        fundef = '''""""\n'''
         tokens = tokenize.source_tokens(fundef)
         token_list = list(tokens)
         assert token_list == [TokenInfo(ERRORTOKEN, '""""\n', (1, 0), ''),
@@ -92,7 +93,7 @@ class TokenTest(unittest.TestCase):
     def test_tokenize_multiline_II(self):
         # Make sure multiline string having no newlines have the end marker on
         # same line
-        fundef = u('''""""''')
+        fundef = '''""""'''
         tokens = tokenize.source_tokens(fundef)
         token_list = list(tokens)
         assert token_list == [TokenInfo(ERRORTOKEN, '""""', (1, 0), ''),
@@ -101,17 +102,17 @@ class TokenTest(unittest.TestCase):
     def test_tokenize_multiline_III(self):
         # Make sure multiline string having newlines have the end marker on the
         # next line even if several newline
-        fundef = u('''""""\n\n''')
+        fundef = '''""""\n\n'''
         tokens = tokenize.source_tokens(fundef)
         token_list = list(tokens)
         assert token_list == [TokenInfo(ERRORTOKEN, '""""\n\n', (1, 0), ''),
                               TokenInfo(ENDMARKER, '', (3, 0), '')]
 
     def test_identifier_contains_unicode(self):
-        fundef = dedent(u('''
+        fundef = dedent('''
         def 我あφ():
             pass
-        '''))
+        ''')
         tokens = tokenize.source_tokens(fundef)
         token_list = list(tokens)
         unicode_token = token_list[1]
@@ -133,8 +134,8 @@ class TokenTest(unittest.TestCase):
         ]
 
         for s in string_tokens:
-            parsed = ParserWithRecovery(load_grammar(), u('''a = %s\n''' % s))
-            simple_stmt = parsed.module.children[0]
+            module = parse('''a = %s\n''' % s)
+            simple_stmt = module.children[0]
             expr_stmt = simple_stmt.children[0]
             assert len(expr_stmt.children) == 3
             string_tok = expr_stmt.children[2]