Mirror of https://github.com/davidhalter/jedi.git, synced 2025-12-09 23:34:45 +08:00.
Use textwrap.dedent for better readability of the testing code.
This commit is contained in:
@@ -1,6 +1,7 @@
|
|||||||
# -*- coding: utf-8 # This file contains Unicode characters.
|
# -*- coding: utf-8 # This file contains Unicode characters.
|
||||||
|
|
||||||
from io import StringIO
|
from io import StringIO
|
||||||
|
from textwrap import dedent
|
||||||
|
|
||||||
from jedi._compatibility import u, is_py3
|
from jedi._compatibility import u, is_py3
|
||||||
from jedi.parser.token import NAME, OP, NEWLINE, STRING, INDENT
|
from jedi.parser.token import NAME, OP, NEWLINE, STRING, INDENT
|
||||||
@@ -12,19 +13,19 @@ from ..helpers import unittest
|
|||||||
|
|
||||||
class TokenTest(unittest.TestCase):
|
class TokenTest(unittest.TestCase):
|
||||||
def test_end_pos_one_line(self):
|
def test_end_pos_one_line(self):
|
||||||
parsed = parser.Parser(parser.load_grammar(), u('''
|
parsed = parser.Parser(parser.load_grammar(), dedent(u('''
|
||||||
def testit():
|
def testit():
|
||||||
a = "huhu"
|
a = "huhu"
|
||||||
'''))
|
''')))
|
||||||
tok = parsed.module.subscopes[0].statements[0].children[2]
|
tok = parsed.module.subscopes[0].statements[0].children[2]
|
||||||
assert tok.end_pos == (3, 14)
|
assert tok.end_pos == (3, 14)
|
||||||
|
|
||||||
def test_end_pos_multi_line(self):
|
def test_end_pos_multi_line(self):
|
||||||
parsed = parser.Parser(parser.load_grammar(), u('''
|
parsed = parser.Parser(parser.load_grammar(), dedent(u('''
|
||||||
def testit():
|
def testit():
|
||||||
a = """huhu
|
a = """huhu
|
||||||
asdfasdf""" + "h"
|
asdfasdf""" + "h"
|
||||||
'''))
|
''')))
|
||||||
tok = parsed.module.subscopes[0].statements[0].children[2].children[0]
|
tok = parsed.module.subscopes[0].statements[0].children[2].children[0]
|
||||||
assert tok.end_pos == (4, 11)
|
assert tok.end_pos == (4, 11)
|
||||||
|
|
||||||
@@ -55,11 +56,12 @@ asdfasdf""" + "h"
|
|||||||
|
|
||||||
def test_function_whitespace(self):
|
def test_function_whitespace(self):
|
||||||
# Test function definition whitespace identification
|
# Test function definition whitespace identification
|
||||||
fundef = u('''def test_whitespace(*args, **kwargs):
|
fundef = dedent(u('''
|
||||||
|
def test_whitespace(*args, **kwargs):
|
||||||
x = 1
|
x = 1
|
||||||
if x > 0:
|
if x > 0:
|
||||||
print(True)
|
print(True)
|
||||||
''')
|
'''))
|
||||||
fundef_io = StringIO(fundef)
|
fundef_io = StringIO(fundef)
|
||||||
tokens = parser.tokenize.generate_tokens(fundef_io.readline)
|
tokens = parser.tokenize.generate_tokens(fundef_io.readline)
|
||||||
token_list = list(tokens)
|
token_list = list(tokens)
|
||||||
@@ -78,10 +80,10 @@ asdfasdf""" + "h"
|
|||||||
assert prefix == ' '
|
assert prefix == ' '
|
||||||
|
|
||||||
def test_identifier_contains_unicode(self):
|
def test_identifier_contains_unicode(self):
|
||||||
fundef = u('''
|
fundef = dedent(u('''
|
||||||
def 我あφ():
|
def 我あφ():
|
||||||
pass
|
pass
|
||||||
''')
|
'''))
|
||||||
fundef_io = StringIO(fundef)
|
fundef_io = StringIO(fundef)
|
||||||
tokens = parser.tokenize.generate_tokens(fundef_io.readline)
|
tokens = parser.tokenize.generate_tokens(fundef_io.readline)
|
||||||
token_list = list(tokens)
|
token_list = list(tokens)
|
||||||
|
|||||||
Reference in New Issue
Block a user