Refactored test_integration so that those tests can be called from multiple places.
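In short, every IntegrationTestCase now exposes a run(compare_cb) method: the case does the work and the caller supplies the comparison, so the same cases can be driven from pytest or from a standalone runner. A minimal sketch of the pattern (case.run and assert_case_equal are taken from the diff below; the report_runner callback is invented here for illustration):

def pytest_runner(case):
    # pytest-style driver: assert_case_equal raises AssertionError on mismatch
    case.run(assert_case_equal)

def report_runner(case):
    # hypothetical second driver that only reports instead of asserting
    def compare(case, actual, desired):
        print('%s: %s' % (case, 'ok' if actual == desired else 'FAIL'))
    case.run(compare)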
test/base.py (32 changed lines)
@@ -7,10 +7,7 @@ import os
from os.path import abspath, dirname
import functools

import pytest

import jedi
from jedi._compatibility import is_py25


test_dir = dirname(abspath(__file__))
@@ -63,32 +60,3 @@ def cwd_at(path):
            os.chdir(oldcwd)
        return wrapper
    return decorator


_py25_fails = 0
py25_allowed_fails = 9


def skip_py25_fails(func):
    """
    Skip first `py25_allowed_fails` failures in Python 2.5.

    .. todo:: Remove this decorator by implementing "skip tag" for
       integration tests.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwds):
        global _py25_fails
        try:
            func(*args, **kwds)
        except AssertionError:
            _py25_fails += 1
            if _py25_fails > py25_allowed_fails:
                raise
            else:
                pytest.skip("%d-th failure (there can be %d failures)" %
                            (_py25_fails, py25_allowed_fails))
    return wrapper

if not is_py25:
    skip_py25_fails = lambda f: f
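For context, the decorator removed above acted as a global failure budget: on Python 2.5 the first py25_allowed_fails (9) AssertionErrors were converted into pytest skips and anything beyond that was re-raised, while on other versions it was a no-op. A behaviour sketch (the wrapped function is made up; the counting follows the code above):

@skip_py25_fails
def check_case(case):
    assert case_passes(case)    # hypothetical check that may fail on 2.5

# On Python 2.5, the first 9 failing calls end in pytest.skip(...);
# the 10th failure propagates as a normal AssertionError.
# On any other Python version the decorator is replaced by the identity lambda.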
test/run.py (78 changed lines)
@@ -100,7 +100,8 @@ import os
import re

import jedi
from jedi._compatibility import unicode, StringIO, reduce, is_py25
from jedi._compatibility import unicode, StringIO, reduce, is_py25, \
    literal_eval


TEST_COMPLETIONS = 0
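The newly imported literal_eval is what turns the expected-result string from a test file's `#? ` comment into real Python objects without evaluating arbitrary code. A tiny illustration (the sample strings are made up; jedi._compatibility's literal_eval is assumed to behave like ast.literal_eval):

from ast import literal_eval

assert set(literal_eval("['join', 'joinfields']")) == set(['join', 'joinfields'])
assert literal_eval("[(0, 4), ('os', 5, 3)]") == [(0, 4), ('os', 5, 3)]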
@@ -110,7 +111,6 @@ TEST_USAGES = 3


class IntegrationTestCase(object):

    def __init__(self, test_type, correct, line_nr, column, start, line,
                 path=None):
        self.test_type = test_type
@@ -130,6 +130,80 @@ class IntegrationTestCase(object):
    def script(self):
        return jedi.Script(self.source, self.line_nr, self.column, self.path)

    def run(self, compare_cb):
        testers = {
            TEST_COMPLETIONS: self.run_completion,
            TEST_DEFINITIONS: self.run_definition,
            TEST_ASSIGNMENTS: self.run_goto,
            TEST_USAGES: self.run_related_name,
        }
        return testers[self.test_type](compare_cb)

    def run_completion(self, compare_cb):
        completions = self.script().complete()
        #import cProfile; cProfile.run('script.complete()')

        comp_str = set([c.word for c in completions])
        return compare_cb(self, comp_str, set(literal_eval(self.correct)))

    def run_definition(self, compare_cb):
        def definition(correct, correct_start, path):
            def defs(line_nr, indent):
                s = jedi.Script(script.source, line_nr, indent, path)
                return set(s.definition())

            should_be = set()
            number = 0
            for index in re.finditer('(?: +|$)', correct):
                if correct == ' ':
                    continue
                # -1 for the comment, +3 because of the comment start `#? `
                start = index.start()
                number += 1
                try:
                    should_be |= defs(self.line_nr - 1, start + correct_start)
                except Exception:
                    print('could not resolve %s indent %s'
                          % (self.line_nr - 1, start))
                    raise
            # because the objects have different ids, `repr`, then compare.
            should_str = set(r.desc_with_module for r in should_be)
            if len(should_str) < number:
                raise Exception('Solution @%s not right, '
                                'too few test results: %s' % (self.line_nr - 1, should_str))
            return should_str

        script = self.script()
        should_str = definition(self.correct, self.start, script.source_path)
        result = script.definition()
        is_str = set(r.desc_with_module for r in result)
        return compare_cb(self, is_str, should_str)

    def run_goto(self, compare_cb):
        result = self.script().goto()
        comp_str = str(sorted(str(r.description) for r in result))
        return compare_cb(self, comp_str, self.correct)

    def run_related_name(self, compare_cb):
        result = self.script().related_names()
        self.correct = self.correct.strip()
        compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1])
                         for r in result)
        wanted = []
        if not self.correct:
            positions = []
        else:
            positions = literal_eval(self.correct)
        for pos_tup in positions:
            if type(pos_tup[0]) == str:
                # this means that there is a module specified
                wanted.append(pos_tup)
            else:
                wanted.append(('renaming', self.line_nr + pos_tup[0],
                               pos_tup[1]))

        return compare_cb(self, compare, sorted(wanted))


def collect_file_tests(lines, lines_to_execute):
    makecase = lambda t: IntegrationTestCase(t, correct, line_nr, column,
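One detail worth spelling out is the position handling in run_related_name above: the expected positions parsed via literal_eval may be plain (relative_line, column) pairs, which get offset against the case's own line and tagged 'renaming', or (module_name, line, column) triples, which are kept verbatim. A worked example (the literal values are invented):

# Suppose the case sits at line_nr == 10 and the expectation string is
# "[(0, 4), (3, 4), ('os', 5, 3)]".
positions = [(0, 4), (3, 4), ('os', 5, 3)]
# After the loop above, the expected list becomes:
wanted = [('renaming', 10, 4), ('renaming', 13, 4), ('os', 5, 3)]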
@@ -1,14 +1,8 @@
import os
import re

import pytest

from . import base
from .run import \
    TEST_COMPLETIONS, TEST_DEFINITIONS, TEST_ASSIGNMENTS, TEST_USAGES

import jedi
from jedi._compatibility import literal_eval


def assert_case_equal(case, actual, desired):
@@ -26,90 +20,12 @@ desired = %s
""" % (case, actual, desired)


def run_completion_test(case):
    (script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
    completions = script.complete()
    #import cProfile; cProfile.run('script.complete()')

    comp_str = set([c.word for c in completions])
    assert_case_equal(case, comp_str, set(literal_eval(correct)))


def run_definition_test(case):
    def definition(correct, correct_start, path):
        def defs(line_nr, indent):
            s = jedi.Script(script.source, line_nr, indent, path)
            return set(s.definition())

        should_be = set()
        number = 0
        for index in re.finditer('(?: +|$)', correct):
            if correct == ' ':
                continue
            # -1 for the comment, +3 because of the comment start `#? `
            start = index.start()
            number += 1
            try:
                should_be |= defs(line_nr - 1, start + correct_start)
            except Exception:
                print('could not resolve %s indent %s' % (line_nr - 1, start))
                raise
        # because the objects have different ids, `repr` it, then compare it.
        should_str = set(r.desc_with_module for r in should_be)
        if len(should_str) < number:
            raise Exception('Solution @%s not right, too few test results: %s'
                            % (line_nr - 1, should_str))
        return should_str

    (correct, line_nr, column, start, line) = \
        (case.correct, case.line_nr, case.column, case.start, case.line)
    script = case.script()
    should_str = definition(correct, start, script.source_path)
    result = script.definition()
    is_str = set(r.desc_with_module for r in result)
    assert_case_equal(case, is_str, should_str)


def run_goto_test(case):
    (script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
    result = script.goto()
    comp_str = str(sorted(str(r.description) for r in result))
    assert_case_equal(case, comp_str, correct)


def run_related_name_test(case):
    (script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
    result = script.related_names()
    correct = correct.strip()
    compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1])
                     for r in result)
    wanted = []
    if not correct:
        positions = []
    else:
        positions = literal_eval(correct)
    for pos_tup in positions:
        if type(pos_tup[0]) == str:
            # this means that there is a module specified
            wanted.append(pos_tup)
        else:
            wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))

    assert_case_equal(case, compare, sorted(wanted))


def test_integration(case, monkeypatch, pytestconfig):
    if case.skip is not None:
        pytest.skip(case.skip)
    repo_root = base.root_dir
    monkeypatch.chdir(os.path.join(repo_root, 'jedi'))
    testers = {
        TEST_COMPLETIONS: run_completion_test,
        TEST_DEFINITIONS: run_definition_test,
        TEST_ASSIGNMENTS: run_goto_test,
        TEST_USAGES: run_related_name_test,
    }
    base.skip_py25_fails(testers[case.test_type])(case)
    case.run(assert_case_equal)


def test_refactor(refactor_case):