Refactor run.py to use the new API
test/run.py | 72
@@ -10,9 +10,9 @@ tests.
 
 There are different kind of tests:
 
-- completions / goto_definitions ``#?``
-- goto_assignments: ``#!``
-- usages: ``#<``
+- completions / inference ``#?``
+- goto: ``#!``
+- references: ``#<``
 
 How to run tests?
 +++++++++++++++++
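
For orientation, the ``#?`` and ``#!`` markers listed above appear in the test
files roughly like this (excerpted from the examples shown further down in
this same docstring)::

    #? int()
    ab = 3; ab

    #! 2 ['abc=1']
    abc
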
@@ -26,7 +26,7 @@ multiple Python versions.
 
 Integration test cases are located in ``test/completion`` directory
 and each test case is indicated by either the comment ``#?`` (completions /
-definitions), ``#!`` (assignments), or ``#<`` (usages).
+inference), ``#!`` (goto), or ``#<`` (references).
 There is also support for third party libraries. In a normal test run they are
 not being executed, you have to provide a ``--thirdparty`` option.
 
@@ -76,17 +76,17 @@ For example::
 Because it follows ``a.rea`` and a is an ``int``, which has a ``real``
 property.
 
-Goto Definitions
-++++++++++++++++
+Inference
++++++++++
 
-Definition tests use the same symbols like completion tests. This is
+Inference tests use the same symbols like completion tests. This is
 possible because the completion tests are defined with a list::
 
     #? int()
     ab = 3; ab
 
-Goto Assignments
-++++++++++++++++
+Goto
+++++
 
 Tests look like this::
@@ -100,8 +100,8 @@ describes the position of the test (otherwise it's just the end of line)::
 
     #! 2 ['abc=1']
     abc
 
-Usages
-++++++
+References
+++++++++++
 
 Tests look like this::
@@ -131,9 +131,9 @@ from jedi.inference.analysis import Warning
 
 
 TEST_COMPLETIONS = 0
-TEST_DEFINITIONS = 1
-TEST_ASSIGNMENTS = 2
-TEST_USAGES = 3
+TEST_INFERENCE = 1
+TEST_GOTO = 2
+TEST_REFERENCES = 3
 
 
 grammar36 = parso.load_grammar(version='3.6')
@@ -195,28 +195,25 @@ class IntegrationTestCase(BaseTestCase):
             self.line_nr_test, self.line.rstrip())
 
     def script(self, environment):
-        return jedi.Script(
-            self.source, self.line_nr, self.column, self.path,
-            environment=environment
-        )
+        return jedi.Script(self.source, path=self.path, environment=environment)
 
     def run(self, compare_cb, environment=None):
         testers = {
             TEST_COMPLETIONS: self.run_completion,
-            TEST_DEFINITIONS: self.run_goto_definitions,
-            TEST_ASSIGNMENTS: self.run_goto_assignments,
-            TEST_USAGES: self.run_usages,
+            TEST_INFERENCE: self.run_inference,
+            TEST_GOTO: self.run_goto,
+            TEST_REFERENCES: self.run_find_references,
         }
         return testers[self.test_type](compare_cb, environment)
 
     def run_completion(self, compare_cb, environment):
-        completions = self.script(environment).completions()
-        #import cProfile; cProfile.run('script.completions()')
+        completions = self.script(environment).complete(self.line_nr, self.column)
+        # import cProfile; cProfile.run('...')
 
         comp_str = {c.name for c in completions}
         return compare_cb(self, comp_str, set(literal_eval(self.correct)))
 
-    def run_goto_definitions(self, compare_cb, environment):
+    def run_inference(self, compare_cb, environment):
         script = self.script(environment)
         inference_state = script._inference_state
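
The hunk above is the core of the API migration: the position arguments move
out of the ``jedi.Script`` constructor and into the individual method calls.
A minimal before/after sketch of that pattern (the ``source``, ``line`` and
``column`` values here are made up for illustration)::

    import jedi

    source = "import os\nos.pa"
    line, column = 2, 5

    # Old API: the position was bound to the Script object itself.
    # completions = jedi.Script(source, line, column, 'example.py').completions()

    # New API: Script only holds the code; the position goes to each call.
    script = jedi.Script(source, path='example.py')
    completions = script.complete(line, column)
    print([c.name for c in completions])  # e.g. contains 'path'
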
@@ -233,9 +230,6 @@ class IntegrationTestCase(BaseTestCase):
         node = parser.get_root_node()
         module_context = script._get_module_context()
         user_context = get_user_context(module_context, (self.line_nr, 0))
-        # TODO needed?
-        #if user_context._value.api_type == 'function':
-        #    user_context = user_context.get_function_execution()
         node.parent = user_context.tree_node
         results = convert_values(user_context.infer_node(node))
         if not results:
@@ -250,17 +244,17 @@ class IntegrationTestCase(BaseTestCase):
             return should
 
         should = definition(self.correct, self.start, script.path)
-        result = script.goto_definitions()
+        result = script.infer(self.line_nr, self.column)
         is_str = set(comparison(r) for r in result)
         return compare_cb(self, is_str, should)
 
-    def run_goto_assignments(self, compare_cb, environment):
-        result = self.script(environment).goto_assignments()
+    def run_goto(self, compare_cb, environment):
+        result = self.script(environment).goto(self.line_nr, self.column)
         comp_str = str(sorted(str(r.description) for r in result))
         return compare_cb(self, comp_str, self.correct)
 
-    def run_usages(self, compare_cb, environment):
-        result = self.script(environment).usages()
+    def run_find_references(self, compare_cb, environment):
+        result = self.script(environment).find_references(self.line_nr, self.column)
         self.correct = self.correct.strip()
         compare = sorted(
             (re.sub(r'^test\.completion\.', '', r.module_name), r.line, r.column)
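
The same pattern applies to the other entry points renamed in this hunk. A
hedged sketch of how a caller migrates (method names as in the diff above;
the code snippet and positions are invented for illustration)::

    import jedi

    script = jedi.Script("x = 1\nx\n", path='example.py')

    # goto_definitions() -> infer(line, column)
    definitions = script.infer(2, 0)
    # goto_assignments() -> goto(line, column)
    assignments = script.goto(2, 0)
    # usages()           -> find_references(line, column)
    references = script.find_references(2, 0)

    print([d.name for d in definitions])  # e.g. ['int']
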
@@ -287,7 +281,7 @@ class IntegrationTestCase(BaseTestCase):
 class StaticAnalysisCase(BaseTestCase):
     """
     Static Analysis cases lie in the static_analysis folder.
-    The tests also start with `#!`, like the goto_definition tests.
+    The tests also start with `#!`, like the inference tests.
     """
     def __init__(self, path):
         self._path = path
@@ -311,12 +305,14 @@ class StaticAnalysisCase(BaseTestCase):
         return cases
 
     def run(self, compare_cb, environment):
+        def typ_str(inst):
+            return 'warning ' if isinstance(inst, Warning) else ''
+
         analysis = jedi.Script(
             self._source,
             path=self._path,
             environment=environment,
         )._analysis()
-        typ_str = lambda inst: 'warning ' if isinstance(inst, Warning) else ''
         analysis = [(r.line, r.column, typ_str(r) + r.name)
                     for r in analysis]
         compare_cb(self, analysis, self.collect_comparison())
@@ -354,19 +350,19 @@ def collect_file_tests(path, lines, lines_to_execute):
            else:
                column = len(line) - 1  # -1 for the \n
            if test_type == '!':
-               yield makecase(TEST_ASSIGNMENTS)
+               yield makecase(TEST_GOTO)
            elif test_type == '<':
-               yield makecase(TEST_USAGES)
+               yield makecase(TEST_REFERENCES)
            elif correct.startswith('['):
                yield makecase(TEST_COMPLETIONS)
            else:
-               yield makecase(TEST_DEFINITIONS)
+               yield makecase(TEST_INFERENCE)
            correct = None
        else:
            skip_version_info = skip_python_version(line) or skip_version_info
            try:
                r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]*)', line)
-               # test_type is ? for completion and ! for goto_assignments
+               # test_type is ? for completion and ! for goto
                test_type = r.group(1)
                correct = r.group(2)
                # Quick hack to make everything work (not quite a bloody unicorn hack though).
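
The regular expression kept as context in this last hunk is what splits a test
comment into ``test_type`` and the expected result; a small standalone check
with an illustrative line shows how the two groups come apart::

    import re

    line = "#? ['real']\n"
    match = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]*)', line)
    print(match.group(1))  # '?'        -> a completion test
    print(match.group(2))  # "['real']" -> the expected completions
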