Remove old test code
@@ -10,8 +10,7 @@ import traceback
import re
import itertools

from . import base

from . import base  # required to setup import path
from jedi._compatibility import reduce
import jedi
from jedi import refactoring
@@ -100,60 +99,3 @@ def collect_dir_tests(base_dir, test_files):
        source = f.read()
    for case in collect_file_tests(source, path, lines_to_execute):
        yield case


def run_test(cases):
    """
    This is the completion test for some cases. The tests are not unit test
    like, they are rather integration tests.
    It uses comments to specify a test in the next line. The comment also says,
    which results are expected. The comment always begins with `#?`. The last
    row symbolizes the cursor.

    For example::

        #? ['ab']
        ab = 3; a

        #? int()
        ab = 3; ab
    """
    fails = 0
    tests = 0
    for case in cases:
        try:
            if not case.check():
                print(case)
                print(' ' + repr(str(case.result)))
                print(' ' + repr(case.desired))
                fails += 1
        except Exception:
            print(traceback.format_exc())
            print(case)
            fails += 1
        tests += 1
    return tests, fails


def test_dir(refactoring_test_dir):
    for (path, cases) in itertools.groupby(
            collect_dir_tests(refactoring_test_dir, test_files),
            lambda case: case.path):
        num_tests, fails = run_test(cases)

        base.test_sum += num_tests
        f_name = os.path.basename(path)
        s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
        base.tests_fail += fails
        print(s)
        base.summary.append(s)


if __name__ == '__main__':
    refactoring_test_dir = os.path.join(base.test_dir, 'refactor')
    test_files = base.get_test_list()
    test_dir(refactoring_test_dir)

    base.print_summary()

    sys.exit(1 if base.tests_fail else 0)
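The removed run_test above relied on the `#?` comment convention its docstring
describes: the comment carries the expected results and applies to the
following line, whose last column is the cursor. A minimal sketch of reading
that convention out of a test file (the parse_expectations helper is
hypothetical, not part of jedi)::

    import re

    def parse_expectations(source):
        """Yield (line_nr, expected) pairs for every `#?` comment."""
        lines = source.splitlines()
        for i, line in enumerate(lines):
            match = re.match(r'\s*#\?\s*(.*)', line)
            if match and i + 1 < len(lines):
                # the expectation belongs to the next (1-based) line
                yield i + 2, match.group(1)

    source = "#? ['ab']\nab = 3; a"
    assert list(parse_expectations(source)) == [(2, "['ab']")]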
test/run.py
@@ -31,48 +31,6 @@ If you want to debug a test, just use the --debug option.
Auto-Completion
+++++++++++++++

.. autofunction:: run_completion_test

Definition
++++++++++

.. autofunction:: run_definition_test

Goto
++++

.. autofunction:: run_goto_test

Related Names
+++++++++++++

.. autofunction:: run_related_name_test
"""
import os
import sys
import re
import traceback
import itertools

from . import base

from jedi._compatibility import unicode, StringIO, reduce, literal_eval, is_py25

import jedi
from jedi import debug


sys.path.pop(0)  # pop again, because it might affect the completion


TEST_COMPLETIONS = 0
TEST_DEFINITIONS = 1
TEST_ASSIGNMENTS = 2
TEST_USAGES = 3


def run_completion_test(case):
    """
    Uses comments to specify a test in the next line. The comment says, which
    results are expected. The comment always begins with `#?`. The last row
    symbolizes the cursor.
@@ -85,74 +43,18 @@ def run_completion_test(case):
    Because it follows ``a.rea`` and a is an ``int``, which has a ``real``
    property.

    Returns 1 for fail and 0 for success.
    """
    (script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
    completions = script.complete()
    #import cProfile; cProfile.run('script.complete()')

    comp_str = set([c.word for c in completions])
    if comp_str != set(literal_eval(correct)):
        print('Solution @%s not right, received %s, wanted %s'\
            % (line_nr - 1, comp_str, correct))
        return 1
    return 0

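The completion check above boils down to a set comparison: the words offered
by the completion engine against the literal list behind `#?`. A reduced
sketch with stubbed completion words (ast.literal_eval stands in for the
compatibility wrapper imported above)::

    from ast import literal_eval

    def check_completions(words, correct):
        """Return 1 for fail and 0 for success, like the testers here."""
        expected = set(literal_eval(correct))  # e.g. "['ab']" -> {'ab'}
        return 0 if set(words) == expected else 1

    assert check_completions(['ab'], "['ab']") == 0
    assert check_completions(['ab', 'abs'], "['ab']") == 1
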
def run_definition_test(case):
    """
    Definition tests use the same symbols like completion tests. This is
    possible because the completion tests are defined with a list::

        #? int()
        ab = 3; ab

    Returns 1 for fail and 0 for success.
    """
    def definition(correct, correct_start, path):
        def defs(line_nr, indent):
            s = jedi.Script(script.source, line_nr, indent, path)
            return set(s.definition())

        should_be = set()
        number = 0
        for index in re.finditer('(?: +|$)', correct):
            if correct == ' ':
                continue
            # -1 for the comment, +3 because of the comment start `#? `
            start = index.start()
            if base.print_debug:
                jedi.set_debug_function(None)
            number += 1
            try:
                should_be |= defs(line_nr - 1, start + correct_start)
            except Exception:
                print('could not resolve %s indent %s' % (line_nr - 1, start))
                raise
            if base.print_debug:
                jedi.set_debug_function(debug.print_to_stdout)
        # because the objects have different ids, `repr` it, then compare it.
        should_str = set(r.desc_with_module for r in should_be)
        if len(should_str) < number:
            raise Exception('Solution @%s not right, too few test results: %s'
                            % (line_nr - 1, should_str))
        return should_str

    (correct, line_nr, column, start, line) = \
        (case.correct, case.line_nr, case.column, case.start, case.line)
    script = case.script()
    should_str = definition(correct, start, script.source_path)
    result = script.definition()
    is_str = set(r.desc_with_module for r in result)
    if is_str != should_str:
        print('Solution @%s not right, received %s, wanted %s' \
            % (line_nr - 1, is_str, should_str))
        return 1
    return 0

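run_definition_test queries once per whitespace-separated expectation:
re.finditer('(?: +|$)', correct) yields one match per gap plus one at the end
of the string, and each match start plus the comment offset gives a cursor
column. The column arithmetic in isolation (values are illustrative)::

    import re

    correct = 'int() str()'  # payload of a `#? ` comment
    correct_start = 3        # columns consumed by the '#? ' prefix

    columns = [m.start() + correct_start
               for m in re.finditer('(?: +|$)', correct)]
    assert columns == [8, 14]
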
def run_goto_test(case):
    """
    Tests look like this::

        abc = 1
@@ -165,51 +67,31 @@ def run_goto_test(case):
        #! 2 ['abc=1']
        abc

    Returns 1 for fail and 0 for success.
    """
    (script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
    result = script.goto()
    comp_str = str(sorted(str(r.description) for r in result))
    if comp_str != correct:
        print('Solution @%s not right, received %s, wanted %s'\
            % (line_nr - 1, comp_str, correct))
        return 1
    return 0

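run_goto_test compares plain strings: the sorted goto descriptions are
rendered with str(), so the `#!` comment has to be written exactly as Python
prints the list. With stubbed descriptions::

    descriptions = ['abc=1']  # stand-ins for r.description values
    comp_str = str(sorted(str(d) for d in descriptions))
    assert comp_str == "['abc=1']"
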
def run_related_name_test(case):
    """
    Tests look like this::

        abc = 1
        #< abc@1,0 abc@3,0
        abc

    Returns 1 for fail and 0 for success.
    """
    (script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
    result = script.related_names()
    correct = correct.strip()
    compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1])
                     for r in result)
    wanted = []
    if not correct:
        positions = []
    else:
        positions = literal_eval(correct)
    for pos_tup in positions:
        if type(pos_tup[0]) == str:
            # this means that there is a module specified
            wanted.append(pos_tup)
        else:
            wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))

    wanted = sorted(wanted)
    if compare != wanted:
        print('Solution @%s not right, received %s, wanted %s'\
            % (line_nr - 1, compare, wanted))
        return 1
    return 0

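run_related_name_test accepts expected positions either as full
(module, line, column) tuples or as (line_offset, column) pairs relative to
the test line, which it expands with the fixed 'renaming' module name. A
sketch of that expansion, assuming a literal-tuple payload::

    from ast import literal_eval

    line_nr = 2                    # line of the statement under test
    correct = '[(-1, 0), (1, 0)]'  # hypothetical `#<` payload

    wanted = []
    for pos_tup in literal_eval(correct):
        if isinstance(pos_tup[0], str):
            wanted.append(pos_tup)  # module given explicitly
        else:
            wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))
    assert sorted(wanted) == [('renaming', 1, 0), ('renaming', 3, 0)]
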
from . import base  # required to setup import path
import jedi
from jedi._compatibility import unicode, StringIO, reduce, is_py25


sys.path.pop(0)  # pop again, because it might affect the completion


TEST_COMPLETIONS = 0
TEST_DEFINITIONS = 1
TEST_ASSIGNMENTS = 2
TEST_USAGES = 3


class IntegrationTestCase(object):
@@ -290,86 +172,3 @@ def collect_dir_tests(base_dir, test_files, thirdparty=False):
        case.path = path
        case.source = source
        yield case


def run_test(cases):
    """
    This is the completion test for some cases. The tests are not unit test
    like, they are rather integration tests.
    """
    testers = {
        TEST_COMPLETIONS: run_completion_test,
        TEST_DEFINITIONS: run_definition_test,
        TEST_ASSIGNMENTS: run_goto_test,
        TEST_USAGES: run_related_name_test,
    }

    tests = 0
    fails = 0
    for case in cases:
        tests += 1
        try:
            fails += testers[case.test_type](case)
        except Exception:
            print(traceback.format_exc())
            print(case)
            fails += 1
    return tests, fails

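run_test is a plain dispatch table: the case's test_type indexes into
testers, and every tester reports 1 for fail and 0 for success, so failures
can simply be summed. The dispatch in isolation, with trivial stand-in
testers::

    TEST_COMPLETIONS = 0
    TEST_DEFINITIONS = 1

    testers = {
        TEST_COMPLETIONS: lambda case: 0,  # always passes
        TEST_DEFINITIONS: lambda case: 1,  # always fails
    }

    class Case(object):
        def __init__(self, test_type):
            self.test_type = test_type

    cases = [Case(TEST_COMPLETIONS), Case(TEST_DEFINITIONS)]
    fails = sum(testers[case.test_type](case) for case in cases)
    assert fails == 1
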
def test_dir(completion_test_dir, thirdparty=False):
    for (path, cases) in itertools.groupby(
            collect_dir_tests(completion_test_dir, test_files, thirdparty),
            lambda case: case.path):
        f_name = os.path.basename(path)

        if thirdparty:
            lib = f_name.replace('_.py', '')
            try:
                # there is always an underline at the end.
                # It looks like: completion/thirdparty/pylab_.py
                __import__(lib)
            except ImportError:
                base.summary.append('Thirdparty-Library %s not found.' %
                                    f_name)
                continue

        num_tests, fails = run_test(cases)
        base.test_sum += num_tests

        s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
        base.tests_fail += fails
        print(s)
        base.summary.append(s)

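For thirdparty cases, test_dir derives the library name from the file name
(the trailing underscore keeps the test file importable, e.g. pylab_.py
covers pylab) and skips the file when the import fails. The probe on its own
(hypothetical helper name)::

    def thirdparty_available(f_name):
        """Strip the trailing underscore and try the import."""
        lib = f_name.replace('_.py', '')
        try:
            __import__(lib)
        except ImportError:
            return False
        return True

    # True only where matplotlib is installed
    print(thirdparty_available('pylab_.py'))
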
if __name__ == '__main__':
    try:
        i = sys.argv.index('--thirdparty')
        thirdparty = True
        sys.argv = sys.argv[:i] + sys.argv[i + 1:]
    except ValueError:
        thirdparty = False

    test_files = base.get_test_list()

    # completion tests:
    completion_test_dir = os.path.join(base.test_dir, 'completion')

    # execute tests
    test_dir(completion_test_dir)
    if test_files or thirdparty:
        completion_test_dir += '/thirdparty'
        test_dir(completion_test_dir, thirdparty=True)

    base.print_summary()
    #from guppy import hpy
    #hpy()
    #print hpy().heap()

    exit_code = 1 if base.tests_fail else 0
    if sys.hexversion < 0x02060000 and base.tests_fail <= 9:
        # Python 2.5 has major incompabillities (e.g. no property.setter),
        # therefore it is not possible to pass all tests.
        exit_code = 0
    sys.exit(exit_code)

@@ -1,8 +0,0 @@
set -e

python regression.py
python run.py
echo
python refactor.py
echo
nosetests --with-doctest --doctest-tests ../jedi/