Separate test collection and run in refactoring.py

Takafumi Arakaki
2013-03-10 22:49:51 +01:00
parent d063aa3d66
commit 0a13b8f114


@@ -8,6 +8,8 @@ import sys
 import os
 import traceback
 import re
+import itertools
+
 import base
 from jedi._compatibility import reduce
@@ -15,25 +17,54 @@ import jedi
 from jedi import refactoring
 
 
-def run_test(source, f_name, lines_to_execute):
-    """
-    This is the completion test for some cases. The tests are not unit test
-    like, they are rather integration tests.
-    It uses comments to specify a test in the next line. The comment also says,
-    which results are expected. The comment always begins with `#?`. The last
-    row symbolizes the cursor.
-
-    For example::
-
-        #? ['ab']
-        ab = 3; a
-
-        #? int()
-        ab = 3; ab
-    """
-    fails = 0
-    tests = 0
-    # parse the refactor format
+class RefactoringCase(object):
+
+    def __init__(self, name, source, line_nr, index, path, f_name,
+                 new_name, start_line_test, desired):
+        self.name = name
+        self.source = source
+        self.line_nr = line_nr
+        self.index = index
+        self.path = path
+        self.f_name = f_name
+        self.new_name = new_name
+        self.start_line_test = start_line_test
+        self.desired = desired
+
+    def run(self):
+        (source, line_nr, index, path, f_name, new_name, start_line_test) \
+            = (self.source, self.line_nr, self.index, self.path, self.f_name,
+               self.new_name, self.start_line_test)
+        script = jedi.Script(source, line_nr, index, path)
+        refactor_func = getattr(refactoring, f_name.replace('.py', ''))
+        args = (script, new_name) if new_name else (script,)
+        refactor_object = refactor_func(*args)
+
+        # try to get the right excerpt of the newfile
+        f = refactor_object.new_files()[path]
+        lines = f.splitlines()[start_line_test:]
+        end = start_line_test + len(lines)
+        pop_start = None
+        for i, l in enumerate(lines):
+            if l.startswith('# +++'):
+                end = i
+                break
+            elif '#? ' in l:
+                pop_start = i
+        lines.pop(pop_start)
+        self.result = '\n'.join(lines[:end - 1]).strip()
+        return self.result
+
+    def check(self):
+        return self.run() == self.desired
+
+    def __repr__(self):
+        return '<%s: %s:%s>' % (self.__class__.__name__,
+                                self.name, self.line_nr - 1)
+
+
+def collect_file_tests(source, f_name, lines_to_execute):
     r = r'^# --- ?([^\n]*)\n((?:(?!\n# \+\+\+).)*)' \
         r'\n# \+\+\+((?:(?!\n# ---).)*)'
     for match in re.finditer(r, source, re.DOTALL | re.MULTILINE):
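
The regular expression kept by collect_file_tests splits a refactoring test file into named blocks: a `# --- <name>` header line, the source to refactor, and the desired output after a `# +++` marker. A minimal sketch of that parsing step with made-up sample content follows; the exact meaning of the `#?` line (cursor position, new name, ...) is an assumption for illustration, not taken from the real test suite.

import re

# Same pattern as in collect_file_tests above.
REFACTOR_RE = (r'^# --- ?([^\n]*)\n((?:(?!\n# \+\+\+).)*)'
               r'\n# \+\+\+((?:(?!\n# ---).)*)')

# Hypothetical test file; the fields on the `#?` line are assumed.
sample = """\
# --- rename function
#? 4 new_foo
def foo():
    return 1
# +++
def new_foo():
    return 1
"""

for match in re.finditer(REFACTOR_RE, sample, re.DOTALL | re.MULTILINE):
    name, before, expected = match.groups()
    print(name)               # rename function
    print(before.strip())     # source block containing the `#?` line
    print(expected.strip())   # desired result after the refactoring
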
@@ -56,54 +87,67 @@ def run_test(source, f_name, lines_to_execute):
             continue
         path = os.path.abspath(refactoring_test_dir + os.path.sep + f_name)
+        yield RefactoringCase(name, source, line_nr, index, path, f_name,
+                              new_name, start_line_test, second)
+
+
+def collect_dir_tests(base_dir, test_files):
+    for f_name in os.listdir(base_dir):
+        files_to_execute = [a for a in test_files.items() if a[0] in f_name]
+        lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
+        if f_name.endswith(".py") and (not test_files or files_to_execute):
+            path = os.path.join(refactoring_test_dir, f_name)
+            with open(path) as f:
+                source = f.read()
+            for case in collect_file_tests(source, f_name, lines_to_execute):
+                yield case
+
+
+def run_test(cases):
+    """
+    This is the completion test for some cases. The tests are not unit test
+    like, they are rather integration tests.
+    It uses comments to specify a test in the next line. The comment also says,
+    which results are expected. The comment always begins with `#?`. The last
+    row symbolizes the cursor.
+
+    For example::
+
+        #? ['ab']
+        ab = 3; a
+
+        #? int()
+        ab = 3; ab
+    """
+    fails = 0
+    tests = 0
+    for case in cases:
         try:
-            script = jedi.Script(source, line_nr, index, path)
-            refactor_func = getattr(refactoring, f_name.replace('.py', ''))
-            args = (script, new_name) if new_name else (script,)
-            refactor_object = refactor_func(*args)
-
-            # try to get the right excerpt of the newfile
-            f = refactor_object.new_files()[path]
-            lines = f.splitlines()[start_line_test:]
-            end = start_line_test + len(lines)
-            pop_start = None
-            for i, l in enumerate(lines):
-                if l.startswith('# +++'):
-                    end = i
-                    break
-                elif '#? ' in l:
-                    pop_start = i
-            lines.pop(pop_start)
-            result = '\n'.join(lines[:end - 1]).strip()
-
-            if second != result:
-                print('test @%s: not the same result, %s' % (line_nr - 1, name))
-                print(' ' + repr(str(result)))
-                print(' ' + repr(second))
+            if not case.check():
+                print(case)
+                print(' ' + repr(str(case.result)))
+                print(' ' + repr(case.desired))
                 fails += 1
         except Exception:
             print(traceback.format_exc())
-            print('test @%s: %s' % (line_nr - 1, name))
+            print(case)
             fails += 1
         tests += 1
     return tests, fails
 
 
 def test_dir(refactoring_test_dir):
-    for f_name in os.listdir(refactoring_test_dir):
-        files_to_execute = [a for a in test_files.items() if a[0] in f_name]
-        lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
-        if f_name.endswith(".py") and (not test_files or files_to_execute):
-            path = os.path.join(refactoring_test_dir, f_name)
-            with open(path) as f:
-                num_tests, fails = run_test(f.read(), f_name, lines_to_execute)
+    for (path, cases) in itertools.groupby(
+            collect_dir_tests(refactoring_test_dir, test_files),
+            lambda case: case.path):
+        num_tests, fails = run_test(cases)
 
-            base.test_sum += num_tests
-            s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
-            base.tests_fail += fails
-            print(s)
-            base.summary.append(s)
+        base.test_sum += num_tests
+        f_name = os.path.basename(path)
+        s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
+        base.tests_fail += fails
+        print(s)
+        base.summary.append(s)
 
 
 if __name__ == '__main__':
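
One detail of the new test_dir worth spelling out: itertools.groupby only merges consecutive items with equal keys, which is enough here because collect_dir_tests yields all cases of one file before moving on to the next, so no sorting is needed. A self-contained sketch of the same pattern, using a stand-in case type instead of the real RefactoringCase:

import itertools
from collections import namedtuple

# Stand-in for RefactoringCase; only the `path` attribute matters here.
Case = namedtuple('Case', ['path', 'name'])


def collect_cases():
    # Like collect_dir_tests(), this yields all cases of one file in a row,
    # which is what makes groupby() work without a prior sort.
    yield Case('rename.py', 'simple')
    yield Case('rename.py', 'scoped')
    yield Case('extract.py', 'expression')


for path, cases in itertools.groupby(collect_cases(), lambda case: case.path):
    print('%s -> %s cases' % (path, len(list(cases))))
# rename.py -> 2 cases
# extract.py -> 1 cases

As in the patched test_dir, each `cases` value is a lazy iterator over one file's cases, which run_test(cases) can consume directly.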