
Merge pull request #155 from tkf/migrate-to-pytest

Migrate to pytest
David Halter
2013-03-12 03:59:16 -07:00
11 changed files with 224 additions and 414 deletions

.travis.yml

@@ -6,12 +6,11 @@ python:
- 3.2
install:
- if [[ $TRAVIS_PYTHON_VERSION == '2.5' ]]; then
pip install --use-mirrors simplejson unittest2;
export PIP_INSECURE=t;
fi
- if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then
pip install --use-mirrors unittest2;
fi
- pip install --use-mirrors nose
- pip install --use-mirrors tox
script:
- cd test
- ./test.sh
- export TOXENV=$(echo "$TRAVIS_PYTHON_VERSION" |
sed --regexp-extended 's/([0-9])\.([0-9])/py\1\2/g')
- echo "TOXENV=$TOXENV"
- tox
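
The ``sed`` call above derives the tox environment name from ``$TRAVIS_PYTHON_VERSION`` (for example, ``2.7`` becomes ``py27``). A minimal Python sketch of the same mapping, for illustration only (the function name is made up)::

    import re

    def toxenv_for(travis_python_version):
        # "2.7" -> "py27", "3.2" -> "py32"; mirrors the sed substitution above.
        return re.sub(r'([0-9])\.([0-9])', r'py\1\2', travis_python_version)

    assert toxenv_for('2.7') == 'py27'
    assert toxenv_for('3.2') == 'py32'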

pytest.ini (new file)

@@ -0,0 +1,7 @@
[pytest]
addopts = --assert=plain
# Activate `clean_jedi_cache` fixture for all tests. This should be
# fine as long as we are using `clean_jedi_cache` as a session-scoped
# fixture.
usefixtures = clean_jedi_cache
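
For reference, the ini-level ``usefixtures`` setting above has roughly the same effect as declaring the fixture with ``autouse=True`` in ``conftest.py``. A minimal sketch, assuming pytest's standard fixture API (the fixture below is a stand-in, not the real ``clean_jedi_cache``)::

    import shutil
    import tempfile

    import pytest

    @pytest.fixture(scope='session', autouse=True)
    def clean_cache_dir(request):
        # Hypothetical equivalent: applied to every test automatically
        # instead of being listed in pytest.ini.
        tmp = tempfile.mkdtemp(prefix='jedi-test-')
        request.addfinalizer(lambda: shutil.rmtree(tmp))
        return tmp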

test/base.py

@@ -1,4 +1,3 @@
import time
import sys
if sys.hexversion < 0x02070000:
import unittest2 as unittest
@@ -8,48 +7,17 @@ import os
from os.path import abspath, dirname
import functools
test_dir = dirname(abspath(__file__))
root_dir = dirname(test_dir)
sys.path.insert(0, root_dir)
import pytest
import jedi
from jedi import debug
test_sum = 0
t_start = time.time()
# Sorry I didn't use argparse here. It's because argparse is not in the
# stdlib in 2.5.
args = sys.argv[1:]
print_debug = False
try:
i = args.index('--debug')
args = args[:i] + args[i + 1:]
except ValueError:
pass
else:
print_debug = True
jedi.set_debug_function(debug.print_to_stdout)
sys.argv = sys.argv[:1] + args
summary = []
tests_fail = 0
from jedi._compatibility import is_py25
def get_test_list():
# get test list, that should be executed
test_files = {}
last = None
for arg in sys.argv[1:]:
if arg.isdigit():
if last is None:
continue
test_files[last].append(int(arg))
else:
test_files[arg] = []
last = arg
return test_files
test_dir = dirname(abspath(__file__))
root_dir = dirname(test_dir)
sample_int = 1 # This is used in completion/imports.py
class TestBase(unittest.TestCase):
@@ -76,13 +44,6 @@ class TestBase(unittest.TestCase):
return script.function_definition()
def print_summary():
print('\nSummary: (%s fails of %s tests) in %.3fs' % \
(tests_fail, test_sum, time.time() - t_start))
for s in summary:
print(s)
def cwd_at(path):
"""
Decorator to run function at `path`.
@@ -102,3 +63,32 @@ def cwd_at(path):
os.chdir(oldcwd)
return wrapper
return decorator
_py25_fails = 0
py25_allowed_fails = 9
def skip_py25_fails(func):
"""
Skip the first `py25_allowed_fails` failures in Python 2.5.
.. todo:: Remove this decorator by implementing "skip tag" for
integration tests.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
global _py25_fails
try:
func(*args, **kwds)
except AssertionError:
_py25_fails += 1
if _py25_fails > py25_allowed_fails:
raise
else:
pytest.skip("%d-th failure (there can be %d failures)" %
(_py25_fails, py25_allowed_fails))
return wrapper
if not is_py25:
skip_py25_fails = lambda f: f
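
A usage sketch for ``skip_py25_fails``, assuming it runs in the same module as the definitions above and is applied to a tester callable like the ones dispatched in ``test_integration.py`` (the function below is hypothetical)::

    @skip_py25_fails
    def check_case(case):
        # On Python 2.5 the first `py25_allowed_fails` AssertionErrors
        # raised here become skips; any further failure propagates.
        assert case is not None

    check_case(object())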

test/completion/imports.py

@@ -154,9 +154,9 @@ mod1.a
from .. import base
#? int()
base.tests_fail
base.sample_int
from ..base import tests_fail as f
from ..base import sample_int as f
#? int()
f

test/conftest.py

@@ -1,13 +1,23 @@
from os.path import join, dirname, abspath
default_base_dir = join(dirname(abspath(__file__)), 'completion')
import os
import shutil
import tempfile
import run
import pytest
from . import base
from . import run
from . import refactor
def pytest_addoption(parser):
parser.addoption(
"--base-dir", default=default_base_dir,
"--integration-case-dir",
default=os.path.join(base.test_dir, 'completion'),
help="Directory in which integration test case files locate.")
parser.addoption(
"--refactor-case-dir",
default=os.path.join(base.test_dir, 'refactor'),
help="Directory in which refactoring test case files locate.")
parser.addoption(
"--test-files", "-T", default=[], action='append',
help=(
@@ -15,7 +25,7 @@ def pytest_addoption(parser):
"For example: -T generators.py:10,13,19. "
"Note that you can use -m to specify the test case by id."))
parser.addoption(
"--thirdparty",
"--thirdparty", action='store_true',
help="Include integration tests that requires third party modules.")
@@ -38,11 +48,40 @@ def pytest_generate_tests(metafunc):
"""
:type metafunc: _pytest.python.Metafunc
"""
test_files = dict(map(parse_test_files_option,
metafunc.config.option.test_files))
if 'case' in metafunc.fixturenames:
base_dir = metafunc.config.option.base_dir
test_files = dict(map(parse_test_files_option,
metafunc.config.option.test_files))
base_dir = metafunc.config.option.integration_case_dir
thirdparty = metafunc.config.option.thirdparty
cases = list(run.collect_dir_tests(base_dir, test_files))
if thirdparty:
cases.extend(run.collect_dir_tests(
os.path.join(base_dir, 'thirdparty'), test_files))
metafunc.parametrize('case', cases)
if 'refactor_case' in metafunc.fixturenames:
base_dir = metafunc.config.option.refactor_case_dir
metafunc.parametrize(
'case',
run.collect_dir_tests(base_dir, test_files, thirdparty))
'refactor_case',
refactor.collect_dir_tests(base_dir, test_files))
@pytest.fixture(scope='session')
def clean_jedi_cache(request):
"""
Set `jedi.settings.cache_directory` to a temporary directory during tests.
Note that you can't use the built-in `tmpdir` and `monkeypatch`
fixtures here because their scope is 'function', which cannot be
used from a 'session'-scoped fixture.
This fixture is activated in ../pytest.ini.
"""
settings = base.jedi.settings
old = settings.cache_directory
tmp = tempfile.mkdtemp(prefix='jedi-test-')
settings.cache_directory = tmp
@request.addfinalizer
def restore():
settings.cache_directory = old
shutil.rmtree(tmp)
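
``parse_test_files_option`` is referenced above but not shown in this diff. Given the ``FILE_NAME[:LINE[,LINE[,...]]]`` format documented in ``test/run.py`` below, a plausible implementation might look like this (a sketch, not necessarily the project's code)::

    def parse_test_files_option(opt):
        # "generators.py:10,13" -> ("generators.py", [10, 13])
        # "generators.py"       -> ("generators.py", [])
        if ':' in opt:
            f_name, lines = opt.split(':', 1)
            return f_name, [int(l) for l in lines.split(',')]
        return opt, []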

test/refactor.py

@@ -4,13 +4,8 @@ Refactoring tests work a little bit similar to Black Box tests. But the idea is
here to compare two versions of code.
"""
from __future__ import with_statement
import sys
import os
import traceback
import re
import itertools
import base
from jedi._compatibility import reduce
import jedi
@@ -64,7 +59,7 @@ class RefactoringCase(object):
self.name, self.line_nr - 1)
def collect_file_tests(source, f_name, lines_to_execute):
def collect_file_tests(source, path, lines_to_execute):
r = r'^# --- ?([^\n]*)\n((?:(?!\n# \+\+\+).)*)' \
r'\n# \+\+\+((?:(?!\n# ---).)*)'
for match in re.finditer(r, source, re.DOTALL | re.MULTILINE):
@@ -86,7 +81,6 @@ def collect_file_tests(source, f_name, lines_to_execute):
if lines_to_execute and line_nr - 1 not in lines_to_execute:
continue
path = os.path.join(os.path.abspath(refactoring_test_dir), f_name)
yield RefactoringCase(name, source, line_nr, index, path,
new_name, start_line_test, second)
@@ -96,65 +90,8 @@ def collect_dir_tests(base_dir, test_files):
files_to_execute = [a for a in test_files.items() if a[0] in f_name]
lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
if f_name.endswith(".py") and (not test_files or files_to_execute):
path = os.path.join(refactoring_test_dir, f_name)
path = os.path.join(base_dir, f_name)
with open(path) as f:
source = f.read()
for case in collect_file_tests(source, f_name, lines_to_execute):
for case in collect_file_tests(source, path, lines_to_execute):
yield case
def run_test(cases):
"""
This is the completion test for some cases. The tests are not unit test
like, they are rather integration tests.
It uses comments to specify a test in the next line. The comment also says,
which results are expected. The comment always begins with `#?`. The last
row symbolizes the cursor.
For example::
#? ['ab']
ab = 3; a
#? int()
ab = 3; ab
"""
fails = 0
tests = 0
for case in cases:
try:
if not case.check():
print(case)
print(' ' + repr(str(case.result)))
print(' ' + repr(case.desired))
fails += 1
except Exception:
print(traceback.format_exc())
print(case)
fails += 1
tests += 1
return tests, fails
def test_dir(refactoring_test_dir):
for (path, cases) in itertools.groupby(
collect_dir_tests(refactoring_test_dir, test_files),
lambda case: case.path):
num_tests, fails = run_test(cases)
base.test_sum += num_tests
f_name = os.path.basename(path)
s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
base.tests_fail += fails
print(s)
base.summary.append(s)
if __name__ == '__main__':
refactoring_test_dir = os.path.join(base.test_dir, 'refactor')
test_files = base.get_test_list()
test_dir(refactoring_test_dir)
base.print_summary()
sys.exit(1 if base.tests_fail else 0)
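
Judging from the regular expression in ``collect_file_tests``, a refactoring case file contains one or more cases delimited by ``# ---`` (case name plus original code) and ``# +++`` (expected code). An illustrative, hypothetical case follows; the exact in-case syntax for passing the new name is not shown in this diff::

    # --- rename variable
    old_name = 1
    old_name
    # +++
    new_name = 1
    new_name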

test/run.py

@@ -17,52 +17,90 @@ There are different kind of tests:
How to run tests?
+++++++++++++++++
Basically ``run.py`` searches the ``completion`` directory for files with lines
starting with the symbol above. There is also support for third party
libraries. In a normal test run (``./run.py``) they are not being executed, you
have to provide a ``--thirdparty`` option.
Jedi uses pytest_ to run unit and integration tests. To run tests,
simply run ``py.test``. You can also use tox_ to run tests for
multiple Python versions.
Now it's much more important, that you know how test only one file (``./run.py
classes``, where ``classes`` is the name of the file to test) or even one test
(``./run.py classes 90``, which would just execute the test on line 90).
.. _pytest: http://pytest.org
.. _tox: http://testrun.org/tox
If you want to debug a test, just use the --debug option.
Integration test cases are located in the ``test/completion`` directory
and each test case is indicated by the comment ``#?`` (completions /
definitions), ``#!`` (assignments) and ``#<`` (usages). There is also
support for third-party libraries. In a normal test run they are not
executed; you have to provide the ``--thirdparty`` option.
In addition to the standard `-k` and `-m` options in py.test, you can use
the `-T` (`--test-files`) option to specify which integration test cases to run.
It takes the format ``FILE_NAME[:LINE[,LINE[,...]]]``, where
``FILE_NAME`` is a file in ``test/completion`` and ``LINE`` is the line
number of the test comment. Here are some recipes:
Run tests only in ``basic.py`` and ``imports.py``::
py.test test/test_integration.py -T basic.py -T imports.py
Run tests at lines 4, 6, and 8 in ``basic.py``::
py.test test/test_integration.py -T basic.py:4,6,8
See ``py.test --help`` for more information.
If you want to debug a test, just use the --pdb option.
Auto-Completion
+++++++++++++++
.. autofunction:: run_completion_test
Uses comments to specify a test in the next line. The comment says which
results are expected. The comment always begins with `#?`. The last row
symbolizes the cursor.
For example::
#? ['real']
a = 3; a.rea
Because it follows ``a.rea`` and a is an ``int``, which has a ``real``
property.
Definition
++++++++++
.. autofunction:: run_definition_test
Definition tests use the same symbols as completion tests. This is
possible because the completion tests are defined with a list::
#? int()
ab = 3; ab
Goto
++++
.. autofunction:: run_goto_test
Tests look like this::
abc = 1
#! ['abc=1']
abc
Additionally it is possible to add a number which describes the position of
the test (otherwise it's just the end of the line)::
#! 2 ['abc=1']
abc
Related Names
+++++++++++++
.. autofunction:: run_related_name_test
Tests look like this::
abc = 1
#< abc@1,0 abc@3,0
abc
"""
import os
import sys
import re
import traceback
import itertools
import base
from jedi._compatibility import unicode, StringIO, reduce, literal_eval, is_py25
import jedi
from jedi import debug
sys.path.pop(0) # pop again, because it might affect the completion
from jedi._compatibility import unicode, StringIO, reduce, is_py25
TEST_COMPLETIONS = 0
@@ -71,147 +109,6 @@ TEST_ASSIGNMENTS = 2
TEST_USAGES = 3
def run_completion_test(case):
"""
Uses comments to specify a test in the next line. The comment says, which
results are expected. The comment always begins with `#?`. The last row
symbolizes the cursor.
For example::
#? ['real']
a = 3; a.rea
Because it follows ``a.rea`` and a is an ``int``, which has a ``real``
property.
Returns 1 for fail and 0 for success.
"""
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
completions = script.complete()
#import cProfile; cProfile.run('script.complete()')
comp_str = set([c.word for c in completions])
if comp_str != set(literal_eval(correct)):
print('Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, comp_str, correct))
return 1
return 0
def run_definition_test(case):
"""
Definition tests use the same symbols like completion tests. This is
possible because the completion tests are defined with a list::
#? int()
ab = 3; ab
Returns 1 for fail and 0 for success.
"""
def definition(correct, correct_start, path):
def defs(line_nr, indent):
s = jedi.Script(script.source, line_nr, indent, path)
return set(s.definition())
should_be = set()
number = 0
for index in re.finditer('(?: +|$)', correct):
if correct == ' ':
continue
# -1 for the comment, +3 because of the comment start `#? `
start = index.start()
if base.print_debug:
jedi.set_debug_function(None)
number += 1
try:
should_be |= defs(line_nr - 1, start + correct_start)
except Exception:
print('could not resolve %s indent %s' % (line_nr - 1, start))
raise
if base.print_debug:
jedi.set_debug_function(debug.print_to_stdout)
# because the objects have different ids, `repr` it, then compare it.
should_str = set(r.desc_with_module for r in should_be)
if len(should_str) < number:
raise Exception('Solution @%s not right, too few test results: %s'
% (line_nr - 1, should_str))
return should_str
(correct, line_nr, column, start, line) = \
(case.correct, case.line_nr, case.column, case.start, case.line)
script = case.script()
should_str = definition(correct, start, script.source_path)
result = script.definition()
is_str = set(r.desc_with_module for r in result)
if is_str != should_str:
print('Solution @%s not right, received %s, wanted %s' \
% (line_nr - 1, is_str, should_str))
return 1
return 0
def run_goto_test(case):
"""
Tests look like this::
abc = 1
#! ['abc=1']
abc
Additionally it is possible to add a number which describes to position of
the test (otherwise it's just end of line)::
#! 2 ['abc=1']
abc
Returns 1 for fail and 0 for success.
"""
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
result = script.goto()
comp_str = str(sorted(str(r.description) for r in result))
if comp_str != correct:
print('Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, comp_str, correct))
return 1
return 0
def run_related_name_test(case):
"""
Tests look like this::
abc = 1
#< abc@1,0 abc@3,0
abc
Returns 1 for fail and 0 for success.
"""
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
result = script.related_names()
correct = correct.strip()
compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1])
for r in result)
wanted = []
if not correct:
positions = []
else:
positions = literal_eval(correct)
for pos_tup in positions:
if type(pos_tup[0]) == str:
# this means that there is a module specified
wanted.append(pos_tup)
else:
wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))
wanted = sorted(wanted)
if compare != wanted:
print('Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, compare, wanted))
return 1
return 0
class IntegrationTestCase(object):
def __init__(self, test_type, correct, line_nr, column, start, line,
@@ -274,7 +171,7 @@ def collect_file_tests(lines, lines_to_execute):
correct = None
def collect_dir_tests(base_dir, test_files, thirdparty=False):
def collect_dir_tests(base_dir, test_files):
for f_name in os.listdir(base_dir):
files_to_execute = [a for a in test_files.items() if a[0] in f_name]
lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
@@ -290,86 +187,3 @@ def collect_dir_tests(base_dir, test_files, thirdparty=False):
case.path = path
case.source = source
yield case
def run_test(cases):
"""
This is the completion test for some cases. The tests are not unit test
like, they are rather integration tests.
"""
testers = {
TEST_COMPLETIONS: run_completion_test,
TEST_DEFINITIONS: run_definition_test,
TEST_ASSIGNMENTS: run_goto_test,
TEST_USAGES: run_related_name_test,
}
tests = 0
fails = 0
for case in cases:
tests += 1
try:
fails += testers[case.test_type](case)
except Exception:
print(traceback.format_exc())
print(case)
fails += 1
return tests, fails
def test_dir(completion_test_dir, thirdparty=False):
for (path, cases) in itertools.groupby(
collect_dir_tests(completion_test_dir, test_files, thirdparty),
lambda case: case.path):
f_name = os.path.basename(path)
if thirdparty:
lib = f_name.replace('_.py', '')
try:
# there is always an underline at the end.
# It looks like: completion/thirdparty/pylab_.py
__import__(lib)
except ImportError:
base.summary.append('Thirdparty-Library %s not found.' %
f_name)
continue
num_tests, fails = run_test(cases)
base.test_sum += num_tests
s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
base.tests_fail += fails
print(s)
base.summary.append(s)
if __name__ == '__main__':
try:
i = sys.argv.index('--thirdparty')
thirdparty = True
sys.argv = sys.argv[:i] + sys.argv[i + 1:]
except ValueError:
thirdparty = False
test_files = base.get_test_list()
# completion tests:
completion_test_dir = os.path.join(base.test_dir, 'completion')
# execute tests
test_dir(completion_test_dir)
if test_files or thirdparty:
completion_test_dir += '/thirdparty'
test_dir(completion_test_dir, thirdparty=True)
base.print_summary()
#from guppy import hpy
#hpy()
#print hpy().heap()
exit_code = 1 if base.tests_fail else 0
if sys.hexversion < 0x02060000 and base.tests_fail <= 9:
# Python 2.5 has major incompatibilities (e.g. no property.setter),
# therefore it is not possible to pass all tests.
exit_code = 0
sys.exit(exit_code)
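
One non-obvious detail in ``run_definition_test`` above: the expected result set is computed by running Jedi on the ``#?`` comment line itself, placing the cursor after each space-separated expression and collecting its definition. A small sketch of just the column-splitting step (the helper name is made up)::

    import re

    def expected_columns(correct, correct_start):
        # For '#? int() str()' the string `correct` is 'int() str()' and
        # `correct_start` is the column right after '#? '; the returned
        # columns sit just past 'int()' and just past 'str()'.
        return [m.start() + correct_start
                for m in re.finditer('(?: +|$)', correct)]

    assert expected_columns('int() str()', 3) == [8, 14]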

test/test.sh (deleted)

@@ -1,8 +0,0 @@
set -e
python regression.py
python run.py
echo
python refactor.py
echo
nosetests --with-doctest --doctest-tests ../jedi/

test/test_integration.py

@@ -1,23 +1,36 @@
import os
import re
from run import \
from . import base
from .run import \
TEST_COMPLETIONS, TEST_DEFINITIONS, TEST_ASSIGNMENTS, TEST_USAGES
import jedi
from jedi._compatibility import literal_eval
def assert_case_equal(case, actual, desired):
"""
Assert ``actual == desired`` with formatted message.
This is not needed for the typical py.test use case, but as we need
``--assert=plain`` (see ../pytest.ini) to work around an issue
caused by py.test magic, let's format the message by hand.
"""
assert actual == desired, """
Test %r failed.
actual = %s
desired = %s
""" % (case, actual, desired)
def run_completion_test(case):
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
completions = script.complete()
#import cProfile; cProfile.run('script.complete()')
comp_str = set([c.word for c in completions])
if comp_str != set(literal_eval(correct)):
raise AssertionError(
'Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, comp_str, correct))
assert_case_equal(case, comp_str, set(literal_eval(correct)))
def run_definition_test(case):
@@ -52,19 +65,14 @@ def run_definition_test(case):
should_str = definition(correct, start, script.source_path)
result = script.definition()
is_str = set(r.desc_with_module for r in result)
if is_str != should_str:
raise AssertionError(
'Solution @%s not right, received %s, wanted %s'
% (line_nr - 1, is_str, should_str))
assert_case_equal(case, is_str, should_str)
def run_goto_test(case):
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
result = script.goto()
comp_str = str(sorted(str(r.description) for r in result))
if comp_str != correct:
raise AssertionError('Solution @%s not right, received %s, wanted %s'
% (line_nr - 1, comp_str, correct))
assert_case_equal(case, comp_str, correct)
def run_related_name_test(case):
@@ -85,14 +93,11 @@ def run_related_name_test(case):
else:
wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))
wanted = sorted(wanted)
if compare != wanted:
raise AssertionError('Solution @%s not right, received %s, wanted %s'
% (line_nr - 1, compare, wanted))
assert_case_equal(case, compare, sorted(wanted))
def test_integration(case, monkeypatch, pytestconfig):
repo_root = os.path.dirname(os.path.dirname(pytestconfig.option.base_dir))
repo_root = base.root_dir
monkeypatch.chdir(os.path.join(repo_root, 'jedi'))
testers = {
TEST_COMPLETIONS: run_completion_test,
@@ -100,4 +105,15 @@ def test_integration(case, monkeypatch, pytestconfig):
TEST_ASSIGNMENTS: run_goto_test,
TEST_USAGES: run_related_name_test,
}
testers[case.test_type](case)
base.skip_py25_fails(testers[case.test_type])(case)
def test_refactor(refactor_case):
"""
Run refactoring test case.
:type refactor_case: :class:`.refactor.RefactoringCase`
"""
refactor_case.run()
assert_case_equal(refactor_case,
refactor_case.result, refactor_case.desired)
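
To see what a failure looks like under ``--assert=plain``, here is a self-contained demonstration of the hand-formatted message produced by the ``assert_case_equal`` pattern above (the case label and values are made up)::

    def assert_case_equal(case, actual, desired):
        # Mirrors the helper above: format the message by hand because
        # plain asserts do not show the compared values.
        msg = 'Test %r failed.\nactual = %s\ndesired = %s' % (case, actual, desired)
        assert actual == desired, msg

    try:
        assert_case_equal('basic.py:4', set(['abc']), set(['abd']))
    except AssertionError as exc:
        print(exc)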

test/test_regression.py

@@ -11,7 +11,9 @@ import itertools
import os
import textwrap
from base import TestBase, unittest, cwd_at
import pytest
from .base import TestBase, unittest, cwd_at
import jedi
from jedi._compatibility import is_py25, utf8, unicode
@@ -524,5 +526,16 @@ class TestSpeed(TestBase):
script.function_definition()
#print(jedi.imports.imports_processed)
@pytest.mark.skipif("sys.version_info >= (3,0)")
def test_settings_module():
"""
jedi.settings and jedi.cache.settings must be the same module.
"""
from jedi import cache
from jedi import settings
assert cache.settings is settings
if __name__ == '__main__':
unittest.main()

tox.ini

@@ -1,16 +1,13 @@
[tox]
envlist = py25, py26, py27, py32
[testenv]
setenv =
XDG_CACHE_HOME={envtmpdir}/cache
deps =
nose
pytest
commands =
python regression.py
python run.py
python refactor.py
nosetests --with-doctest --doctest-tests {toxinidir}/jedi
changedir = test
py.test []
# Doctests can't be run with the main tests because then py.test
# tries to import broken python files under test/*/.
py.test --doctest-modules {toxinidir}/jedi
[testenv:py25]
deps =
simplejson
@@ -20,3 +17,9 @@ deps =
deps =
unittest2
{[testenv]deps}
[testenv:py32]
# TODO: Without this setting, tests use ~/.cache/jedi/.
# There could be a bug due to the import hack.
# See test_settings_module in test/test_regression.py.
setenv =
XDG_CACHE_HOME={envtmpdir}/cache