From 932ce397d6d92d18932384df85f4cf6574d52ce1 Mon Sep 17 00:00:00 2001 From: Takafumi Arakaki Date: Sun, 10 Mar 2013 12:21:00 +0100 Subject: [PATCH 1/7] Separate test collection and run --- test/run.py | 117 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 69 insertions(+), 48 deletions(-) diff --git a/test/run.py b/test/run.py index 96d14198..5d33bd96 100755 --- a/test/run.py +++ b/test/run.py @@ -64,6 +64,12 @@ from jedi import debug sys.path.pop(0) # pop again, because it might affect the completion +TEST_COMPLETIONS = 0 +TEST_DEFINITIONS = 1 +TEST_ASSIGNMENTS = 2 +TEST_USAGES = 3 + + def run_completion_test(script, correct, line_nr): """ Uses comments to specify a test in the next line. The comment says, which @@ -169,6 +175,46 @@ def run_related_name_test(script, correct, line_nr): return 0 +def collect_tests(lines, lines_to_execute): + makecase = lambda t: (t, correct, line_nr, column, start, line) + start = None + correct = None + test_type = None + for line_nr, line in enumerate(lines): + line_nr += 1 # py2.5 doesn't know about the additional enumerate param + line = unicode(line) + if correct: + r = re.match('^(\d+)\s*(.*)$', correct) + if r: + column = int(r.group(1)) + correct = r.group(2) + start += r.regs[2][0] # second group, start index + else: + column = len(line) - 1 # -1 for the \n + if test_type == '!': + yield makecase(TEST_ASSIGNMENTS) + elif test_type == '<': + yield makecase(TEST_USAGES) + elif correct.startswith('['): + yield makecase(TEST_COMPLETIONS) + else: + yield makecase(TEST_DEFINITIONS) + correct = None + else: + try: + r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]+)', line) + # test_type is ? for completion and ! for goto + test_type = r.group(1) + correct = r.group(2) + start = r.start() + except AttributeError: + correct = None + else: + # skip the test, if this is not specified test + if lines_to_execute and line_nr not in lines_to_execute: + correct = None + + def run_test(source, f_name, lines_to_execute): """ This is the completion test for some cases. 
The tests are not unit test @@ -203,55 +249,30 @@ def run_test(source, f_name, lines_to_execute): % (line_nr - 1, should_str)) return should_str - fails = 0 + def run_definition_test_wrapper(script, correct, line_nr): + should_str = definition(correct, start, path) + return run_definition_test(script, should_str, line_nr) + + testers = { + TEST_COMPLETIONS: run_completion_test, + TEST_DEFINITIONS: run_definition_test_wrapper, + TEST_ASSIGNMENTS: run_goto_test, + TEST_USAGES: run_related_name_test, + } + tests = 0 - correct = None - test_type = None - start = None - for line_nr, line in enumerate(StringIO(source)): - line_nr += 1 # py2.5 doesn't know about the additional enumerate param - line = unicode(line) - if correct: - r = re.match('^(\d+)\s*(.*)$', correct) - if r: - index = int(r.group(1)) - correct = r.group(2) - start += r.regs[2][0] # second group, start index - else: - index = len(line) - 1 # -1 for the \n - # if a list is wanted, use the completion test, otherwise the - # definition test - path = completion_test_dir + os.path.sep + f_name - try: - script = jedi.Script(source, line_nr, index, path) - if test_type == '!': - fails += run_goto_test(script, correct, line_nr) - elif test_type == '<': - fails += run_related_name_test(script, correct, line_nr) - elif correct.startswith('['): - fails += run_completion_test(script, correct, line_nr) - else: - should_str = definition(correct, start, path) - fails += run_definition_test(script, should_str, line_nr) - except Exception: - print(traceback.format_exc()) - print('test @%s: %s' % (line_nr - 1, line)) - fails += 1 - correct = None - tests += 1 - else: - try: - r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]+)', line) - # test_type is ? for completion and ! for goto - test_type = r.group(1) - correct = r.group(2) - start = r.start() - except AttributeError: - correct = None - else: - # reset the test, if only one specific test is wanted - if lines_to_execute and line_nr not in lines_to_execute: - correct = None + fails = 0 + cases = collect_tests(StringIO(source), lines_to_execute) + path = completion_test_dir + os.path.sep + f_name + for (test_type, correct, line_nr, column, start, line) in cases: + tests += 1 + try: + script = jedi.Script(source, line_nr, column, path) + fails += testers[test_type](script, correct, line_nr) + except Exception: + print(traceback.format_exc()) + print('test @%s: %s' % (line_nr - 1, line)) + fails += 1 return tests, fails From 418bce909a1cc44d366b935e24baf2ef6f6b70d1 Mon Sep 17 00:00:00 2001 From: Takafumi Arakaki Date: Sun, 10 Mar 2013 13:16:05 +0100 Subject: [PATCH 2/7] run_{test_type} functions takes same args now --- test/run.py | 79 ++++++++++++++++++++++++++--------------------------- 1 file changed, 38 insertions(+), 41 deletions(-) diff --git a/test/run.py b/test/run.py index 5d33bd96..89f7312f 100755 --- a/test/run.py +++ b/test/run.py @@ -70,7 +70,7 @@ TEST_ASSIGNMENTS = 2 TEST_USAGES = 3 -def run_completion_test(script, correct, line_nr): +def run_completion_test(script, correct, line_nr, *_): """ Uses comments to specify a test in the next line. The comment says, which results are expected. The comment always begins with `#?`. The last row @@ -97,7 +97,7 @@ def run_completion_test(script, correct, line_nr): return 0 -def run_definition_test(script, should_str, line_nr): +def run_definition_test(script, correct, line_nr, column, start, line): """ Definition tests use the same symbols like completion tests. 
This is possible because the completion tests are defined with a list:: @@ -107,6 +107,36 @@ def run_definition_test(script, should_str, line_nr): Returns 1 for fail and 0 for success. """ + def definition(correct, correct_start, path): + def defs(line_nr, indent): + s = jedi.Script(script.source, line_nr, indent, path) + return set(s.definition()) + + should_be = set() + number = 0 + for index in re.finditer('(?: +|$)', correct): + if correct == ' ': + continue + # -1 for the comment, +3 because of the comment start `#? ` + start = index.start() + if base.print_debug: + jedi.set_debug_function(None) + number += 1 + try: + should_be |= defs(line_nr - 1, start + correct_start) + except Exception: + print('could not resolve %s indent %s' % (line_nr - 1, start)) + raise + if base.print_debug: + jedi.set_debug_function(debug.print_to_stdout) + # because the objects have different ids, `repr` it, then compare it. + should_str = set(r.desc_with_module for r in should_be) + if len(should_str) < number: + raise Exception('Solution @%s not right, too few test results: %s' + % (line_nr - 1, should_str)) + return should_str + + should_str = definition(correct, start, script.source_path) result = script.definition() is_str = set(r.desc_with_module for r in result) if is_str != should_str: @@ -116,7 +146,7 @@ def run_definition_test(script, should_str, line_nr): return 0 -def run_goto_test(script, correct, line_nr): +def run_goto_test(script, correct, line_nr, *_): """ Tests look like this:: @@ -141,7 +171,7 @@ def run_goto_test(script, correct, line_nr): return 0 -def run_related_name_test(script, correct, line_nr): +def run_related_name_test(script, correct, line_nr, *_): """ Tests look like this:: @@ -220,55 +250,22 @@ def run_test(source, f_name, lines_to_execute): This is the completion test for some cases. The tests are not unit test like, they are rather integration tests. """ - def definition(correct, correct_start, path): - def defs(line_nr, indent): - script = jedi.Script(source, line_nr, indent, path) - return set(script.definition()) - - should_be = set() - number = 0 - for index in re.finditer('(?: +|$)', correct): - if correct == ' ': - continue - # -1 for the comment, +3 because of the comment start `#? ` - start = index.start() - if base.print_debug: - jedi.set_debug_function(None) - number += 1 - try: - should_be |= defs(line_nr - 1, start + correct_start) - except Exception: - print('could not resolve %s indent %s' % (line_nr - 1, start)) - raise - if base.print_debug: - jedi.set_debug_function(debug.print_to_stdout) - # because the objects have different ids, `repr` it, then compare it. 
- should_str = set(r.desc_with_module for r in should_be) - if len(should_str) < number: - raise Exception('Solution @%s not right, too few test results: %s' - % (line_nr - 1, should_str)) - return should_str - - def run_definition_test_wrapper(script, correct, line_nr): - should_str = definition(correct, start, path) - return run_definition_test(script, should_str, line_nr) - testers = { TEST_COMPLETIONS: run_completion_test, - TEST_DEFINITIONS: run_definition_test_wrapper, + TEST_DEFINITIONS: run_definition_test, TEST_ASSIGNMENTS: run_goto_test, TEST_USAGES: run_related_name_test, } tests = 0 fails = 0 - cases = collect_tests(StringIO(source), lines_to_execute) path = completion_test_dir + os.path.sep + f_name - for (test_type, correct, line_nr, column, start, line) in cases: + for case in collect_tests(StringIO(source), lines_to_execute): + (test_type, correct, line_nr, column, start, line) = case tests += 1 try: script = jedi.Script(source, line_nr, column, path) - fails += testers[test_type](script, correct, line_nr) + fails += testers[test_type](script, *case[1:]) except Exception: print(traceback.format_exc()) print('test @%s: %s' % (line_nr - 1, line)) From 6a10f795511a5e02c199babd64acc276750b9930 Mon Sep 17 00:00:00 2001 From: Takafumi Arakaki Date: Sun, 10 Mar 2013 14:13:42 +0100 Subject: [PATCH 3/7] Do not destruct test case in run_test --- test/run.py | 46 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/test/run.py b/test/run.py index 89f7312f..f4a553e8 100755 --- a/test/run.py +++ b/test/run.py @@ -70,7 +70,7 @@ TEST_ASSIGNMENTS = 2 TEST_USAGES = 3 -def run_completion_test(script, correct, line_nr, *_): +def run_completion_test(case): """ Uses comments to specify a test in the next line. The comment says, which results are expected. The comment always begins with `#?`. The last row @@ -86,6 +86,7 @@ def run_completion_test(script, correct, line_nr, *_): Returns 1 for fail and 0 for success. """ + (script, correct, line_nr) = (case.script(), case.correct, case.line_nr) completions = script.complete() #import cProfile; cProfile.run('script.complete()') @@ -97,7 +98,7 @@ def run_completion_test(script, correct, line_nr, *_): return 0 -def run_definition_test(script, correct, line_nr, column, start, line): +def run_definition_test(case): """ Definition tests use the same symbols like completion tests. This is possible because the completion tests are defined with a list:: @@ -136,6 +137,9 @@ def run_definition_test(script, correct, line_nr, column, start, line): % (line_nr - 1, should_str)) return should_str + (correct, line_nr, column, start, line) = \ + (case.correct, case.line_nr, case.column, case.start, case.line) + script = case.script() should_str = definition(correct, start, script.source_path) result = script.definition() is_str = set(r.desc_with_module for r in result) @@ -146,7 +150,7 @@ def run_definition_test(script, correct, line_nr, column, start, line): return 0 -def run_goto_test(script, correct, line_nr, *_): +def run_goto_test(case): """ Tests look like this:: @@ -162,6 +166,7 @@ def run_goto_test(script, correct, line_nr, *_): Returns 1 for fail and 0 for success. 
""" + (script, correct, line_nr) = (case.script(), case.correct, case.line_nr) result = script.goto() comp_str = str(sorted(str(r.description) for r in result)) if comp_str != correct: @@ -171,7 +176,7 @@ def run_goto_test(script, correct, line_nr, *_): return 0 -def run_related_name_test(script, correct, line_nr, *_): +def run_related_name_test(case): """ Tests look like this:: @@ -181,6 +186,7 @@ def run_related_name_test(script, correct, line_nr, *_): Returns 1 for fail and 0 for success. """ + (script, correct, line_nr) = (case.script(), case.correct, case.line_nr) result = script.related_names() correct = correct.strip() compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1]) @@ -205,8 +211,30 @@ def run_related_name_test(script, correct, line_nr, *_): return 0 +class IntegrationTestCase(object): + + def __init__(self, test_type, correct, line_nr, column, start, line, + path=None): + self.test_type = test_type + self.correct = correct + self.line_nr = line_nr + self.column = column + self.start = start + self.line = line + self.path = path + + def __repr__(self): + name = os.path.basename(self.path) if self.path else None + return '<%s: %s:%s:%s>' % (self.__class__.__name__, + name, self.line_nr, self.line.rstrip()) + + def script(self): + return jedi.Script(self.source, self.line_nr, self.column, self.path) + + def collect_tests(lines, lines_to_execute): - makecase = lambda t: (t, correct, line_nr, column, start, line) + makecase = lambda t: IntegrationTestCase(t, correct, line_nr, column, + start, line) start = None correct = None test_type = None @@ -261,14 +289,14 @@ def run_test(source, f_name, lines_to_execute): fails = 0 path = completion_test_dir + os.path.sep + f_name for case in collect_tests(StringIO(source), lines_to_execute): - (test_type, correct, line_nr, column, start, line) = case + case.path = path + case.source = source tests += 1 try: - script = jedi.Script(source, line_nr, column, path) - fails += testers[test_type](script, *case[1:]) + fails += testers[case.test_type](case) except Exception: print(traceback.format_exc()) - print('test @%s: %s' % (line_nr - 1, line)) + print(case) fails += 1 return tests, fails From 00912e69fe952b82392d38a8b3071cd125714dc1 Mon Sep 17 00:00:00 2001 From: Takafumi Arakaki Date: Sun, 10 Mar 2013 15:04:53 +0100 Subject: [PATCH 4/7] Completely separate test collection and run --- test/run.py | 75 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 42 insertions(+), 33 deletions(-) diff --git a/test/run.py b/test/run.py index f4a553e8..c3d53455 100755 --- a/test/run.py +++ b/test/run.py @@ -52,6 +52,7 @@ import os import sys import re import traceback +import itertools import base @@ -232,7 +233,7 @@ class IntegrationTestCase(object): return jedi.Script(self.source, self.line_nr, self.column, self.path) -def collect_tests(lines, lines_to_execute): +def collect_file_tests(lines, lines_to_execute): makecase = lambda t: IntegrationTestCase(t, correct, line_nr, column, start, line) start = None @@ -273,7 +274,25 @@ def collect_tests(lines, lines_to_execute): correct = None -def run_test(source, f_name, lines_to_execute): +def collect_dir_tests(base_dir, test_files, thirdparty=False): + for f_name in os.listdir(base_dir): + files_to_execute = [a for a in test_files.items() if a[0] in f_name] + lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) + if f_name.endswith(".py") and (not test_files or files_to_execute): + # for python2.5 certain tests are not being done, because it + # only has these 
features partially. + if is_py25 and f_name in ['generators.py', 'types.py']: + continue + path = os.path.join(base_dir, f_name) + source = open(path).read() + for case in collect_file_tests(StringIO(source), + lines_to_execute): + case.path = path + case.source = source + yield case + + +def run_test(cases): """ This is the completion test for some cases. The tests are not unit test like, they are rather integration tests. @@ -287,10 +306,7 @@ def run_test(source, f_name, lines_to_execute): tests = 0 fails = 0 - path = completion_test_dir + os.path.sep + f_name - for case in collect_tests(StringIO(source), lines_to_execute): - case.path = path - case.source = source + for case in cases: tests += 1 try: fails += testers[case.test_type](case) @@ -302,36 +318,29 @@ def run_test(source, f_name, lines_to_execute): def test_dir(completion_test_dir, thirdparty=False): - for f_name in os.listdir(completion_test_dir): - files_to_execute = [a for a in test_files.items() if a[0] in f_name] - lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) - if f_name.endswith(".py") and (not test_files or files_to_execute): - # for python2.5 certain tests are not being done, because it - # only has these features partially. - if is_py25 and f_name in ['generators.py', 'types.py']: + for (path, cases) in itertools.groupby( + collect_dir_tests(completion_test_dir, test_files, thirdparty), + lambda case: case.path): + f_name = os.path.basename(path) + + if thirdparty: + lib = f_name.replace('_.py', '') + try: + # there is always an underline at the end. + # It looks like: completion/thirdparty/pylab_.py + __import__(lib) + except ImportError: + base.summary.append('Thirdparty-Library %s not found.' % + f_name) continue - if thirdparty: - lib = f_name.replace('_.py', '') - try: - # there is always an underline at the end. - # It looks like: completion/thirdparty/pylab_.py - __import__(lib) - except ImportError: - base.summary.append('Thirdparty-Library %s not found.' % - f_name) - continue + num_tests, fails = run_test(cases) + base.test_sum += num_tests - path = os.path.join(completion_test_dir, f_name) - f = open(path) - num_tests, fails = run_test(f.read(), f_name, lines_to_execute) - global test_sum - base.test_sum += num_tests - - s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name) - base.tests_fail += fails - print(s) - base.summary.append(s) + s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name) + base.tests_fail += fails + print(s) + base.summary.append(s) if __name__ == '__main__': From 20c9709aefaf55cdad287d9d91125889a29e6f9f Mon Sep 17 00:00:00 2001 From: Takafumi Arakaki Date: Sun, 10 Mar 2013 17:03:06 +0100 Subject: [PATCH 5/7] Do not change cwd at import time --- test/base.py | 29 ++++++++++++++++++++++++++--- test/refactor.py | 2 +- test/regression.py | 4 +++- test/run.py | 2 +- 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/test/base.py b/test/base.py index f78e939b..3557d738 100644 --- a/test/base.py +++ b/test/base.py @@ -4,13 +4,15 @@ import time import sys import os from os.path import abspath, dirname - -sys.path.insert(0, abspath(dirname(abspath(__file__)) + '/..')) -os.chdir(os.path.dirname(os.path.abspath(__file__)) + '/../jedi') +import functools import jedi from jedi import debug + +test_dir = dirname(abspath(__file__)) + + test_sum = 0 t_start = time.time() # Sorry I didn't use argparse here. 
It's because argparse is not in the @@ -77,3 +79,24 @@ def print_summary(): (tests_fail, test_sum, time.time() - t_start)) for s in summary: print(s) + + +def cwd_at(path): + """ + Decorator to run function at `path`. + + :type path: str + :arg path: relative path from repository root (e.g., ``'jedi'``). + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwds): + try: + oldcwd = os.getcwd() + repo_root = os.path.dirname(test_dir) + os.chdir(os.path.join(repo_root, path)) + return func(*args, **kwds) + finally: + os.chdir(oldcwd) + return wrapper + return decorator diff --git a/test/refactor.py b/test/refactor.py index 74228ecb..4f01edd7 100755 --- a/test/refactor.py +++ b/test/refactor.py @@ -107,7 +107,7 @@ def test_dir(refactoring_test_dir): if __name__ == '__main__': - refactoring_test_dir = '../test/refactor' + refactoring_test_dir = os.path.join(base.test_dir, 'refactor') test_files = base.get_test_list() test_dir(refactoring_test_dir) diff --git a/test/regression.py b/test/regression.py index 8f01ea14..2212ba05 100755 --- a/test/regression.py +++ b/test/regression.py @@ -11,7 +11,7 @@ import functools import itertools import os -from base import TestBase +from base import TestBase, cwd_at import jedi from jedi._compatibility import is_py25, utf8, unicode @@ -131,6 +131,7 @@ class TestRegression(TestBase): assert self.definition("import sys_blabla", (1, 8)) == [] assert len(self.definition("import sys", (1, 8))) == 1 + @cwd_at('jedi') def test_complete_on_empty_import(self): # should just list the files in the directory assert 10 < len(self.complete("from .", path='')) < 30 @@ -234,6 +235,7 @@ class TestRegression(TestBase): s = """def foo(""" assert self.function_definition(s) is None + @cwd_at('jedi') def test_add_dynamic_mods(self): api.settings.additional_dynamic_modules = ['dynamic.py'] # Fictional module that defines a function. diff --git a/test/run.py b/test/run.py index c3d53455..289f7ce5 100755 --- a/test/run.py +++ b/test/run.py @@ -354,7 +354,7 @@ if __name__ == '__main__': test_files = base.get_test_list() # completion tests: - completion_test_dir = '../test/completion' + completion_test_dir = os.path.join(base.test_dir, 'completion') # execute tests test_dir(completion_test_dir) From 3b41a47c08df983d4b83186be57f6c321e0fc3ee Mon Sep 17 00:00:00 2001 From: Takafumi Arakaki Date: Sun, 10 Mar 2013 18:58:32 +0100 Subject: [PATCH 6/7] Re-add sys.path setup in test/base.py --- test/base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/base.py b/test/base.py index 3557d738..29cc966b 100644 --- a/test/base.py +++ b/test/base.py @@ -6,13 +6,13 @@ import os from os.path import abspath, dirname import functools +test_dir = dirname(abspath(__file__)) +root_dir = dirname(test_dir) +sys.path.insert(0, root_dir) + import jedi from jedi import debug - -test_dir = dirname(abspath(__file__)) - - test_sum = 0 t_start = time.time() # Sorry I didn't use argparse here. 
It's because argparse is not in the From c3aaf7b4bade6a7054605d87bcadb91b93d35eb1 Mon Sep 17 00:00:00 2001 From: Takafumi Arakaki Date: Sun, 10 Mar 2013 20:26:09 +0100 Subject: [PATCH 7/7] Show line_nr of test comment, not the line after --- test/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/run.py b/test/run.py index 289f7ce5..106f6359 100755 --- a/test/run.py +++ b/test/run.py @@ -227,7 +227,7 @@ class IntegrationTestCase(object): def __repr__(self): name = os.path.basename(self.path) if self.path else None return '<%s: %s:%s:%s>' % (self.__class__.__name__, - name, self.line_nr, self.line.rstrip()) + name, self.line_nr - 1, self.line.rstrip()) def script(self): return jedi.Script(self.source, self.line_nr, self.column, self.path)
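
Note on the comment syntax that collect_file_tests() in these patches parses: the sketch below is hypothetical test-file content, not part of the series, and the expected values shown are only placeholders (real ones depend on what jedi returns for the source in question)::

    #? ['upper']
    "foo".upp

    #? str()
    "foo".upper()

Each `#?` comment states the expectation for the line that follows it, with the cursor at the end of that line, or at the column given by an optional leading number in the comment. If the expectation starts with `[` it is treated as a completion test, otherwise as a definition test; `#!` and `#<` mark goto (assignment) and related-name (usage) tests in the same way, matching the TEST_* constants introduced in patch 1.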