diff --git a/_compatibility.py b/_compatibility.py
index 23af7262..27102c85 100644
--- a/_compatibility.py
+++ b/_compatibility.py
@@ -2,6 +2,8 @@
 This is a compatibility module, to make it possible to use jedi also with older
 python versions.
 """
+def is_py3k():
+    return sys.hexversion >= 0x03000000
 import sys

 # next was defined in python 2.6, in python 3 obj.next won't be possible
@@ -58,7 +60,7 @@ except NameError:
         return s.decode("utf-8")

 # exec function
-if sys.hexversion >= 0x03000000:
+if is_py3k():
     def exec_function(source, global_map):
         exec(source, global_map)
 else:
@@ -67,7 +69,7 @@ else:

 # tokenize function
 import tokenize
-if sys.hexversion >= 0x03000000:
+if is_py3k():
     tokenize_func = tokenize.tokenize
 else:
     tokenize_func = tokenize.generate_tokens
@@ -79,7 +81,7 @@ except ImportError:
     from io import BytesIO

 # hasattr function used because python
-if sys.hexversion >= 0x03000000:
+if is_py3k():
     hasattr = hasattr
 else:
     def hasattr(obj, name):
diff --git a/evaluate.py b/evaluate.py
index 0128bc8b..5b5e7005 100644
--- a/evaluate.py
+++ b/evaluate.py
@@ -27,7 +27,6 @@ import itertools
 import copy

 import parsing
-import modules
 import debug
 import builtin
 import imports
@@ -793,7 +792,8 @@ def get_defined_names_for_position(obj, position=None, start_scope=None):
     return names_new


-def get_names_for_scope(scope, position=None, star_search=True):
+def get_names_for_scope(scope, position=None, star_search=True,
+                        include_builtin=True):
     """
     Get all completions possible for the current scope.
     The star search option is only here to provide an optimization. Otherwise
@@ -820,9 +820,10 @@ def get_names_for_scope(scope, position=None, star_search=True):
             for g in get_names_for_scope(s, star_search=False):
                 yield g

-    # Add builtins to the global scope.
-    builtin_scope = builtin.Builtin.scope
-    yield builtin_scope, builtin_scope.get_defined_names()
+    # Add builtins to the global scope.
+    if include_builtin:
+        builtin_scope = builtin.Builtin.scope
+        yield builtin_scope, builtin_scope.get_defined_names()


 def get_scopes_for_name(scope, name_str, position=None, search_global=False):
diff --git a/functions.py b/functions.py
index 0bdfeb63..884f6ddf 100644
--- a/functions.py
+++ b/functions.py
@@ -133,7 +133,7 @@ def complete(source, line, column, source_path):
     :param col: The column to complete in.
     :type col: int
     :param source_path: The path in the os, the current module is in.
-    :type source_path: int
+    :type source_path: str

     :return: list of Completion objects.
     :rtype: list
@@ -186,8 +186,7 @@ def prepare_goto(source, position, source_path, is_like_search):
     user_stmt = f.parser.user_stmt

     if isinstance(user_stmt, parsing.Import):
-        scopes = [imports.ImportPath(user_stmt, is_like_search,
-                                     evaluate.follow_path)]
+        scopes = [imports.ImportPath(user_stmt, is_like_search)]
     else:
         # just parse one statement, take it and evaluate it
         r = parsing.PyFuzzyParser(path, source_path)
diff --git a/imports.py b/imports.py
new file mode 100644
index 00000000..a74cc45a
--- /dev/null
+++ b/imports.py
@@ -0,0 +1,157 @@
+import os
+import pkgutil
+import imp
+
+import builtin
+import modules
+import debug
+import parsing
+import evaluate
+
+
+class ModuleNotFound(Exception):
+    pass
+
+
+class ImportPath(object):
+    global_namespace = object()
+    def __init__(self, import_stmt, is_like_search=False):
+        """ Collects the import path parts of an import statement. """
+        #print import_stmt
+        self.import_path = []
+        if import_stmt.from_ns:
+            self.import_path += import_stmt.from_ns.names
+        if import_stmt.namespace:
+            self.import_path += import_stmt.namespace.names
+
+        if is_like_search:
+            # drop one path part, because that is used by the like search
+            self.import_path.pop()
+
+        self.file_path = os.path.dirname(import_stmt.get_parent_until().path)
+
+    def get_defined_names(self):
+        names = []
+        for scope in self.follow():
+            if scope is ImportPath.global_namespace:
+                names += self.get_module_names()
+                names += self.get_module_names([self.file_path])
+            else:
+                for s, n in evaluate.get_names_for_scope(scope,
+                                                include_builtin=False):
+                    names += n
+                if isinstance(scope, parsing.Module) \
+                        and scope.path.endswith('__init__.py'):
+                    names += \
+                        self.get_module_names([os.path.dirname(scope.path)])
+        return names
+
+    def get_module_names(self, search_path=None):
+        names = []
+        for module_loader, name, is_pkg in pkgutil.iter_modules(search_path):
+            names.append(parsing.Name([name], (float('inf'), float('inf')),
+                                      (float('inf'), float('inf'))))
+        return names
+
+    def follow(self):
+        """ Returns the scopes (modules) this import points to, including
+        the targets of any star imports. """
+        if self.import_path:
+            scope, rest = self.follow_file_system()
+            if rest:
+                scopes = evaluate.follow_path(iter(rest), scope)
+            else:
+                scopes = [scope]
+
+            new = []
+            for scope in scopes:
+                new += remove_star_imports(scope)
+            scopes += new
+        else:
+            scopes = [ImportPath.global_namespace]
+        debug.dbg('after import', scopes)
+        return scopes
+
+    def follow_file_system(self):
+        """
+        Find a module with a path (of the module, like usb.backend.libusb10).
+        TODO: relative imports
+        """
+        def follow_str(ns, string):
+            debug.dbg('follow_module', ns, string)
+            if ns:
+                path = [ns[1]]
+            else:
+                path = None
+            debug.dbg('search_module', string, path, self.file_path)
+            try:
+                i = imp.find_module(string, path)
+            except ImportError:
+                # find builtins (omit path):
+                i = imp.find_module(string, builtin.module_find_path)
+            return i
+
+        # TODO handle relative paths - they are included in the import object
+        current_namespace = None
+        builtin.module_find_path.insert(0, self.file_path)
+        # now execute those paths
+        rest = []
+        for i, s in enumerate(self.import_path):
+            try:
+                current_namespace = follow_str(current_namespace, s)
+            except ImportError:
+                if current_namespace:
+                    rest = self.import_path[i:]
+                else:
+                    raise ModuleNotFound(
+                            'The module you searched has not been found')
+
+        builtin.module_find_path.pop(0)
+        path = current_namespace[1]
+        is_package_directory = current_namespace[2][2] == imp.PKG_DIRECTORY
+
+        f = None
+        if is_package_directory or current_namespace[0]:
+            # is a directory module
+            if is_package_directory:
+                path += '/__init__.py'
+                with open(path) as f:
+                    source = f.read()
+            else:
+                source = current_namespace[0].read()
+            if path.endswith('.py'):
+                f = modules.Module(path, source)
+            else:
+                f = builtin.Parser(path=path)
+        else:
+            f = builtin.Parser(name=path)
+
+        return f.parser.top, rest
+
+def strip_imports(scopes):
+    """
+    Here we strip the imports - they don't get resolved necessarily.
+    Is this really used anymore?
+    """
+    result = []
+    for s in scopes:
+        if isinstance(s, parsing.Import):
+            try:
+                result += ImportPath(s).follow()
+            except ModuleNotFound:
+                debug.warning('Module not found: ' + str(s))
+        else:
+            result.append(s)
+    return result
+
+def remove_star_imports(scope):
+    """ Recursively follows the star imports of `scope` and returns the
+    set of modules they point to. """
+    modules = strip_imports(i for i in scope.get_imports() if i.star)
+    new = []
+    for m in modules:
+        new += remove_star_imports(m)
+    modules += new
+
+    # Filter duplicate modules.
+    return set(modules)
diff --git a/modules.py b/modules.py
index b79b0df2..8838f474 100644
--- a/modules.py
+++ b/modules.py
@@ -1,10 +1,8 @@
 from __future__ import with_statement
 import re
 import tokenize
-import imp
 import os

-import debug
 import parsing
 import builtin

@@ -12,10 +10,6 @@ files = {}
 load_module_cb = None


-class ModuleNotFound(Exception):
-    pass
-
-
 class Module(builtin.CachedModule):
     """
     Manages all files, that are parsed and caches them.
diff --git a/parsing.py b/parsing.py
index d0f53f61..2c1bc12b 100644
--- a/parsing.py
+++ b/parsing.py
@@ -28,7 +28,8 @@ Ignored statements:
 - print (no use for it, just slows down)
 - exec (dangerous - not controllable)
 """
-from _compatibility import next, literal_eval, tokenize_func, BytesIO, property
+from _compatibility import (next, literal_eval, tokenize_func, BytesIO,
+                            property, is_py3k)

 import tokenize
 import re
@@ -937,6 +938,8 @@ class PyFuzzyParser(object):
         self.user_position = user_position
         self.user_stmt = None
         self.code = code + '\n'  # end with \n, because the parser needs it
+        if is_py3k():
+            self.code = self.code.encode()

         # initialize global Scope
         self.top = Module(module_path)
@@ -977,7 +980,7 @@ class PyFuzzyParser(object):
                 self.user_stmt = i
             else:
                 self.user_stmt = i
-            print 'up', self.user_stmt
+            #print 'up', self.user_stmt


     def _parsedotname(self, pre_used_token=None):
@@ -1281,7 +1284,7 @@ class PyFuzzyParser(object):

         :raises: IndentationError
         """
-        buf = BytesIO(self.code.encode())
+        buf = BytesIO(self.code)
         self.gen = tokenize_func(buf.readline)
         self.currentscope = self.scope

diff --git a/test/completion/imports.py b/test/completion/imports.py
index bba14755..344e38bf 100644
--- a/test/completion/imports.py
+++ b/test/completion/imports.py
@@ -60,5 +60,11 @@ func_with_import().sleep
 #? ['sqlite3']
 import sqlite

-#? ['time']
-from datetime import
+#? ['classes']
+import classes
+
+#? ['timedelta']
+from datetime import timedelta
+
+#? ['Cursor']
+from sqlite3 import Cursor
diff --git a/test/run.py b/test/run.py
index d161082b..b071fc72 100755
--- a/test/run.py
+++ b/test/run.py
@@ -18,7 +18,7 @@ if only_line is not None:
 #functions.set_debug_function(functions.debug.print_to_stdout)


-def run_completion_test(correct, source, line_nr, line):
+def run_completion_test(correct, source, line_nr, line, path):
     """
     Runs tests for completions. Return if the test was a fail or not, with 1
     for fail and 0 for success.
@@ -26,8 +26,7 @@
     # lines start with 1 and column is just the last (makes no
     # difference for testing)
     try:
-        completions = functions.complete(source, line_nr, len(line),
-                                         completion_test_dir)
+        completions = functions.complete(source, line_nr, len(line), path)
     except (Exception, functions.evaluate.MultiLevelAttributeError):
         print('test @%s: %s' % (line_nr - 1, line))
         print(traceback.format_exc())
@@ -42,14 +41,13 @@
     return 0


-def run_definition_test(correct, source, line_nr, line, correct_start):
+def run_definition_test(correct, source, line_nr, line, correct_start, path):
     """
     Runs tests for definitions. Return if the test was a fail or not, with 1
     for fail and 0 for success.
     """
     def defs(line_nr, indent):
-        return set(functions.get_definitions(source, line_nr, indent,
-                                             completion_test_dir))
+        return set(functions.get_definitions(source, line_nr, indent, path))
     try:
         result = defs(line_nr, len(line))
     except (Exception, functions.evaluate.MultiLevelAttributeError):
@@ -79,7 +77,7 @@
     return 0


-def run_test(source):
+def run_test(source, f_name):
     """
     This is the completion test for some cases. The tests are not unit test
     like, they are rather integration tests.
@@ -88,8 +86,11 @@
     row symbolizes the cursor.
     For example:

-    #? ['ab']
-    ab = 3; a
+    >>> #? ['ab']
+    >>> ab = 3; a
+
+    >>> #? int()
+    >>> ab = 3; ab
     """
     fails = 0
     tests = 0
@@ -100,11 +101,13 @@
         if correct:
             # if a list is wanted, use the completion test, otherwise the
             # get_definition test
+            path = completion_test_dir + os.path.sep + f_name
             if correct.startswith('['):
-                fails += run_completion_test(correct, source, line_nr, line)
+                fails += run_completion_test(correct, source, line_nr, line,
+                                             path)
             else:
                 fails += run_definition_test(correct, source, line_nr, line,
-                                             start)
+                                             start, path)
             correct = None
             tests += 1
         else:
@@ -138,7 +141,7 @@
             continue
         path = os.path.join(completion_test_dir, f_name)
         f = open(path)
-        num_tests, fails = run_test(f.read())
+        num_tests, fails = run_test(f.read(), f_name)

         s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
         if fails:
@@ -152,7 +155,8 @@
 summary = []
 tests_pass = True
 test_dir(completion_test_dir)
-test_dir(completion_test_dir + '/thirdparty', third_party=True)
+completion_test_dir += '/thirdparty'
+test_dir(completion_test_dir, third_party=True)

 print('\nSummary:')
 for s in summary:
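
The sketch below is not part of the patch; it is a minimal, standalone illustration of the pkgutil-based module discovery that the new imports.ImportPath.get_module_names() relies on when completing import statements. The helper name available_module_names and the directory used in the demo are assumptions made for this example only.

# Standalone sketch -- not part of the patch above. It demonstrates the
# pkgutil.iter_modules() call that ImportPath.get_module_names() builds on.
import os
import pkgutil


def available_module_names(search_path=None):
    """List importable module/package names.

    With search_path=None, pkgutil.iter_modules() scans sys.path, which
    corresponds to completing a bare `import <tab>`; passing a directory
    list corresponds to completing names inside a package (the __init__.py
    case handled in ImportPath.get_defined_names()).
    """
    return sorted(name for _loader, name, _is_pkg
                  in pkgutil.iter_modules(search_path))


if __name__ == '__main__':
    print(available_module_names()[:10])  # top-level modules on sys.path
    # Hypothetical example directory; any package directory works here.
    print(available_module_names([os.path.dirname(os.__file__)])[:10])

In the patch itself, get_module_names() wraps each discovered name in a parsing.Name with float('inf') positions, presumably so that these synthetic names are not tied to any concrete source position; the sketch above skips that wrapping and returns plain strings.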